repo_name | hexsha | file_path | code | apis |
miyosuda/rodent | [
"3d60a234eecd5e2476b186365eb121f0f3655c2e"
] | [
"examples/02_nav_maze_static/main.py"
] | [
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pygame, sys\nfrom pygame.locals import *\n\nfrom nav_maze_static_environment import NavMazeStaticEnvironment\n\nBLACK = (0, 0, 0)\n\n\nclass RandomAgent(object):\n def __init__(self, action_num):\n self.action_num = action_num\n\n def choose_action(self, state):\n return np.random.randint(self.action_num)\n\n\nclass Display(object):\n def __init__(self, display_size):\n self.width = display_size[0]\n self.height = display_size[1]\n\n self.env = NavMazeStaticEnvironment(\n width=self.width, height=self.height)\n self.agent = RandomAgent(self.env.get_action_size())\n\n pygame.init()\n\n self.surface = pygame.display.set_mode(display_size, 0, 24)\n pygame.display.set_caption('rodentia')\n\n self.last_state = self.env.reset()\n\n def update(self):\n self.surface.fill(BLACK)\n self.process()\n pygame.display.update()\n\n \"\"\"\n def get_manual_action(self):\n pressed = pygame.key.get_pressed()\n\n if pressed[K_q]:\n return 0\n if pressed[K_e]:\n return 1\n if pressed[K_a]:\n return 2\n if pressed[K_d]:\n return 3\n if pressed[K_w]:\n return 4\n if pressed[K_s]:\n return 5\n return -1\n \"\"\"\n\n def process(self):\n #action = self.get_manual_action()\n action = self.agent.choose_action(self.last_state)\n\n state, reward, terminal = self.env.step(action=action)\n\n if reward != 0:\n print(\"reward={}\".format(reward))\n\n image = pygame.image.frombuffer(state, (self.width, self.height),\n 'RGB')\n self.surface.blit(image, (0, 0))\n\n self.last_state = state\n\n if terminal:\n self.last_state = self.env.reset()\n\n\ndef main():\n display_size = (256, 256)\n display = Display(display_size)\n clock = pygame.time.Clock()\n\n running = True\n FPS = 60\n\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n\n display.update()\n clock.tick(FPS)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.random.randint"
]
] |
popura/deepy-pytorch | [
"71d87a82e937d82b9b149041280a392cc24b7299"
] | [
"deepy/data/audio/transform.py"
] | [
"import random\n\nimport torch\n\nfrom deepy.data.transform import Transform, SeparatedTransform\nfrom deepy.data.transform import PairedTransform, PairedCompose, ToPairedTransform\nfrom deepy.nn import functional as myF\n\n\nclass RandomCrop(Transform):\n def __init__(self, length: int, generator=None):\n self.length = length\n self.generator = generator\n \n def __call__(self, data):\n signal_length = data.size(-1)\n start_index = torch.randint(0, signal_length - self.length + 1,\n (1, ),\n generator=self.generator)\n end_index = start_index + self.length\n return data[..., start_index:end_index]\n \n def __repr__(self):\n return self.__class__.__name__ + '(length={})'.format(self.length)\n\n\nclass RandomFrames(RandomCrop):\n def __init__(self, n_frames=5, generator=None):\n super().__init__(length=n_frames, generator=generator)\n self.n_frames = n_frames\n\n def __repr__(self):\n return self.__class__.__name__ + '(n_frames={})'.format(self.n_frames)\n\n\nclass Windowing(Transform):\n def __init__(self, n_frames=5, stride=1, n_signals=None):\n self.n_frames = n_frames\n if not stride == 1:\n raise NotImplementedError\n self.stride = stride\n self.n_signals = n_signals\n \n def __call__(self, data):\n total_frames = data.size(-1)\n\n if self.n_signals == None:\n n_signals = total_frames - self.n_frames + 1\n else:\n n_signals = self.n_signals\n\n return torch.stack([data[..., i:i+self.n_frames] for i in range(n_signals)], dim=1)\n\n def __repr__(self):\n return self.__class__.__name__ + '(n_frames={}, stride={})'.format(self.n_frames, self.stride)\n\n\nclass Plane2Vector(Transform):\n def __init__(self):\n pass\n\n def __call__(self, data):\n return torch.cat([data[..., i, :] for i in range(data.size(-2))], dim=-1)\n \n\n\nclass ToVector(Transform):\n def __init__(self):\n pass\n\n def __call__(self, data):\n return data.reshape(-1, )\n \n def __repr__(self):\n return self.__class__.__name__\n\n\nclass PickUpChannel(Transform):\n def __init__(self, chidx=0):\n self.chidx = chidx\n\n def __call__(self, data):\n return data[self.chidx]\n \n def __repr__(self):\n return self.__class__.__name__ + '(chidx={})'.format(self.chidx)\n"
] | [
[
"torch.randint"
]
] |
ikamensh/machin | [
"af7b423c47bc1412530cf6c96c11bd3af9b3e239"
] | [
"machin/frame/buffers/buffer.py"
] | [
"from typing import Union, Dict, List, Tuple, Any, Callable\nfrom ..transition import (\n Transition,\n Scalar,\n TransitionStorageSmart,\n TransitionStorageBasic,\n)\nimport torch as t\nimport random\n\n\nclass Buffer:\n def __init__(self, buffer_size, buffer_device=\"cpu\", *_, **__):\n \"\"\"\n Create a buffer instance.\n\n Buffer stores a series of transition objects and functions\n as a ring buffer. **It is not thread-safe**.\n\n See Also:\n :class:`.Transition`\n\n\n During sampling, the tensors in \"state\", \"action\" and \"next_state\"\n dictionaries, along with \"reward\", will be concatenated in dimension 0.\n any other custom keys specified in ``**kwargs`` will not be\n concatenated.\n\n Args:\n buffer_size: Maximum buffer size.\n buffer_device: Device where buffer is stored.\n \"\"\"\n self.buffer_size = buffer_size\n self.buffer_device = buffer_device\n self.buffer = TransitionStorageSmart(buffer_size)\n self.index = 0\n\n def append(\n self,\n transition: Union[Transition, Dict],\n required_attrs=(\"state\", \"action\", \"next_state\", \"reward\", \"terminal\"),\n ):\n \"\"\"\n Store a transition object to buffer.\n\n Args:\n transition: A transition object.\n required_attrs: Required attributes. Could be an empty tuple if\n no attribute is required.\n\n Raises:\n ``ValueError`` if transition object doesn't have required\n attributes in ``required_attrs`` or has different attributes\n compared to other transition objects stored in buffer.\n \"\"\"\n if isinstance(transition, dict):\n transition = Transition(**transition)\n elif isinstance(transition, Transition):\n pass\n else: # pragma: no cover\n raise RuntimeError(\n \"Transition object must be a dict or an instance\"\n \" of the Transition class\"\n )\n if not transition.has_keys(required_attrs):\n missing_keys = set(required_attrs) - set(transition.keys())\n raise ValueError(f\"Transition object missing attributes: {missing_keys}\")\n transition.to(self.buffer_device)\n\n if self.size() != 0 and self.buffer[0].keys() != transition.keys():\n raise ValueError(\"Transition object has different attributes!\")\n\n return self.buffer.store(transition)\n\n def size(self):\n \"\"\"\n Returns:\n Length of current buffer.\n \"\"\"\n return len(self.buffer)\n\n def clear(self):\n \"\"\"\n Remove all entries from the buffer\n \"\"\"\n self.buffer.clear()\n\n @staticmethod\n def sample_method_random_unique(\n buffer: List[Transition], batch_size: int\n ) -> Tuple[int, List[Transition]]:\n \"\"\"\n Sample unique random samples from buffer.\n\n Note:\n Sampled size could be any value from 0 to ``batch_size``.\n \"\"\"\n if len(buffer) < batch_size:\n batch = random.sample(buffer, len(buffer))\n real_num = len(buffer)\n else:\n batch = random.sample(buffer, batch_size)\n real_num = batch_size\n return real_num, batch\n\n @staticmethod\n def sample_method_random(\n buffer: List[Transition], batch_size: int\n ) -> Tuple[int, List[Transition]]:\n \"\"\"\n Sample random samples from buffer.\n\n Note:\n Sampled size could be any value from 0 to ``batch_size``.\n \"\"\"\n indexes = [random.randint(0, len(buffer) - 1) for _ in range(batch_size)]\n batch = [buffer[i] for i in indexes]\n return batch_size, batch\n\n @staticmethod\n def sample_method_all(buffer: List[Transition], _) -> Tuple[int, List[Transition]]:\n \"\"\"\n Sample all samples from buffer. 
Always return the whole buffer,\n will ignore the ``batch_size`` parameter.\n \"\"\"\n return len(buffer), buffer\n\n def sample_batch(\n self,\n batch_size: int,\n concatenate: bool = True,\n device: Union[str, t.device] = None,\n sample_method: Union[Callable, str] = \"random_unique\",\n sample_attrs: List[str] = None,\n additional_concat_attrs: List[str] = None,\n *_,\n **__,\n ) -> Any:\n \"\"\"\n Sample a random batch from buffer.\n\n See Also:\n Default sample methods are defined as static class methods.\n\n :meth:`.Buffer.sample_method_random_unique`\n\n :meth:`.Buffer.sample_method_random`\n\n :meth:`.Buffer.sample_method_all`\n\n Note:\n \"Concatenation\"\n means ``torch.cat([...], dim=0)`` for tensors,\n and ``torch.tensor([...]).view(batch_size, 1)`` for scalars.\n\n Warnings:\n Custom attributes must not contain tensors. And only scalar custom\n attributes can be concatenated, such as ``int``, ``float``,\n ``bool``.\n\n Args:\n batch_size: A hint size of the result sample. actual sample size\n depends on your sample method.\n sample_method: Sample method, could be one of:\n ``\"random\", \"random_unique\", \"all\"``,\n or a function:\n ``func(list, batch_size)->(list, result_size)``\n concatenate: Whether concatenate state, action and next_state\n in dimension 0.\n If ``True``, for each value in dictionaries of major\n attributes. and each value of sub attributes, returns\n a concatenated tensor. Custom Attributes specified in\n ``additional_concat_attrs`` will also be concatenated.\n If ``False``, return a list of tensors.\n device: Device to copy to.\n sample_attrs: If sample_keys is specified, then only specified keys\n of the transition object will be sampled. You may use\n ``\"*\"`` as a wildcard to collect remaining\n **custom keys** as a ``dict``, you cannot collect major\n and sub attributes using this.\n Invalid sample attributes will be ignored.\n additional_concat_attrs: additional **custom keys** needed to be\n concatenated, will only work if ``concatenate`` is\n ``True``.\n\n Returns:\n 1. Batch size, Sampled attribute values in the same order as\n ``sample_keys``.\n\n 2. Sampled attribute values is a tuple. 
Or ``None`` if sampled\n batch size is zero (E.g.: if buffer is empty or your sample\n size is 0 and you are not sampling using the \"all\" method).\n\n - For major attributes, result are dictionaries of tensors with\n the same keys in your transition objects.\n\n - For sub attributes, result are tensors.\n\n - For custom attributes, if they are not in\n ``additional_concat_attrs``, then lists, otherwise tensors.\n \"\"\"\n if isinstance(sample_method, str):\n if not hasattr(self, \"sample_method_\" + sample_method):\n raise RuntimeError(\n f\"Cannot find specified sample method: {sample_method}\"\n )\n sample_method = getattr(self, \"sample_method_\" + sample_method)\n batch_size, batch = sample_method(self.buffer, batch_size)\n\n if device is None:\n device = self.buffer_device\n\n return (\n batch_size,\n self.post_process_batch(\n batch, device, concatenate, sample_attrs, additional_concat_attrs\n ),\n )\n\n @classmethod\n def post_process_batch(\n cls,\n batch: List[Transition],\n device: Union[str, t.device],\n concatenate: bool,\n sample_attrs: List[str],\n additional_concat_attrs: List[str],\n ):\n \"\"\"\n Post-process (concatenate) sampled batch.\n \"\"\"\n result = []\n used_keys = []\n\n if len(batch) == 0:\n return None\n if sample_attrs is None:\n sample_attrs = batch[0].keys() if batch else []\n if additional_concat_attrs is None:\n additional_concat_attrs = []\n\n major_attr = set(batch[0].major_attr)\n sub_attr = set(batch[0].sub_attr)\n custom_attr = set(batch[0].custom_attr)\n for attr in sample_attrs:\n if attr in major_attr:\n tmp_dict = {}\n for sub_k in batch[0][attr].keys():\n tmp_dict[sub_k] = cls.make_tensor_from_batch(\n [item[attr][sub_k].to(device) for item in batch],\n device,\n concatenate,\n )\n result.append(tmp_dict)\n used_keys.append(attr)\n elif attr in sub_attr:\n result.append(\n cls.make_tensor_from_batch(\n [item[attr] for item in batch], device, concatenate\n )\n )\n used_keys.append(attr)\n elif attr == \"*\":\n # select custom keys\n tmp_dict = {}\n for remain_k in batch[0].keys():\n if (\n remain_k not in major_attr\n and remain_k not in sub_attr\n and remain_k not in used_keys\n ):\n tmp_dict[remain_k] = cls.make_tensor_from_batch(\n [item[remain_k] for item in batch],\n device,\n concatenate and remain_k in additional_concat_attrs,\n )\n result.append(tmp_dict)\n elif attr in custom_attr:\n result.append(\n cls.make_tensor_from_batch(\n [item[attr] for item in batch],\n device,\n concatenate and attr in additional_concat_attrs,\n )\n )\n used_keys.append(attr)\n return tuple(result)\n\n @staticmethod\n def make_tensor_from_batch(\n batch: List[Union[Scalar, t.Tensor]],\n device: Union[str, t.device],\n concatenate: bool,\n ):\n \"\"\"\n Make a tensor from a batch of data.\n Will concatenate input tensors in dimension 0.\n Or create a tensor of size (batch_size, 1) for scalars.\n\n Args:\n batch: Batch data.\n device: Device to move data to\n concatenate: Whether performing concatenation.\n\n Returns:\n Original batch if batch is empty,\n or tensor depends on your data (if concatenate),\n or original batch (if not concatenate).\n \"\"\"\n if concatenate and len(batch) != 0:\n item = batch[0]\n batch_size = len(batch)\n if t.is_tensor(item):\n batch = [it.to(device) for it in batch]\n return t.cat(batch, dim=0).to(device)\n else:\n try:\n return t.tensor(batch, device=device).view(batch_size, -1)\n except Exception:\n raise ValueError(f\"Batch not concatenable: {batch}\")\n else:\n return batch\n\n def __reduce__(self):\n # for pickling\n return 
self.__class__, (self.buffer_size, self.buffer_device)\n"
] | [
[
"torch.tensor",
"torch.is_tensor",
"torch.cat"
]
] |
akshit-protonn/models | [
"38c8c6fe4144c93d6aadd19981c2b90570c29eba",
"38c8c6fe4144c93d6aadd19981c2b90570c29eba"
] | [
"official/nlp/modeling/networks/encoder_scaffold_test.py",
"research/delf/delf/python/detect_to_retrieve/cluster_delf_features.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for EncoderScaffold network.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\nfrom official.modeling import activations\nfrom official.nlp.modeling import layers\nfrom official.nlp.modeling.networks import encoder_scaffold\n\n\n# Test class that wraps a standard transformer layer. If this layer is called\n# at any point, the list passed to the config object will be filled with a\n# boolean 'True'. We register this class as a Keras serializable so we can\n# test serialization below.\[email protected]_keras_serializable(package=\"TestOnly\")\nclass ValidatedTransformerLayer(layers.Transformer):\n\n def __init__(self, call_list, call_class=None, **kwargs):\n super(ValidatedTransformerLayer, self).__init__(**kwargs)\n self.list = call_list\n self.call_class = call_class\n\n def call(self, inputs):\n self.list.append(True)\n return super(ValidatedTransformerLayer, self).call(inputs)\n\n def get_config(self):\n config = super(ValidatedTransformerLayer, self).get_config()\n config[\"call_list\"] = self.list\n config[\"call_class\"] = tf.keras.utils.get_registered_name(self.call_class)\n return config\n\n\[email protected]_keras_serializable(package=\"TestLayerOnly\")\nclass TestLayer(tf.keras.layers.Layer):\n pass\n\n\n# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. 
It\n# guarantees forward compatibility of this code for the V2 switchover.\n@keras_parameterized.run_all_keras_modes\nclass EncoderScaffoldLayerClassTest(keras_parameterized.TestCase):\n\n def tearDown(self):\n super(EncoderScaffoldLayerClassTest, self).tearDown()\n tf.keras.mixed_precision.set_global_policy(\"float32\")\n\n @parameterized.named_parameters(\n dict(testcase_name=\"only_final_output\", return_all_layer_outputs=False),\n dict(testcase_name=\"all_layer_outputs\", return_all_layer_outputs=True))\n def test_network_creation(self, return_all_layer_outputs):\n hidden_size = 32\n sequence_length = 21\n num_hidden_instances = 3\n embedding_cfg = {\n \"vocab_size\": 100,\n \"type_vocab_size\": 16,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n\n call_list = []\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"call_list\":\n call_list\n }\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=num_hidden_instances,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cls=ValidatedTransformerLayer,\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg,\n layer_norm_before_pooling=True,\n return_all_layer_outputs=return_all_layer_outputs)\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n output_data, pooled = test_network([word_ids, mask, type_ids])\n\n if return_all_layer_outputs:\n self.assertIsInstance(output_data, list)\n self.assertLen(output_data, num_hidden_instances)\n data = output_data[-1]\n else:\n data = output_data\n self.assertIsInstance(test_network.hidden_layers, list)\n self.assertLen(test_network.hidden_layers, num_hidden_instances)\n self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)\n\n expected_data_shape = [None, sequence_length, hidden_size]\n expected_pooled_shape = [None, hidden_size]\n self.assertAllEqual(expected_data_shape, data.shape.as_list())\n self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())\n\n # The default output dtype is float32.\n self.assertAllEqual(tf.float32, data.dtype)\n self.assertAllEqual(tf.float32, pooled.dtype)\n\n # If call_list[0] exists and is True, the passed layer class was\n # instantiated from the given config properly.\n self.assertNotEmpty(call_list)\n self.assertTrue(call_list[0], \"The passed layer class wasn't instantiated.\")\n\n self.assertTrue(hasattr(test_network, \"_output_layer_norm\"))\n\n def test_network_creation_with_float16_dtype(self):\n tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n hidden_size = 32\n sequence_length = 21\n embedding_cfg = {\n \"vocab_size\": 100,\n \"type_vocab_size\": 16,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n hidden_cfg = {\n 
\"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg)\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask, type_ids])\n\n expected_data_shape = [None, sequence_length, hidden_size]\n expected_pooled_shape = [None, hidden_size]\n self.assertAllEqual(expected_data_shape, data.shape.as_list())\n self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())\n\n # If float_dtype is set to float16, the data output is float32 (from a layer\n # norm) and pool output should be float16.\n self.assertAllEqual(tf.float32, data.dtype)\n self.assertAllEqual(tf.float16, pooled.dtype)\n\n def test_network_invocation(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n num_types = 7\n embedding_cfg = {\n \"vocab_size\": vocab_size,\n \"type_vocab_size\": num_types,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg,\n dict_outputs=True)\n\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n outputs = test_network([word_ids, mask, type_ids])\n\n # Create a model based off of this network:\n model = tf.keras.Model([word_ids, mask, type_ids], outputs)\n\n # Invoke the model. 
We can't validate the output data here (the model is too\n # complex) but this will catch structural runtime errors.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n type_id_data = np.random.randint(\n num_types, size=(batch_size, sequence_length))\n preds = model.predict([word_id_data, mask_data, type_id_data])\n self.assertEqual(preds[\"pooled_output\"].shape, (3, hidden_size))\n\n # Creates a EncoderScaffold with max_sequence_length != sequence_length\n num_types = 7\n embedding_cfg = {\n \"vocab_size\": vocab_size,\n \"type_vocab_size\": num_types,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length * 2,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg)\n outputs = test_network([word_ids, mask, type_ids])\n model = tf.keras.Model([word_ids, mask, type_ids], outputs)\n _ = model.predict([word_id_data, mask_data, type_id_data])\n\n def test_serialize_deserialize(self):\n # Create a network object that sets all of its config options.\n hidden_size = 32\n sequence_length = 21\n embedding_cfg = {\n \"vocab_size\": 100,\n \"type_vocab_size\": 16,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n # Create a small EncoderScaffold for testing.\n network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cfg=embedding_cfg)\n\n # Create another network object from the first object's config.\n new_network = encoder_scaffold.EncoderScaffold.from_config(\n network.get_config())\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(network.get_config(), new_network.get_config())\n\n\nclass Embeddings(tf.keras.Model):\n\n def __init__(self, vocab_size, hidden_size):\n super().__init__()\n self.inputs = [\n tf.keras.layers.Input(\n shape=(None,), dtype=tf.int32, name=\"input_word_ids\"),\n tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name=\"input_mask\")\n ]\n self.attention_mask = layers.SelfAttentionMask()\n self.embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),\n 
name=\"word_embeddings\")\n\n def call(self, inputs):\n word_ids, mask = inputs\n word_embeddings = self.embedding_layer(word_ids)\n return word_embeddings, self.attention_mask([word_embeddings, mask])\n\n\n@keras_parameterized.run_all_keras_modes\nclass EncoderScaffoldEmbeddingNetworkTest(keras_parameterized.TestCase):\n\n def test_network_invocation(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n\n # Build an embedding network to swap in for the default network. This one\n # will have 2 inputs (mask and word_ids) instead of 3, and won't use\n # positional embeddings.\n network = Embeddings(vocab_size, hidden_size)\n\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cls=network)\n\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask])\n\n # Create a model based off of this network:\n model = tf.keras.Model([word_ids, mask], [data, pooled])\n\n # Invoke the model. We can't validate the output data here (the model is too\n # complex) but this will catch structural runtime errors.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n _ = model.predict([word_id_data, mask_data])\n\n def test_serialize_deserialize(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n\n # Build an embedding network to swap in for the default network. 
This one\n # will have 2 inputs (mask and word_ids) instead of 3, and won't use\n # positional embeddings.\n\n word_ids = tf.keras.layers.Input(\n shape=(sequence_length,), dtype=tf.int32, name=\"input_word_ids\")\n mask = tf.keras.layers.Input(\n shape=(sequence_length,), dtype=tf.int32, name=\"input_mask\")\n embedding_layer = layers.OnDeviceEmbedding(\n vocab_size=vocab_size,\n embedding_width=hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),\n name=\"word_embeddings\")\n word_embeddings = embedding_layer(word_ids)\n attention_mask = layers.SelfAttentionMask()([word_embeddings, mask])\n network = tf.keras.Model([word_ids, mask],\n [word_embeddings, attention_mask])\n\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n }\n\n # Create a small EncoderScaffold for testing.\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cfg=hidden_cfg,\n embedding_cls=network,\n embedding_data=embedding_layer.embeddings)\n\n # Create another network object from the first object's config.\n new_network = encoder_scaffold.EncoderScaffold.from_config(\n test_network.get_config())\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(test_network.get_config(), new_network.get_config())\n\n # Create a model based off of the old and new networks:\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n\n data, pooled = new_network([word_ids, mask])\n new_model = tf.keras.Model([word_ids, mask], [data, pooled])\n\n data, pooled = test_network([word_ids, mask])\n model = tf.keras.Model([word_ids, mask], [data, pooled])\n\n # Copy the weights between models.\n new_model.set_weights(model.get_weights())\n\n # Invoke the models.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n data, cls = model.predict([word_id_data, mask_data])\n new_data, new_cls = new_model.predict([word_id_data, mask_data])\n\n # The output should be equal.\n self.assertAllEqual(data, new_data)\n self.assertAllEqual(cls, new_cls)\n\n # We should not be able to get a reference to the embedding data.\n with self.assertRaisesRegex(RuntimeError, \".*does not have a reference.*\"):\n new_network.get_embedding_table()\n\n\n@keras_parameterized.run_all_keras_modes\nclass EncoderScaffoldHiddenInstanceTest(keras_parameterized.TestCase):\n\n def test_network_invocation(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n num_types = 7\n\n embedding_cfg = {\n \"vocab_size\": vocab_size,\n \"type_vocab_size\": num_types,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n\n call_list = []\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n 
\"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"call_list\":\n call_list\n }\n # Create a small EncoderScaffold for testing. This time, we pass an already-\n # instantiated layer object.\n\n xformer = ValidatedTransformerLayer(**hidden_cfg)\n\n test_network = encoder_scaffold.EncoderScaffold(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n hidden_cls=xformer,\n embedding_cfg=embedding_cfg)\n\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask, type_ids])\n\n # Create a model based off of this network:\n model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n\n # Invoke the model. We can't validate the output data here (the model is too\n # complex) but this will catch structural runtime errors.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n type_id_data = np.random.randint(\n num_types, size=(batch_size, sequence_length))\n _ = model.predict([word_id_data, mask_data, type_id_data])\n\n # If call_list[0] exists and is True, the passed layer class was\n # called as part of the graph creation.\n self.assertNotEmpty(call_list)\n self.assertTrue(call_list[0], \"The passed layer class wasn't instantiated.\")\n\n @parameterized.parameters(True, False)\n def test_serialize_deserialize(self, use_hidden_cls_instance):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n num_types = 7\n\n embedding_cfg = {\n \"vocab_size\": vocab_size,\n \"type_vocab_size\": num_types,\n \"hidden_size\": hidden_size,\n \"seq_length\": sequence_length,\n \"max_seq_length\": sequence_length,\n \"initializer\": tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"dropout_rate\": 0.1,\n }\n\n call_list = []\n hidden_cfg = {\n \"num_attention_heads\":\n 2,\n \"intermediate_size\":\n 3072,\n \"intermediate_activation\":\n activations.gelu,\n \"dropout_rate\":\n 0.1,\n \"attention_dropout_rate\":\n 0.1,\n \"kernel_initializer\":\n tf.keras.initializers.TruncatedNormal(stddev=0.02),\n \"call_list\":\n call_list,\n \"call_class\":\n TestLayer\n }\n # Create a small EncoderScaffold for testing. 
This time, we pass an already-\n # instantiated layer object.\n kwargs = dict(\n num_hidden_instances=3,\n pooled_output_dim=hidden_size,\n pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=0.02),\n embedding_cfg=embedding_cfg)\n\n if use_hidden_cls_instance:\n xformer = ValidatedTransformerLayer(**hidden_cfg)\n test_network = encoder_scaffold.EncoderScaffold(\n hidden_cls=xformer, **kwargs)\n else:\n test_network = encoder_scaffold.EncoderScaffold(\n hidden_cls=ValidatedTransformerLayer, hidden_cfg=hidden_cfg, **kwargs)\n\n # Create another network object from the first object's config.\n new_network = encoder_scaffold.EncoderScaffold.from_config(\n test_network.get_config())\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(test_network.get_config(), new_network.get_config())\n\n # Create a model based off of the old and new networks:\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n\n data, pooled = new_network([word_ids, mask, type_ids])\n new_model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n\n data, pooled = test_network([word_ids, mask, type_ids])\n model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n\n # Copy the weights between models.\n new_model.set_weights(model.get_weights())\n\n # Invoke the models.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n type_id_data = np.random.randint(\n num_types, size=(batch_size, sequence_length))\n data, cls = model.predict([word_id_data, mask_data, type_id_data])\n new_data, new_cls = new_model.predict(\n [word_id_data, mask_data, type_id_data])\n\n # The output should be equal.\n self.assertAllEqual(data, new_data)\n self.assertAllEqual(cls, new_cls)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Lint as: python3\n# Copyright 2019 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Clusters DELF features using the K-means algorithm.\n\nAll DELF local feature descriptors for a given dataset's index images are loaded\nas the input.\n\nNote that:\n- we only use features extracted from whole images (no features from boxes are\n used).\n- the codebook should be trained on Paris images for Oxford retrieval\n experiments, and vice-versa.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nimport time\n\nfrom absl import app\nimport numpy as np\nimport tensorflow as tf\n\nfrom delf import feature_io\nfrom delf.python.datasets.revisited_op import dataset\n\ncmd_args = None\n\n# Extensions.\n_DELF_EXTENSION = '.delf'\n\n# Default DELF dimensionality.\n_DELF_DIM = 128\n\n# Pace to report log when collecting features.\n_STATUS_CHECK_ITERATIONS = 100\n\n\nclass _IteratorInitHook(tf.estimator.SessionRunHook):\n \"\"\"Hook to initialize data iterator after session is created.\"\"\"\n\n def __init__(self):\n super(_IteratorInitHook, self).__init__()\n self.iterator_initializer_fn = None\n\n def after_create_session(self, session, coord):\n \"\"\"Initialize the iterator after the session has been created.\"\"\"\n del coord\n self.iterator_initializer_fn(session)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise RuntimeError('Too many command-line arguments.')\n\n # Process output directory.\n if tf.io.gfile.exists(cmd_args.output_cluster_dir):\n raise RuntimeError(\n 'output_cluster_dir = %s already exists. This may indicate that a '\n 'previous run already wrote checkpoints in this directory, which would '\n 'lead to incorrect training. Please re-run this script by specifying an'\n ' inexisting directory.' % cmd_args.output_cluster_dir)\n else:\n tf.io.gfile.makedirs(cmd_args.output_cluster_dir)\n\n # Read list of index images from dataset file.\n print('Reading list of index images from dataset file...')\n _, index_list, _ = dataset.ReadDatasetFile(cmd_args.dataset_file_path)\n num_images = len(index_list)\n print('done! 
Found %d images' % num_images)\n\n # Loop over list of index images and collect DELF features.\n features_for_clustering = []\n start = time.clock()\n print('Starting to collect features from index images...')\n for i in range(num_images):\n if i > 0 and i % _STATUS_CHECK_ITERATIONS == 0:\n elapsed = (time.clock() - start)\n print('Processing index image %d out of %d, last %d '\n 'images took %f seconds' %\n (i, num_images, _STATUS_CHECK_ITERATIONS, elapsed))\n start = time.clock()\n\n features_filename = index_list[i] + _DELF_EXTENSION\n features_fullpath = os.path.join(cmd_args.features_dir, features_filename)\n _, _, features, _, _ = feature_io.ReadFromFile(features_fullpath)\n if features.size != 0:\n assert features.shape[1] == _DELF_DIM\n for feature in features:\n features_for_clustering.append(feature)\n\n features_for_clustering = np.array(features_for_clustering, dtype=np.float32)\n print('All features were loaded! There are %d features, each with %d '\n 'dimensions' %\n (features_for_clustering.shape[0], features_for_clustering.shape[1]))\n\n # Run K-means clustering.\n def _get_input_fn():\n \"\"\"Helper function to create input function and hook for training.\n\n Returns:\n input_fn: Input function for k-means Estimator training.\n init_hook: Hook used to load data during training.\n \"\"\"\n init_hook = _IteratorInitHook()\n\n def _input_fn():\n \"\"\"Produces tf.data.Dataset object for k-means training.\n\n Returns:\n Tensor with the data for training.\n \"\"\"\n features_placeholder = tf.compat.v1.placeholder(\n tf.float32, features_for_clustering.shape)\n delf_dataset = tf.data.Dataset.from_tensor_slices((features_placeholder))\n delf_dataset = delf_dataset.shuffle(1000).batch(\n features_for_clustering.shape[0])\n iterator = tf.compat.v1.data.make_initializable_iterator(delf_dataset)\n\n def _initializer_fn(sess):\n \"\"\"Initialize dataset iterator, feed in the data.\"\"\"\n sess.run(\n iterator.initializer,\n feed_dict={features_placeholder: features_for_clustering})\n\n init_hook.iterator_initializer_fn = _initializer_fn\n return iterator.get_next()\n\n return _input_fn, init_hook\n\n input_fn, init_hook = _get_input_fn()\n\n kmeans = tf.compat.v1.estimator.experimental.KMeans(\n num_clusters=cmd_args.num_clusters,\n model_dir=cmd_args.output_cluster_dir,\n use_mini_batch=False,\n )\n\n print('Starting K-means clustering...')\n start = time.clock()\n for i in range(cmd_args.num_iterations):\n kmeans.train(input_fn, hooks=[init_hook])\n average_sum_squared_error = kmeans.evaluate(\n input_fn, hooks=[init_hook])['score'] / features_for_clustering.shape[0]\n elapsed = (time.clock() - start)\n print('K-means iteration %d (out of %d) took %f seconds, '\n 'average-sum-of-squares: %f' %\n (i, cmd_args.num_iterations, elapsed, average_sum_squared_error))\n start = time.clock()\n\n print('K-means clustering finished!')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument(\n '--dataset_file_path',\n type=str,\n default='/tmp/gnd_roxford5k.mat',\n help=\"\"\"\n Dataset file for Revisited Oxford or Paris dataset, in .mat format. 
The\n list of index images loaded from this file is used to collect local\n features, which are assumed to be in <image_name>.delf file format.\n \"\"\")\n parser.add_argument(\n '--features_dir',\n type=str,\n default='/tmp/features',\n help=\"\"\"\n Directory where DELF feature files are to be found.\n \"\"\")\n parser.add_argument(\n '--num_clusters',\n type=int,\n default=1024,\n help=\"\"\"\n Number of clusters to use.\n \"\"\")\n parser.add_argument(\n '--num_iterations',\n type=int,\n default=50,\n help=\"\"\"\n Number of iterations to use.\n \"\"\")\n parser.add_argument(\n '--output_cluster_dir',\n type=str,\n default='/tmp/cluster',\n help=\"\"\"\n Directory where clustering outputs are written to. This directory should\n not exist before running this script; it will be created during\n clustering.\n \"\"\")\n cmd_args, unparsed = parser.parse_known_args()\n app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.keras.mixed_precision.set_global_policy",
"tensorflow.keras.Model",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.layers.Input",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.utils.get_registered_name",
"numpy.random.randint",
"tensorflow.test.main",
"tensorflow.keras.Input"
],
[
"tensorflow.io.gfile.exists",
"tensorflow.compat.v1.placeholder",
"tensorflow.io.gfile.makedirs",
"tensorflow.compat.v1.data.make_initializable_iterator",
"tensorflow.compat.v1.estimator.experimental.KMeans",
"numpy.array",
"tensorflow.data.Dataset.from_tensor_slices"
]
] |
emcoglab/sensorimotor-distance-paper-2021 | [
"94464bb391ea42ffad8bcef6b087c1343ecbe2c7"
] | [
"exclusivity_correlation.py"
] | [
"\"\"\"\n===========================\nComputes the correlation between pairwise distances and mean exclusivity ratings for randomly drawn pairs of norms.\n===========================\n\nDr. Cai Wingfield\n---------------------------\nEmbodied Cognition Lab\nDepartment of Psychology\nUniversity of Lancaster\[email protected]\ncaiwingfield.net\n---------------------------\n2022\n---------------------------\n\"\"\"\n\nfrom numpy import corrcoef, zeros\nfrom numpy.random import default_rng, seed\n\nfrom linguistic_distributional_models.utils.logging import print_progress\nfrom linguistic_distributional_models.utils.maths import DistanceType, distance\nfrom sensorimotor_norms.sensorimotor_norms import SensorimotorNorms, DataColNames\n\nsn = SensorimotorNorms(use_breng_translation=False, verbose=True)\n\n\ndef exclusivity_correlation(n_draws: int):\n rng = default_rng()\n\n all_words = list(sn.iter_words())\n random_words = rng.choice(all_words, 2 * n_draws, replace=True)\n first_words = random_words[:n_draws]\n second_words = random_words[n_draws:]\n\n distances = zeros((n_draws,)) # Preallocate vectors to be correlated\n mean_exclusivities = zeros((n_draws,))\n for i in range(n_draws):\n w1, w2 = first_words[i], second_words[i]\n v1, v2 = sn.sensorimotor_vector_for_word(w1), sn.sensorimotor_vector_for_word(w2)\n e1, e2 = sn.stat_for_word(w1, DataColNames.exclusivity_sensorimotor), sn.stat_for_word(w2, DataColNames.exclusivity_sensorimotor)\n\n # For the pair\n distances[i] = distance(v1, v2, DistanceType.cosine) # vector distance\n mean_exclusivities[i] = (e1 + e2) / 2 # mean exclusivity\n\n print_progress(i + 1, n_draws)\n\n return corrcoef(distances, mean_exclusivities)\n\n\nif __name__ == \"__main__\":\n seed(451)\n correlation = exclusivity_correlation(n_draws=10_000)\n print(correlation)\n"
] | [
[
"numpy.random.seed",
"numpy.random.default_rng",
"numpy.zeros",
"numpy.corrcoef"
]
] |
AnastasiaaSenina/openvino_training_extensions | [
"267425d64372dff5b9083dc0ca6abfc305a71449"
] | [
"pytorch_toolkit/action_recognition/action_recognition/models/multi_frame_baseline.py"
] | [
"from torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom ..utils import get_fine_tuning_parameters\nfrom .backbone import make_encoder\nfrom .modules import squash_dims, unsquash_dim\n\n\nclass MultiFrameBaseline(nn.Module):\n \"\"\"Simple baseline that runs a classifier on each frame independently and averages logits.\"\"\"\n\n def __init__(self, sample_duration, encoder='resnet34', n_classes=400, input_size=224, pretrained=True,\n input_channels=3):\n \"\"\"Average prediction over multiple frames\"\"\"\n super().__init__()\n\n # backbone\n encoder = make_encoder(encoder, input_size=input_size, input_channels=input_channels, pretrained=pretrained)\n self.resnet = encoder.features # name is kept for compatibility with older checkpoints\n self.last_feature_size = encoder.features_shape[1]\n self.fc = nn.Linear(encoder.features_shape[0], n_classes)\n self.dropout = nn.Dropout2d(0.5)\n\n self.sequence_size = sample_duration\n self.init_weights()\n\n def init_weights(self):\n \"\"\"Initialize the weights.\"\"\"\n self.fc.weight.data.normal_(0.0, 0.02)\n self.fc.bias.data.fill_(0)\n\n def forward(self, images):\n \"\"\"Extract the image feature vectors.\"\"\"\n # (B x T x C x H x W) -> (B*T x C x H x W)\n images = squash_dims(images, (0, 1))\n\n features = self.resnet(images)\n # features = self.dropout(features)\n\n features = F.avg_pool2d(features, self.last_feature_size) # (B*T) x C\n features = unsquash_dim(features, 0, (-1, self.sequence_size))\n ys = self.fc(features.squeeze(-1).squeeze(-1))\n\n return ys.mean(1)\n\n def trainable_parameters(self):\n param_groups = [\n ('trainable', {'re': r''}),\n ]\n\n return get_fine_tuning_parameters(self, param_groups)\n"
] | [
[
"torch.nn.functional.avg_pool2d",
"torch.nn.Linear",
"torch.nn.Dropout2d"
]
] |
KazukiOnodera/Microsoft-Malware-Prediction | [
"103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80"
] | [
"py/trash/005-2_agg_each_lgb_1.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 5 22:33:48 2019\n\n@author: Kazuki\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os, gc\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport sys\nsys.path.append(f'/home/{os.environ.get(\"USER\")}/PythonLibrary')\nimport lgbextension as ex\nimport lightgbm as lgb\nfrom multiprocessing import cpu_count\n\nfrom sklearn.metrics import roc_auc_score\n\nimport utils , utils_cat\nutils.start(__file__)\n#==============================================================================\n\nSEED = np.random.randint(9999)\nprint('SEED:', SEED)\n\n\nDROP = [\n# 'f002_EngineVersion', 'f002_AvSigVersion', 'f002_AppVersion',\n# \n# 'f003_AvSigVersion', 'f003_OsBuildLab', 'f003_Census_OSVersion',\n# 'f003_date_min', 'f003_date_max'\n ]\n\nNFOLD = 5\n\nLOOP = 1\n\nparam = {\n 'objective': 'binary',\n 'metric': 'auc',\n \n 'learning_rate': 0.05,\n 'max_depth': -1,\n 'num_leaves': 2**6 -1,\n 'max_bin': 127,\n \n 'min_child_weight': 10,\n 'min_data_in_leaf': 150,\n 'reg_lambda': 0.5, # L2 regularization term on weights.\n 'reg_alpha': 0.5, # L1 regularization term on weights.\n \n 'colsample_bytree': 0.9,\n 'subsample': 0.7,\n# 'nthread': 32,\n 'nthread': cpu_count(),\n 'bagging_freq': 1,\n 'verbose':-1,\n }\n\nNROUND = 500\nESR = 50\nVERBOSE_EVAL = 25\n\nTRAIN_TH = 0.6\nVALID_TH = 0.8\n\noutpath_tr = '../data/train_f005_1.f'\noutpath_te = '../data/test_f005_1.f'\n\n# =============================================================================\n# load\n# =============================================================================\n\nfiles_tr = sorted(glob('../data/f005/train_f005*.f'))[20:40]\n\n\n[print(i,f) for i,f in enumerate(files_tr)]\n\nX_train = pd.concat([\n pd.read_feather(f).sample(frac=0.5, random_state=SEED) for f in tqdm(files_tr, mininterval=60)\n ], axis=1)\ny_train = utils.load_target().sample(frac=0.5, random_state=SEED)['HasDetections']\n\nif len(DROP)>0:\n X_train.drop(DROP, axis=1, inplace=True)\n\n\n#adv = pd.read_csv('../data/oof_802_adv.py.csv').iloc[:8921483].oof\n#adv_th = adv.quantile(VALID_TH)\n#\n#X_valid = X_train[adv>adv.quantile(VALID_TH)]\n#y_valid = y_train[adv>adv.quantile(VALID_TH)]\n#\n#X_train = X_train[adv<=adv.quantile(TRAIN_TH)]\n#y_train = y_train[adv<=adv.quantile(TRAIN_TH)]\n\nif X_train.columns.duplicated().sum()>0:\n raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')\nprint('no dup :) ')\nprint(f'X_train.shape {X_train.shape}')\n#print(f'X_valid.shape {X_valid.shape}')\n\ngc.collect()\n\nCAT = list( set(X_train.columns)&set(utils_cat.ALL))\nprint(f'CAT: {CAT}')\n\n\n# =============================================================================\n# hold out\n# =============================================================================\n\n\ndtrain = lgb.Dataset(X_train, y_train.values, \n categorical_feature=CAT, \n free_raw_data=False)\n\n#dvalid = lgb.Dataset(X_valid, y_valid.values, \n# categorical_feature=CAT, \n# free_raw_data=False)\ngc.collect()\n\n\n\n\nmodel = lgb.train(params=param, train_set=dtrain, num_boost_round=NROUND, \n# valid_sets=[dtrain, dvalid], \n# valid_names=['train','valid'], \n# feval=ex.eval_auc,\n categorical_feature=CAT, \n# early_stopping_rounds=ESR,\n verbose_eval=VERBOSE_EVAL)\n\n\n\nimp = ex.getImp(model)\nimp['split'] /= imp['split'].max()\nimp['gain'] /= imp['gain'].max()\nimp['total'] = imp['split'] + imp['gain']\nimp.sort_values('total', ascending=False, inplace=True)\nimp.reset_index(drop=True, 
inplace=True)\n\n\nimp.to_csv(f'LOG/imp_{__file__}.csv', index=False)\n\n\n# =============================================================================\n# \n# =============================================================================\n\nimp = pd.read_csv('LOG/imp_005-2_agg_each_lgb_1.py.csv')\nCOL = imp.head(30).feature.tolist()\n\nX_train = pd.concat([\n pd.read_feather(f) for f in tqdm(files_tr, mininterval=60)\n ], axis=1)[COL]\n\nX_train.to_feather(outpath_tr)\n\n\n\nfiles_te = sorted(glob('../data/f005/test_f005*.f'))[20:40]\n\nX_test = pd.concat([\n pd.read_feather(f) for f in tqdm(files_te, mininterval=60)\n ], axis=1)[COL]\n\nX_test.to_feather(outpath_te)\n\n\n#==============================================================================\nutils.end(__file__)\n#utils.stop_instance()\n\n"
] | [
[
"pandas.read_csv",
"pandas.read_feather",
"numpy.random.randint"
]
] |
jdavidrcamacho/tedi | [
"f963e781e0a3c7be3df338a85a08ab974b6b8019"
] | [
"tedi/kernels.py"
] | [
"\"\"\"\nCovariance functions\n\"\"\"\nimport numpy as np\n#because it makes life easier down the line\npi, exp, sine, cosine, sqrt = np.pi, np.exp, np.sin, np.cos, np.sqrt\n__all__ = ['Constant', 'WhiteNoise', 'SquaredExponential' , 'Periodic', \n 'QuasiPeriodic', 'RationalQuadratic', 'Cosine', 'Exponential',\n 'Matern32', 'Matern52', 'RQP']\n\n\nclass kernel(object):\n \"\"\"\n Definition the kernels that will be used. To simplify my life all the\n kernels defined are the sum of kernel + white noise\n \"\"\"\n def __init__(self, *args):\n \"\"\" Puts all kernel arguments in an array pars. \"\"\"\n self.pars = np.array(args, dtype=float)\n def __call__(self, r):\n \"\"\" r = t - t' \"\"\"\n raise NotImplementedError\n def __repr__(self):\n \"\"\" Representation of each kernel instance \"\"\"\n return \"{0}({1})\".format(self.__class__.__name__,\n \", \".join(map(str, self.pars)))\n def __add__(self, b):\n return Sum(self, b)\n def __radd__(self, b):\n return self.__add__(b)\n\n def __mul__(self, b):\n return Multiplication(self, b)\n def __rmul__(self, b):\n return self.__mul__(b)\n\n\nclass _operator(kernel):\n \"\"\" To allow operations between two kernels \"\"\"\n def __init__(self, k1, k2):\n self.k1 = k1\n self.k2 = k2\n self.kerneltype = 'complex'\n @property\n def pars(self):\n return np.append(self.k1.pars, self.k2.pars)\n\n\nclass Sum(_operator):\n \"\"\" To allow the sum of kernels \"\"\"\n def __repr__(self):\n return \"{0} + {1}\".format(self.k1, self.k2)\n def __call__(self, r):\n return self.k1(r) + self.k2(r)\n\n\nclass Multiplication(_operator):\n \"\"\" To allow the multiplication of kernels \"\"\"\n def __repr__(self):\n return \"{0} * {1}\".format(self.k1, self.k2)\n def __call__(self, r):\n return self.k1(r) * self.k2(r)\n\n\n##### Constant kernel #########################################################\nclass Constant(kernel):\n \"\"\"\n This kernel returns its constant argument c \n \n Parameters\n ----------\n c: float\n Constant\n \"\"\"\n def __init__(self, c):\n super(Constant, self).__init__(c)\n self.c = c\n self.params_number = 1 #number of hyperparameters\n def __call__(self, r):\n return self.c**2 * np.ones_like(r)\n\n\n##### White noise kernel ######################################################\nclass WhiteNoise(kernel):\n \"\"\"\n Definition of the white noise kernel.\n \n Parameters\n ----------\n wn: float\n White noise amplitude\n \"\"\"\n def __init__(self, wn):\n super(WhiteNoise, self).__init__(wn)\n self.wn = wn\n self.type = 'stationary'\n self.derivatives = 1 #number of derivatives in this kernel\n self.params_number = 1 #number of hyperparameters\n def __call__(self, r):\n# return self.wn**2 * np.identity(len(r))\n return self.wn**2 * np.diag(np.diag(np.ones_like(r)))\n\n\n##### Squared exponential kernel ##############################################\nclass SquaredExponential(kernel):\n \"\"\"\n Squared Exponential kernel, also known as radial basis function or RBF \n kernel in other works.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell: float\n Length-scale\n \"\"\"\n def __init__(self, amplitude, ell):\n super(SquaredExponential, self).__init__(amplitude, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 * exp(-0.5 * r**2 / self.ell**2)\n\n\n##### Periodic kernel #########################################################\nclass Periodic(kernel):\n \"\"\"\n Definition of the periodic kernel.\n \n Parameters\n ----------\n amplitude: 
float\n Amplitude of the kernel\n ell: float\n Lenght scale\n P: float\n Period\n \"\"\"\n def __init__(self, amplitude, P, ell):\n super(Periodic, self).__init__(amplitude, P, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.P = P\n self.params_number = 3 #number of hyperparameters\n def __call__(self, r):\n return self.amplitude**2*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell**2)\n\n\n##### Quasi periodic kernel ###################################################\nclass QuasiPeriodic(kernel):\n \"\"\"\n This kernel is the product between the periodic and the squared \n exponential kernels. \n It is commonly known as the quasi-periodic kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell_e: float\n Evolutionary time scale\n ell_p: float\n Length scale of the periodic component\n P: float\n Kernel periodicity\n \"\"\"\n def __init__(self, amplitude, ell_e, P, ell_p):\n super(QuasiPeriodic, self).__init__(amplitude, ell_e, P, ell_p)\n self.amplitude = amplitude\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 4\n def __call__(self, r):\n return self.amplitude**2 *exp(- 2*sine(pi*np.abs(r)/self.P)**2 \\\n /self.ell_p**2 - r**2/(2*self.ell_e**2))\n\n\n##### Rational quadratic kernel ###############################################\nclass RationalQuadratic(kernel):\n \"\"\"\n Definition of the rational quadratic kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n alpha: float\n Amplitude of large and small scale variations\n ell: float\n Characteristic lenght scale to define the kernel \"smoothness\"\n \"\"\"\n def __init__(self, amplitude, alpha, ell):\n super(RationalQuadratic, self).__init__(amplitude, alpha, ell)\n self.amplitude = amplitude\n self.alpha = alpha\n self.ell = ell\n self.params_number = 3\n def __call__(self, r):\n return self.amplitude**2*(1+0.5*r**2/(self.alpha*self.ell**2))**(-self.alpha)\n\n\n##### Cosine kernel ###########################################################\nclass Cosine(kernel):\n \"\"\"\n Definition of the cosine kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n P: float\n Period\n \"\"\"\n def __init__(self, amplitude, P):\n super(Cosine, self).__init__(amplitude, P)\n self.amplitude = amplitude\n self.P = P\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 * cosine(2*pi*np.abs(r) / self.P)\n\n\n##### Exponential kernel ######################################################\nclass Exponential(kernel):\n \"\"\"\n Definition of the exponential kernel. This kernel arises when setting v=1/2\n in the matern family of kernels\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell: float\n Characteristic lenght scale\n \"\"\"\n def __init__(self, amplitude, ell):\n super(Exponential, self).__init__(amplitude, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 * exp(- np.abs(r)/self.ell)\n\n\n##### Matern 3/2 kernel #######################################################\nclass Matern32(kernel):\n \"\"\"\n Definition of the Matern 3/2 kernel. 
This kernel arise when setting v=3/2 \n in the matern family of kernels\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell: float\n Characteristic lenght scale\n \"\"\"\n def __init__(self, amplitude, ell):\n super(Matern32, self).__init__(amplitude, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 *(1 + np.sqrt(3)*np.abs(r)/self.ell) \\\n *np.exp(-np.sqrt(3)*np.abs(r) / self.ell)\n\n\n#### Matern 5/2 kernel ########################################################\nclass Matern52(kernel):\n \"\"\"\n Definition of the Matern 5/2 kernel. This kernel arise when setting v=5/2 \n in the matern family of kernels\n\n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell: float\n Characteristic lenght scale\n \"\"\"\n def __init__(self, amplitude, ell):\n super(Matern52, self).__init__(amplitude, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.params_number = 2\n def __call__(self, r):\n return self.amplitude**2 * (1 + (3*np.sqrt(5)*self.ell*np.abs(r) \\\n +5*np.abs(r)**2)/(3*self.ell**2) ) \\\n *exp(-np.sqrt(5.0)*np.abs(r)/self.ell)\n\n\n##### RQP kernel ##############################################################\nclass RQP(kernel):\n \"\"\"\n Definition of the product between the periodic kernel and the rational \n quadratic kernel that we called RQP kernel.\n \n Info: Tests show that, if alpha goes to infinity, the RQP tends to the quasi\n periodic kernel, if alpha goes to zero it tends to the periodic kernel.\n There is a goldilocks region of alpha where this kernel is much better \n than the quasi periodic kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell_e and ell_p: float\n Aperiodic and periodic lenght scales\n alpha: float\n alpha of the rational quadratic kernel\n P: float\n Periodic repetitions of the kernel\n \"\"\"\n def __init__(self, amplitude, alpha, ell_e, P, ell_p):\n super(RQP, self).__init__(amplitude, alpha, ell_e, P, ell_p)\n self.amplitude = amplitude\n self.alpha = alpha\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 5\n def __call__(self, r):\n a = exp(- 2*sine(pi*np.abs(r)/self.P)**2 / self.ell_p**2)\n b = (1+ r**2/ (2*self.alpha*self.ell_e**2))#**self.alpha\n return self.amplitude**2 * a / (np.sign(b) * (np.abs(b)) ** self.alpha)\n\n\n##### Paciorek's kernel #######################################################\nclass Paciorek(kernel):\n \"\"\"\n Definition of the modified Paciorek's kernel (stationary version). 
\n \n Parameters\n ----------\n amplitude: float\n Amplitude/amplitude of the kernel\n ell_1: float\n First lenght scale\n ell_2: float\n Second lenght scale\n \"\"\"\n def __init__(self, amplitude, ell_1, ell_2):\n super(Paciorek, self).__init__(amplitude, ell_1, ell_2)\n self.amplitude = amplitude\n self.ell_1 = ell_1\n self.ell_2 = ell_2\n self.params_number = 3\n def __call__(self, r):\n a = sqrt(2*self.ell_1*self.ell_2 / (self.ell_1**2+self.ell_2**2))\n b = exp(-2*r*r / (self.ell_1**2+self.ell_2**2))\n return self.amplitude**2 * a *b\n\n\n###############################################################################\nclass PiecewiseSE(kernel):\n \"\"\"\n Product of the Squared Exponential and Piecewice kernels\n \n Parameters\n ----------\n eta1: float\n Amplitude of the kernel\n eta2: float\n Aperiodic lenght scale\n eta3: float\n Periodic repetitions of the kernel\n \"\"\"\n def __init__(self, eta1, eta2, eta3):\n super(PiecewiseSE, self).__init__(eta1, eta2, eta3)\n self.eta1 = eta1\n self.eta2 = eta2\n self.eta3 = eta3\n self.params_number = 3\n def __call__(self, r):\n SE_term = self.eta1**2 * exp(-0.5 * r**2 / self.eta2**2)\n r = r/(0.5*self.eta3)\n piecewise = (3*np.abs(r) +1) * (1 - np.abs(r))**3\n piecewise = np.where(np.abs(r)>1, 0, piecewise)\n k = SE_term*piecewise\n return k\n\n\n###############################################################################\nclass PiecewiseRQ(kernel):\n \"\"\"\n Product of the Rational Quadratic and Piecewice kernels\n \n Parameters\n ----------\n eta1: float\n Amplitude of the kernel\n alpha: float\n alpha of the rational quadratic kernel\n eta2: float\n Aperiodic lenght scale\n eta3: float\n Periodic repetitions of the kernel\n \"\"\"\n def __init__(self, eta1, alpha, eta2, eta3):\n super(PiecewiseRQ, self).__init__(eta1, alpha, eta2, eta3)\n self.eta1 = eta1\n self.alpha = alpha\n self.eta2 = eta2\n self.eta3 = eta3\n self.params_number = 3\n def __call__(self, r):\n RQ_term = self.eta1**2 * (1+0.5*r**2/(self.alpha*self.eta2**2))**(-self.alpha)\n r = r/(0.5*self.eta3)\n piecewise = (3*np.abs(r) +1) * (1 - np.abs(r))**3\n piecewise = np.where(np.abs(r)>1, 0, piecewise)\n k = RQ_term*piecewise\n return k\n\n\n##### New periodic kernel ######################################################\nclass NewPeriodic(kernel):\n \"\"\"\n Definition of a new periodic kernel derived from mapping the rational \n quadratic kernel to the 2D space u(x) = (cos x, sin x)\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n alpha2: float\n Alpha parameter of the rational quadratic mapping\n P: float\n Period\n l: float\n Periodic lenght scale\n \"\"\"\n def __init__(self, amplitude, alpha2, P, l):\n super(NewPeriodic, self).__init__(amplitude, alpha2, P, l)\n self.amplitude = amplitude\n self.alpha2 = alpha2\n self.P = P\n self.l = l\n self.params_number = 4\n def __call__(self, r):\n a = (1 + 2*sine(pi*np.abs(r)/self.P)**2/(self.alpha2*self.l**2))**(-self.alpha2)\n return self.amplitude**2 * a\n\n\n##### New periodic kernel ######################################################\nclass QuasiNewPeriodic(kernel):\n \"\"\"\n Definition of a new quasi-periodic kernel. 
Derived from mapping the rational\n quadratic kernel to the 2D space u(x) = (cos x, sin x) and multiplying it by\n a squared exponential kernel\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n alpha2: float\n Alpha parameter of the rational quadratic mapping\n ell_e: float\n Aperiodic lenght scale\n P: float\n Period\n ell_p: float\n Periodic lenght scale\n \"\"\"\n def __init__(self, amplitude, alpha2, ell_e, P, ell_p):\n super(QuasiNewPeriodic, self).__init__(amplitude, alpha2, ell_e, P, ell_p)\n self.amplitude = amplitude\n self.alpha2 = alpha2\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 5 #number of hyperparameters\n def __call__(self, r):\n a = (1 + 2*sine(pi*np.abs(r)/self.P)**2/(self.alpha2*self.ell_p**2))**(-self.alpha2)\n b = exp(-0.5 * r**2 / self.ell_e**2)\n return self.amplitude**2 * a * b\n\n\nclass NewRQP(kernel):\n \"\"\"\n Definition of a new quasi-periodic kernel. Derived from mapping the rational\n quadratic kernel to the 2D space u(x) = (cos x, sin x) and multiplying it by\n a rational quadratic kernel\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n alpha1: float\n Alpha parameter of the rational quadratic kernel\n ell_e: float\n Aperiodic lenght scale\n P: float\n Period\n ell_p: float\n Periodic lenght scale\n alpha2: float\n Another alpha parameter from the mapping \n \"\"\"\n def __init__(self, amplitude, alpha1, alpha2, ell_e, P, ell_p):\n super(NewRQP, self).__init__(amplitude, alpha1, alpha2,\n ell_e, P, ell_p)\n self.amplitude = amplitude\n self.alpha1 = alpha1\n self.alpha2 = alpha2\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 5 #number of hyperparameters\n def __call__(self, r):\n a = (1 + 2*sine(pi*np.abs(r)/self.P)**2/(self.alpha2*self.ell_p**2))**(-self.alpha2)\n b = (1+ 0.5*r**2/ (self.alpha1*self.ell_e**2))**(-self.alpha1)\n return self.amplitude**2 * a * b\n\n\n##### New periodic kernel ######################################################\nclass HarmonicPeriodic(kernel):\n \"\"\"\n Definition of a periodic kernel that models a periodic signal\n with a N number of harmonics\n \n Parameters\n ----------\n N: int\n Number of harmonics\n amplitude: float\n Amplitude of the kernel\n P: float\n Period\n ell: float\n Periodic lenght scale\n \"\"\"\n def __init__(self, N, amplitude, P, ell):\n super(HarmonicPeriodic, self).__init__(N, amplitude, P, ell)\n self.N = N\n self.amplitude = amplitude\n self.ell = ell\n self.P = P\n self.params_number = 4 #number of hyperparameters\n def __call__(self, r, s):\n # r = np.abs(r)\n # s = np.abs(s)\n first = sine((self.N+0.5)*2*pi*r/self.P) / 2*sine(pi*r/self.P)\n second = sine((self.N+0.5)*2*pi*s/self.P) / 2*sine(pi*s/self.P)\n firstPart = (first - second)**2\n first = 0.5/np.tan(pi*r/self.P)\n second = cosine((self.N+0.5)*2*pi*r/self.P) / 2*sine(pi*r/self.P)\n third = 0.5/np.tan(pi*s/self.P)\n fourth = cosine((self.N+0.5)*2*pi*s/self.P) / 2*sine(pi*s/self.P)\n secondPart = (first-second-third+fourth)**2\n return self.amplitude**2*exp(-0.5*(firstPart + secondPart)/self.ell**2)\n\n\n##### New quasi-periodic kernel ################################################\nclass QuasiHarmonicPeriodic(kernel):\n \"\"\"\n Definition of a quasi-periodic kernel that models a periodic signals \n with a N number of harmonics\n \n Parameters\n ----------\n N: int\n Number of harmonics\n amplitude: float\n Amplitude of the kernel\n ell_e: float\n Aperiodic lenght scale\n P: float\n Period\n ell_p: float\n Periodic 
lenght scale\n \"\"\"\n def __init__(self, N, amplitude, ell_e, P, ell_p):\n super(QuasiHarmonicPeriodic, self).__init__(amplitude, ell_e, P, ell_p)\n self.N = N\n self.amplitude = amplitude\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 5 #number of hyperparameters\n def __call__(self, r, s):\n first = sine((self.N+0.5)*2*pi*r/self.P) / 2*sine(pi*r/self.P)\n second = sine((self.N+0.5)*2*pi*s/self.P) / 2*sine(pi*s/self.P)\n firstPart = (first - second)**2\n first = 0.5/np.tan(pi*r/self.P)\n second = cosine((self.N+0.5)*2*pi*r/self.P) / 2*sine(pi*r/self.P)\n third = 0.5/np.tan(pi*s/self.P)\n fourth = cosine((self.N+0.5)*2*pi*s/self.P) / 2*sine(pi*s/self.P)\n secondPart = (first-second-third+fourth)**2\n a = exp(-0.5*(firstPart + secondPart)/self.ell_p**2)\n b = exp(-0.5 * (r-s)**2 / self.ell_e**2)\n return self.amplitude**2 * a * b\n\n\n##### New quasi-periodic kernel ################################################\nclass CosPeriodic(kernel):\n \"\"\"\n Periodic kernel derived by mapping the squared exponential kernel into thw\n 2D space u(t) = [cos(t + phi), sin(t + phi)]\n \n SPOILER ALERT: If you do the math the phi terms disappear \n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n P: float\n Period\n ell_p: float\n Periodic lenght scale\n phi: float\n Phase\n \"\"\"\n def __init__(self, amplitude, P, ell):\n super(CosPeriodic, self).__init__(P, ell)\n self.amplitude = amplitude\n self.ell = ell\n self.P = P\n self.params_number = 3 #number of hyperparameters\n def __call__(self, r):\n return self.amplitude**2*exp(-2*cosine(pi*np.abs(r)/self.P)**2/self.ell**2)\n\n\n##### New quasi-periodic kernel ################################################\nclass QuasiCosPeriodic(kernel):\n \"\"\"\n This kernel is the product between the cosPeriodic kernel \n and the squared exponential kernel, it is just another the quasi-periodic \n kernel.\n \n Parameters\n ----------\n amplitude: float\n Amplitude of the kernel\n ell_e: float\n Evolutionary time scale\n ell_p: float\n Length scale of the periodic component\n P: float\n Kernel periodicity\n \"\"\"\n def __init__(self, amplitude, ell_e, P, ell_p):\n super(QuasiCosPeriodic, self).__init__(amplitude, ell_e, P, ell_p)\n self.amplitude = amplitude\n self.ell_e = ell_e\n self.P = P\n self.ell_p = ell_p\n self.params_number = 4\n def __call__(self, r):\n return self.amplitude**2 *exp(- 2*cosine(pi*np.abs(r)/self.P)**2 \\\n /self.ell_p**2 - r**2/(2*self.ell_e**2))\n\n\n### END\n\n##### New periodic kernel ######################################################\nclass unknown(kernel):\n \"\"\"\n \n Parameters\n ----------\n \n \"\"\"\n def __init__(self, amplitude, P, ell, phi):\n super(unknown, self).__init__(amplitude, P, ell, phi)\n self.amplitude = amplitude\n self.ell = ell\n self.P = P\n self.phi = phi\n self.params_number = 4 #number of hyperparameters\n def __call__(self, r, s):\n # r = np.abs(r)\n # s = np.abs(s)\n first = sine(2*pi*r/self.P - self.phi) - sine(2*pi*s/self.P - self.phi)\n second = sine(2*pi*r/self.P + self.phi) - sine(2*pi*s/self.P - self.phi)\n firstPart = first**2 + second**2\n return self.amplitude**2*exp(-0.5*(firstPart)/self.ell**2)\n"
] | [
[
"numpy.sqrt",
"numpy.sign",
"numpy.append",
"numpy.ones_like",
"numpy.abs",
"numpy.tan",
"numpy.array"
]
] |
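# Usage sketch (editor's addition): a minimal, hypothetical example of how the
# covariance functions in the record above compose via the overloaded + and *
# operators. Saving the module as kernels.py is an assumption, not part of the
# source record.
import numpy as np
import kernels  # hypothetical module name for the file above

# quasi-periodic covariance: periodic * squared exponential, plus white noise
k = kernels.Periodic(amplitude=1.0, P=25.0, ell=0.5) \
    * kernels.SquaredExponential(amplitude=1.0, ell=40.0) \
    + kernels.WhiteNoise(wn=0.1)

t = np.linspace(0, 100, 50)
tau = t[:, None] - t[None, :]   # lag matrix r = t - t'
K = k(tau)                      # (50, 50) covariance matrix
print(k)                        # nested reprs: "Periodic(...) * SquaredExponential(...) + WhiteNoise(...)"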
aynetdia/flair | [
"7e0958423ceb9744a87b0c27fd66f7be4caf0d99"
] | [
"flair/embeddings/document.py"
] | [
"from abc import abstractmethod\nimport logging\nfrom typing import List, Union\n\nimport torch\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom transformers import AutoTokenizer, AutoConfig, AutoModel, CONFIG_MAPPING, PreTrainedTokenizer\n\nimport flair\nfrom flair.data import Sentence\nfrom flair.embeddings.base import Embeddings, ScalarMix\nfrom flair.embeddings.token import TokenEmbeddings, StackedEmbeddings, FlairEmbeddings\nfrom flair.nn import LockedDropout, WordDropout\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nlog = logging.getLogger(\"flair\")\n\n\nclass DocumentEmbeddings(Embeddings):\n \"\"\"Abstract base class for all document-level embeddings. Every new type of document embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n def embedding_type(self) -> str:\n return \"sentence-level\"\n\n\nclass TransformerDocumentEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n model: str = \"bert-base-uncased\",\n fine_tune: bool = True,\n batch_size: int = 1,\n layers: str = \"-1\",\n layer_mean: bool = False,\n **kwargs\n ):\n \"\"\"\n Bidirectional transformer embeddings of words from various transformer architectures.\n :param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for\n options)\n :param fine_tune: If True, allows transformers to be fine-tuned during training\n :param batch_size: How many sentence to push through transformer at once. Set to 1 by default since transformer\n models tend to be huge.\n :param layers: string indicating which layers to take for embedding (-1 is topmost layer)\n :param layer_mean: If True, uses a scalar mix of layers as embedding\n \"\"\"\n super().__init__()\n\n # temporary fix to disable tokenizer parallelism warning\n # (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)\n import os\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\n # load tokenizer and transformer model\n self.tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained(model, **kwargs)\n if not 'config' in kwargs:\n config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)\n self.model = AutoModel.from_pretrained(model, config=config, **kwargs)\n else:\n self.model = AutoModel.from_pretrained(None, **kwargs)\n\n # model name\n self.name = 'transformer-document-' + str(model)\n self.base_model_name = str(model)\n\n # when initializing, embeddings are in eval mode by default\n self.model.eval()\n self.model.to(flair.device)\n\n # embedding parameters\n if layers == 'all':\n # send mini-token through to check how many layers the model has\n hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[-1]\n self.layer_indexes = [int(x) for x in range(len(hidden_states))]\n else:\n self.layer_indexes = [int(x) for x in layers.split(\",\")]\n\n self.layer_mean = layer_mean\n self.fine_tune = fine_tune\n self.static_embeddings = not self.fine_tune\n self.batch_size = batch_size\n\n # check whether CLS is at beginning or end\n self.initial_cls_token: bool = self._has_initial_cls_token(tokenizer=self.tokenizer)\n\n @staticmethod\n def _has_initial_cls_token(tokenizer: PreTrainedTokenizer) -> bool:\n # most models have CLS token as last token (GPT-1, GPT-2, TransfoXL, XLNet, XLM), but BERT is initial\n tokens = tokenizer.encode('a')\n 
initial_cls_token: bool = False\n if tokens[0] == tokenizer.cls_token_id: initial_cls_token = True\n return initial_cls_token\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences.\"\"\"\n\n # using list comprehension\n sentence_batches = [sentences[i * self.batch_size:(i + 1) * self.batch_size]\n for i in range((len(sentences) + self.batch_size - 1) // self.batch_size)]\n\n for batch in sentence_batches:\n self._add_embeddings_to_sentences(batch)\n\n return sentences\n\n def _add_embeddings_to_sentences(self, sentences: List[Sentence]):\n \"\"\"Extract sentence embedding from CLS token or similar and add to Sentence object.\"\"\"\n\n # gradients are enabled if fine-tuning is enabled\n gradient_context = torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()\n\n with gradient_context:\n\n # first, subtokenize each sentence and find out into how many subtokens each token was divided\n subtokenized_sentences = []\n\n # subtokenize sentences\n for sentence in sentences:\n # tokenize and truncate to max subtokens (TODO: check better truncation strategies)\n subtokenized_sentence = self.tokenizer.encode(sentence.to_tokenized_string(),\n add_special_tokens=True,\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n )\n\n subtokenized_sentences.append(\n torch.tensor(subtokenized_sentence, dtype=torch.long, device=flair.device))\n\n # find longest sentence in batch\n longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))\n\n # initialize batch tensors and mask\n input_ids = torch.zeros(\n [len(sentences), longest_sequence_in_batch],\n dtype=torch.long,\n device=flair.device,\n )\n mask = torch.zeros(\n [len(sentences), longest_sequence_in_batch],\n dtype=torch.long,\n device=flair.device,\n )\n for s_id, sentence in enumerate(subtokenized_sentences):\n sequence_length = len(sentence)\n input_ids[s_id][:sequence_length] = sentence\n mask[s_id][:sequence_length] = torch.ones(sequence_length)\n\n # put encoded batch through transformer model to get all hidden states of all encoder layers\n hidden_states = self.model(input_ids, attention_mask=mask)[-1] if len(sentences) > 1 \\\n else self.model(input_ids)[-1]\n\n # iterate over all subtokenized sentences\n for sentence_idx, (sentence, subtokens) in enumerate(zip(sentences, subtokenized_sentences)):\n\n index_of_CLS_token = 0 if self.initial_cls_token else len(subtokens) - 1\n\n cls_embeddings_all_layers: List[torch.FloatTensor] = \\\n [hidden_states[layer][sentence_idx][index_of_CLS_token] for layer in self.layer_indexes]\n\n # use scalar mix of embeddings if so selected\n if self.layer_mean:\n sm = ScalarMix(mixture_size=len(cls_embeddings_all_layers))\n sm_embeddings = sm(cls_embeddings_all_layers)\n\n cls_embeddings_all_layers = [sm_embeddings]\n\n # set the extracted embedding for the token\n sentence.set_embedding(self.name, torch.cat(cls_embeddings_all_layers))\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n return (\n len(self.layer_indexes) * self.model.config.hidden_size\n if not self.layer_mean\n else self.model.config.hidden_size\n )\n\n def __getstate__(self):\n # special handling for serializing transformer models\n config_state_dict = self.model.config.__dict__\n model_state_dict = self.model.state_dict()\n\n if not hasattr(self, \"base_model_name\"): self.base_model_name = 
self.name.split('transformer-document-')[-1]\n\n # serialize the transformer models and the constructor arguments (but nothing else)\n model_state = {\n \"config_state_dict\": config_state_dict,\n \"model_state_dict\": model_state_dict,\n \"embedding_length_internal\": self.embedding_length,\n\n \"base_model_name\": self.base_model_name,\n \"fine_tune\": self.fine_tune,\n \"batch_size\": self.batch_size,\n \"layer_indexes\": self.layer_indexes,\n \"layer_mean\": self.layer_mean,\n }\n\n return model_state\n\n def __setstate__(self, d):\n self.__dict__ = d\n\n # necessary for reverse compatibility with Flair <= 0.7\n if 'use_scalar_mix' in self.__dict__.keys():\n self.__dict__['layer_mean'] = d['use_scalar_mix']\n\n # special handling for deserializing transformer models\n if \"config_state_dict\" in d:\n\n # load transformer model\n config_class = CONFIG_MAPPING[d[\"config_state_dict\"][\"model_type\"]]\n loaded_config = config_class.from_dict(d[\"config_state_dict\"])\n\n # constructor arguments\n layers = ','.join([str(idx) for idx in self.__dict__['layer_indexes']])\n\n # re-initialize transformer word embeddings with constructor arguments\n embedding = TransformerDocumentEmbeddings(\n model=self.__dict__['base_model_name'],\n fine_tune=self.__dict__['fine_tune'],\n batch_size=self.__dict__['batch_size'],\n layers=layers,\n layer_mean=self.__dict__['layer_mean'],\n\n config=loaded_config,\n state_dict=d[\"model_state_dict\"],\n )\n\n # I have no idea why this is necessary, but otherwise it doesn't work\n for key in embedding.__dict__.keys():\n self.__dict__[key] = embedding.__dict__[key]\n\n else:\n model_name = self.__dict__['name'].split('transformer-document-')[-1]\n # reload tokenizer to get around serialization issues\n tokenizer = None\n try:\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n except Exception:\n log.warning(\"Could not reload tokenizer for model '%s'\", model_name)\n self.tokenizer = tokenizer\n\n\nclass DocumentPoolEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n fine_tune_mode: str = \"none\",\n pooling: str = \"mean\",\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param fine_tune_mode: if set to \"linear\" a trainable layer is added, if set to\n \"nonlinear\", a nonlinearity is added as well. Set this to make the pooling trainable.\n :param pooling: a string which can be any value from ['mean', 'max', 'min']\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n self.__embedding_length: int = self.embeddings.embedding_length\n\n # optional fine-tuning on top of embedding layer\n self.fine_tune_mode = fine_tune_mode\n if self.fine_tune_mode in [\"nonlinear\", \"linear\"]:\n self.embedding_flex = torch.nn.Linear(\n self.embedding_length, self.embedding_length, bias=False\n )\n self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))\n\n if self.fine_tune_mode in [\"nonlinear\"]:\n self.embedding_flex_nonlinear = torch.nn.ReLU(self.embedding_length)\n self.embedding_flex_nonlinear_map = torch.nn.Linear(\n self.embedding_length, self.embedding_length\n )\n\n self.to(flair.device)\n\n if pooling not in ['min', 'max', 'mean']:\n raise ValueError(f\"Pooling operation for {pooling!r} is not defined\")\n\n self.pooling = pooling\n self.name: str = f\"document_{self.pooling}\"\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates\n only if embeddings are non-static.\"\"\"\n\n # if only one sentence is passed, convert to list of sentence\n if isinstance(sentences, Sentence):\n sentences = [sentences]\n\n self.embeddings.embed(sentences)\n\n for sentence in sentences:\n word_embeddings = []\n for token in sentence.tokens:\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)\n\n if self.fine_tune_mode in [\"nonlinear\", \"linear\"]:\n word_embeddings = self.embedding_flex(word_embeddings)\n\n if self.fine_tune_mode in [\"nonlinear\"]:\n word_embeddings = self.embedding_flex_nonlinear(word_embeddings)\n word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)\n\n if self.pooling == \"mean\":\n pooled_embedding = torch.mean(word_embeddings, 0)\n elif self.pooling == \"max\":\n pooled_embedding, _ = torch.max(word_embeddings, 0)\n elif self.pooling == \"min\":\n pooled_embedding, _ = torch.min(word_embeddings, 0)\n\n sentence.set_embedding(self.name, pooled_embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n def extra_repr(self):\n return f\"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}\"\n\n\nclass DocumentTFIDFEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n train_dataset,\n **vectorizer_params,\n ):\n \"\"\"The constructor for DocumentTFIDFEmbeddings.\n :param train_dataset: the train dataset which will be used to construct the vectorizer\n :param vectorizer_params: parameters given to Scikit-learn's TfidfVectorizer constructor\n \"\"\"\n super().__init__()\n\n import numpy as np\n self.vectorizer = TfidfVectorizer(dtype=np.float32, **vectorizer_params)\n self.vectorizer.fit([s.to_original_text() for s in train_dataset])\n \n self.__embedding_length: int = len(self.vectorizer.vocabulary_)\n\n self.to(flair.device)\n\n self.name: str = \"document_tfidf\"\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to every sentence in the given list of sentences.\"\"\"\n\n # if only one sentence
is passed, convert to list of sentence\n if isinstance(sentences, Sentence):\n sentences = [sentences]\n\n raw_sentences = [s.to_original_text() for s in sentences]\n tfidf_vectors = torch.from_numpy(self.vectorizer.transform(raw_sentences).A)\n \n for sentence_id, sentence in enumerate(sentences):\n sentence.set_embedding(self.name, tfidf_vectors[sentence_id])\n \n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\nclass DocumentRNNEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n hidden_size=128,\n rnn_layers=1,\n reproject_words: bool = True,\n reproject_words_dimension: int = None,\n bidirectional: bool = False,\n dropout: float = 0.5,\n word_dropout: float = 0.0,\n locked_dropout: float = 0.0,\n rnn_type=\"GRU\",\n fine_tune: bool = True,\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param hidden_size: the number of hidden states in the rnn\n :param rnn_layers: the number of layers for the rnn\n :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear\n layer before putting them into the rnn or not\n :param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output\n dimension as before will be taken.\n :param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not\n :param dropout: the dropout value to be used\n :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used\n :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used\n :param rnn_type: 'GRU' or 'LSTM'\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n\n self.rnn_type = rnn_type\n\n self.reproject_words = reproject_words\n self.bidirectional = bidirectional\n\n self.length_of_all_token_embeddings: int = self.embeddings.embedding_length\n\n self.static_embeddings = False if fine_tune else True\n\n self.__embedding_length: int = hidden_size\n if self.bidirectional:\n self.__embedding_length *= 4\n\n self.embeddings_dimension: int = self.length_of_all_token_embeddings\n if self.reproject_words and reproject_words_dimension is not None:\n self.embeddings_dimension = reproject_words_dimension\n\n self.word_reprojection_map = torch.nn.Linear(\n self.length_of_all_token_embeddings, self.embeddings_dimension\n )\n\n # bidirectional RNN on top of embedding layer\n if rnn_type == \"LSTM\":\n self.rnn = torch.nn.LSTM(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n batch_first=True,\n )\n else:\n self.rnn = torch.nn.GRU(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n batch_first=True,\n )\n\n self.name = \"document_\" + self.rnn._get_name()\n\n # dropouts\n self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None\n self.locked_dropout = (\n LockedDropout(locked_dropout) if locked_dropout > 0.0 else None\n )\n self.word_dropout = WordDropout(word_dropout) if word_dropout > 0.0 else None\n\n torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)\n\n self.to(flair.device)\n\n self.eval()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to all sentences in the given 
list of sentences. If embeddings are already added, update\n only if embeddings are non-static.\"\"\"\n\n # TODO: remove in future versions\n if not hasattr(self, \"locked_dropout\"):\n self.locked_dropout = None\n if not hasattr(self, \"word_dropout\"):\n self.word_dropout = None\n\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n self.rnn.zero_grad()\n\n # embed words in the sentence\n self.embeddings.embed(sentences)\n\n lengths: List[int] = [len(sentence.tokens) for sentence in sentences]\n longest_token_sequence_in_batch: int = max(lengths)\n\n pre_allocated_zero_tensor = torch.zeros(\n self.embeddings.embedding_length * longest_token_sequence_in_batch,\n dtype=torch.float,\n device=flair.device,\n )\n\n all_embs: List[torch.Tensor] = list()\n for sentence in sentences:\n all_embs += [\n emb for token in sentence for emb in token.get_each_embedding()\n ]\n nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)\n\n if nb_padding_tokens > 0:\n t = pre_allocated_zero_tensor[\n : self.embeddings.embedding_length * nb_padding_tokens\n ]\n all_embs.append(t)\n\n sentence_tensor = torch.cat(all_embs).view(\n [\n len(sentences),\n longest_token_sequence_in_batch,\n self.embeddings.embedding_length,\n ]\n )\n\n # before-RNN dropout\n if self.dropout:\n sentence_tensor = self.dropout(sentence_tensor)\n if self.locked_dropout:\n sentence_tensor = self.locked_dropout(sentence_tensor)\n if self.word_dropout:\n sentence_tensor = self.word_dropout(sentence_tensor)\n\n # reproject if set\n if self.reproject_words:\n sentence_tensor = self.word_reprojection_map(sentence_tensor)\n\n # push through RNN\n packed = pack_padded_sequence(\n sentence_tensor, lengths, enforce_sorted=False, batch_first=True\n )\n rnn_out, hidden = self.rnn(packed)\n outputs, output_lengths = pad_packed_sequence(rnn_out, batch_first=True)\n\n # after-RNN dropout\n if self.dropout:\n outputs = self.dropout(outputs)\n if self.locked_dropout:\n outputs = self.locked_dropout(outputs)\n\n # extract embeddings from RNN\n for sentence_no, length in enumerate(lengths):\n last_rep = outputs[sentence_no, length - 1]\n\n embedding = last_rep\n if self.bidirectional:\n first_rep = outputs[sentence_no, 0]\n embedding = torch.cat([first_rep, last_rep], 0)\n\n if self.static_embeddings:\n embedding = embedding.detach()\n\n sentence = sentences[sentence_no]\n sentence.set_embedding(self.name, embedding)\n\n def _apply(self, fn):\n\n # models that were serialized using torch versions older than 1.4.0 lack the _flat_weights_names attribute\n # check if this is the case and if so, set it\n for child_module in self.children():\n if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, \"_flat_weights_names\"):\n _flat_weights_names = []\n\n if child_module.__dict__[\"bidirectional\"]:\n num_direction = 2\n else:\n num_direction = 1\n for layer in range(child_module.__dict__[\"num_layers\"]):\n for direction in range(num_direction):\n suffix = \"_reverse\" if direction == 1 else \"\"\n param_names = [\"weight_ih_l{}{}\", \"weight_hh_l{}{}\"]\n if child_module.__dict__[\"bias\"]:\n param_names += [\"bias_ih_l{}{}\", \"bias_hh_l{}{}\"]\n param_names = [\n x.format(layer, suffix) for x in param_names\n ]\n _flat_weights_names.extend(param_names)\n\n setattr(child_module, \"_flat_weights_names\",\n _flat_weights_names)\n\n child_module._apply(fn)\n\n\nclass DocumentLMEmbeddings(DocumentEmbeddings):\n def __init__(self, flair_embeddings: List[FlairEmbeddings]):\n super().__init__()\n\n self.embeddings = 
flair_embeddings\n self.name = \"document_lm\"\n\n # IMPORTANT: add embeddings as torch modules\n for i, embedding in enumerate(flair_embeddings):\n self.add_module(\"lm_embedding_{}\".format(i), embedding)\n if not embedding.static_embeddings:\n self.static_embeddings = False\n\n self._embedding_length: int = sum(\n embedding.embedding_length for embedding in flair_embeddings\n )\n\n @property\n def embedding_length(self) -> int:\n return self._embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n # iterate over sentences\n for sentence in sentences:\n sentence: Sentence = sentence\n\n # if it's a forward LM, take the last state\n if embedding.is_forward_lm:\n sentence.set_embedding(\n embedding.name,\n sentence[len(sentence) - 1]._embeddings[embedding.name],\n )\n else:\n sentence.set_embedding(\n embedding.name, sentence[0]._embeddings[embedding.name]\n )\n\n return sentences\n\n\nclass SentenceTransformerDocumentEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n model: str = \"bert-base-nli-mean-tokens\",\n batch_size: int = 1,\n convert_to_numpy: bool = False,\n ):\n \"\"\"\n :param model: string name of a model from the SentenceTransformer class\n :param batch_size: int number of sentences to process in one batch\n :param convert_to_numpy: bool whether encode() returns a numpy array or PyTorch tensor\n \"\"\"\n super().__init__()\n\n try:\n from sentence_transformers import SentenceTransformer\n except ModuleNotFoundError:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! The library \"sentence-transformers\" is not installed!')\n log.warning(\n 'To use Sentence Transformers, please first install with \"pip install sentence-transformers\"'\n )\n log.warning(\"-\" * 100)\n raise\n\n self.model = SentenceTransformer(model)\n self.name = 'sentence-transformers-' + str(model)\n self.batch_size = batch_size\n self.convert_to_numpy = convert_to_numpy\n self.static_embeddings = True\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n sentence_batches = [sentences[i * self.batch_size:(i + 1) * self.batch_size]\n for i in range((len(sentences) + self.batch_size - 1) // self.batch_size)]\n\n for batch in sentence_batches:\n self._add_embeddings_to_sentences(batch)\n\n return sentences\n\n def _add_embeddings_to_sentences(self, sentences: List[Sentence]):\n\n # convert to plain strings, embedded in a list for the encode function\n sentences_plain_text = [sentence.to_plain_string() for sentence in sentences]\n\n embeddings = self.model.encode(sentences_plain_text, convert_to_numpy=self.convert_to_numpy)\n for sentence, embedding in zip(sentences, embeddings):\n sentence.set_embedding(self.name, embedding)\n\n @property\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n return self.model.get_sentence_embedding_dimension()\n"
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.min",
"torch.no_grad",
"torch.nn.GRU",
"torch.enable_grad",
"torch.max",
"torch.eye",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.utils.rnn.pad_packed_sequence",
"sklearn.feature_extraction.text.TfidfVectorizer",
"torch.mean",
"torch.ones",
"torch.nn.LSTM",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.zeros",
"torch.nn.ReLU"
]
] |
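# Usage sketch (editor's addition): how the document embeddings in the record
# above are typically driven through the public flair API. The model names
# ('bert-base-uncased', 'glove') are illustrative defaults, not mandated by
# this file.
from flair.data import Sentence
from flair.embeddings import TransformerDocumentEmbeddings, DocumentPoolEmbeddings, WordEmbeddings

sentence = Sentence('The grass is green .')

# CLS-token document embedding from a transformer (length 768 for the default top layer)
doc_emb = TransformerDocumentEmbeddings('bert-base-uncased', fine_tune=False)
doc_emb.embed(sentence)
print(sentence.get_embedding().shape)

# cheaper alternative: mean-pool static word embeddings over the sentence
pool_emb = DocumentPoolEmbeddings([WordEmbeddings('glove')], pooling='mean')
pool_emb.embed(sentence)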
Penn-TopGuNN/TopGuNN | [
"e736e467f1991a33c5ee54407665cbd9fef1e521"
] | [
"code/embed_and_filter.py"
] | [
"import numpy as np\nimport torch\nfrom transformers import BertTokenizer, BertModel\nfrom torch.utils.data import DataLoader \nimport util\nfrom util import MaskableList\nfrom collections import defaultdict, Counter\nfrom sentence_transformers import SentenceTransformer\nimport spacy\nimport time\nimport itertools\nfrom itertools import islice\nimport os\nimport argparse\nfrom sklearn.preprocessing import normalize\nfrom sqlitedict import SqliteDict\nimport ast\nimport pickle as pkl\nimport sqlite3\n\nnlp = spacy.load(\"en_core_web_lg\", disable=[\"ner\"]) ## you only need the parser and tagger\n## device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") ##.to(device)\n## NOTE: once debugging is ironed-out remove all print statements, csv file, and time study files, for AWS\n\n'''usage: (if you use embed_and_filter_job_launcher.py) \npython3 -u code/embed_and_filter_job_launcher.py \\\nyou must change the command line arguments inside the embed_and_filter_job_launcher.py file\n'''\n\n'''usage: (if you use embed_and_filter.sh)\npython3 -u code/embed_and_filter.py \\\n-job_id $i \\\n-outDir 'betatest/out/' \\\n-dataDir 'betatest/data/' \\\n-NUM_JOBS 2 \\\n-NUM_GPUS 2 \\\n-PROC_PER_GPU 1 \\\n-gpu_ids 0 1 \\\n-batch_size 175 \\\n-clip_len 225 \\\n-job_slices \"job_slices.pkl\" \\\n-query_sentences 'betatest/data/query_sentences.txt' \\\n-sentences_dict 'sentences.db' \\\n-trace_dict 'trace.db' \\\n-spacy_toks_dict 'spacy_toks.db' \\\n-spacy_pos_dict 'spacy_pos.db' \\\n-spacy_deps_dict 'spacy_deps.db' \\\n--BERT \\\n--MEAN \\\n> 'betatest/out/embed_and_filter_job'$i'.stdout' 2>&1\nalternative: | tee betatest/out/stdout_job_array.txt) 3>&1 1>&2 2>&3 | tee betatest/out/stderr_job_array.txt\n'''\n\n'''global argparser'''\ntotal_nword_embeddings, nskipped, time_elapsed_embedding, time_elapsed_filtering = 0, 0, 0, 0\nbert_tokenizer, bert_model = None, None\nparser = argparse.ArgumentParser(description='Processing list of files...')\nparser.add_argument('-outDir', required=True, help='Directory where all outfiles will be written to. Example: out/')\nparser.add_argument('-dataDir', required=True, help='Directory where all data files are located. Example: data/')\nparser.add_argument('-job_id', required=True, help='job_id responsible for x-partition of the amalgams.')\n# parser.add_argument('-NUM_JOBS', type=int, required=True, help='example: 5 (should match npartitions==NUM_GPUS)')\nparser.add_argument('-batch_size', type=int, required=True, help='example: 400 (400 sentences in each batch)')\nparser.add_argument('-clip_len', type=int, required=True, help='number of sentences to batch')\n#parser.add_argument('-NUM_GPUS', type=int, required=True, help='number of GPUs')\n#parser.add_argument('-PROC_PER_GPU', type=int, required=True, help='number of processes per GPU')\nparser.add_argument('-gpu_id', type=int, required=True, help='list gpu_ids available separated by white space, i.e. - 3 4 5 16')\nparser.add_argument('-job_slices', type=str, required=True, help=\"the job slices file output from create_amalgams.py. Example: 'job_slices.pkl'\")\nparser.add_argument('-query_sentences', type=str, required=True, help=\"query sentences filename. Example: 'query_sentences.txt'\")\nparser.add_argument('-sentences_dict', required=True, help=\"sqlite db filename. Example: 'sentences_dict.db'\")\nparser.add_argument('-trace_dict', required=True, help=\"sqlite db filename. Example: 'trace_dict.db'\")\nparser.add_argument('-spacy_toks_dict', required=True, help=\"sqlite db filename. 
Example: 'spacy_toks_dict.db'\")\nparser.add_argument('-spacy_pos_dict', required=True, help=\"sqlite db filename. Example: 'spacy_pos_dict.db'\")\nparser.add_argument('-spacy_deps_dict', required=True, help=\"sqlite db filename. Example: 'spacy_deps_dict.db'\")\nparser.add_argument('--BERT', action='store_false', dest='SBERT_flag', required=False, help='Enable BERT as the model')\nparser.add_argument('--MEAN', action='store_false', dest='HEAD_flag', required=False, help='Calculates embeddings using the mean of the subword units')\nparser.add_argument('--SBERT', action='store_true', dest='SBERT_flag', required=False, help='Enable SBERT as the model')\nparser.add_argument('--HEAD', action='store_true', dest='HEAD_flag', required=False, help='Calculates embedding using only the headword embedding of the subword unit')\nargs = parser.parse_args()\n\n'''global variables'''\n## load job partition file\njob_slices = util.pickle_load(args.outDir+args.job_slices)\nprint('\\nlen(job_slices): {}'.format(len(job_slices)))\n\n\n #################################################\n ########## Embed and Filter ############\n #################################################\n\n \ndef embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks, spacy_pos, spacy_deps): ## , bert_tokenizer, bert_model, SBERT_flag, HEAD_flag\n ''' Takes in a batch of sentences and generates BERT embeddings for them. \n Args:\n round_id: index of the current batch, used for progress logging\n sentence_batch: list of raw sentences\n trace_batch: list of (filename, sentence_id) provenance tuples\n spacy_toks, spacy_pos, spacy_deps: spaCy tokens, POS tags and dependency labels per sentence\n Returns:\n (cur_words, cur_embeds): content words (with trace info) and their word embeddings\n Note:\n remove bert_tokenizer, bert_model, SBERT_flag, HEAD_flag from method signature when not running multiprocessing\n make sure SBERT_flag and HEAD_flag are added back in\n '''\n global time_elapsed_embedding, time_elapsed_filtering\n global bert_tokenizer, bert_model, args\n start_embed_time = time.time()\n\n cur_words, cur_embeds = [], []\n content_tags = ['ADJ', 'ADV', 'NOUN', 'VERB']\n aux_tags = ['aux', 'auxpass', 'poss', 'possessive', 'cop', 'punct']\n\n ## tensor board, web ui (pytorch)\n ## perform lowercasing of all the sentences for embeddings\n sent_iter=iter(sentence_batch)\n lowercased_sentence_batch = [sent.lower() for sent in sent_iter]\n \n if args.SBERT_flag:\n ## SBERT produces one sentence-level embedding per input sentence\n return bert_model.encode(lowercased_sentence_batch)\n\n else:\n\n ##pytorch logging library\n\n # try:\n\n ## batched encoding is a dict with keys = dict_keys(['input_ids', 'token_type_ids', 'attention_mask'])\n NER_encoded_batch = [bert_tokenizer.batch_encode_plus(tok) for tok in spacy_toks] ## bert_NER_toks \n # encoded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch) ## regular bert_toks \n \n ## We want BERT to process our examples all at once (as one lowercased_sentence_batch).\n ## For that reason, we need to pad all lists to the same size, so we can represent the input as one 2-d array.\n padded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch, pad_to_max_length=True)\n\n ## Grab indices and attn masks from the padded lowercased_sentence_batch.\n ## We need to tell BERT to ignore (mask) the padding we've added when it's processed as input.\n padded_input_ids, attention_masks = np.array(padded_batch['input_ids']), np.array(padded_batch['attention_mask'])\n\n NER_iter = iter(NER_encoded_batch)\n bert_NER_toks = [[bert_tokenizer.convert_ids_to_tokens(NER_unit)[1:-1] for NER_unit in cur_dict['input_ids']] for cur_dict in NER_iter]\n\n padded_tinput_ids = torch.tensor(padded_input_ids).cuda() ##batched padded_input_ids converted to torch tensors\n attention_masks = torch.tensor(attention_masks).cuda() ##batched attention_masks converted to torch tensors\n \n #
print('padded_tinput_ids.size()[1] ', padded_tinput_ids.size())\n\n if padded_tinput_ids.size()[1] > args.clip_len:\n print('\\n\\nclipping sentences round {} '.format(round_id))\n # print('\\nclipped sentences: ', sentence_batch)\n # print('\\nbert_NER_toks: ', bert_NER_toks)\n # print(' after change round {} - type(padded_tinput_ids) and size: {} {} '.format(i, type(padded_tinput_ids), padded_tinput_ids.size()))\n # bert_NER_toks = [NER_unit[:args.clip_len] for NER_unit in bert_NER_toks]\n print('before padded_tinput_ids.size: ', padded_tinput_ids.size())\n padded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch, max_length=args.clip_len, pad_to_max_length=True)\n padded_input_ids, attention_masks = np.array(padded_batch['input_ids']), np.array(padded_batch['attention_mask'])\n print('padded_input_ids.dtype, attention_masks.dtype: ', padded_input_ids.dtype, attention_masks.dtype)\n padded_tinput_ids = torch.tensor(padded_input_ids).cuda() ##batched padded_input_ids converted to torch tensors\n attention_masks = torch.tensor(attention_masks).cuda() ##batched attention_masks converted to torch tensors\n print('after padded_tinput_ids.size: ', padded_tinput_ids.size())\n print('---end clipped sentences---')\n print('\\n\\n')\n\n # print('after having been clipped - padded_tinput_ids.size: ', padded_tinput_ids.size())\n try:\n with torch.no_grad():\n embeds = bert_model(padded_tinput_ids, attention_mask=attention_masks)\n except RuntimeError:\n print('\\n\\nLine 143 CUDA out of memory. ')\n print('padded_tinput_ids.size: ', padded_tinput_ids.size())\n return -1\n\n ## Saves relevant word embeddings from the padding (removing [CLS] and [SEP] tokens)\n ## for each sentence, where the last token resides\n mask_iter = iter(np.array(attention_masks.cpu()))\n relevant_ids = np.array([[i,len(arr)-1-list(arr[::-1]).index(1)] for i, arr in enumerate(mask_iter)])\n ## changes [SEP] tokens attention to 0\n attention_masks[relevant_ids[:,0], relevant_ids[:,1]]=0 ## temp[:,0] return 0th col for all rows, temp[:,1]] return 1st col for all rows. 
Change corresponding [row, col] in arrays to 0\n ## changes [CLS] tokens attention to 0\n attention_masks[:,0]=0\n\n ## attention masks to be applied to relevant embeddings within each torch tensor\n mask_iter, embeds_iter = iter(attention_masks), iter(embeds[0]) \n relevant_embeds = [MaskableList(sublist)[submask] for sublist, submask in zip(embeds_iter, mask_iter)]\n\n ## reflects the bert_NER full-token words (not bert's subword units)\n pos_iter, dep_iter = iter(spacy_pos), iter(spacy_deps)\n relevant_annotations_mask = [(np.in1d(cur_pos,content_tags)) & (~np.in1d(cur_dep,aux_tags)) for cur_pos, cur_dep in zip(pos_iter,dep_iter)]\n\n embed_time = time.time() - start_embed_time\n time_elapsed_embedding += embed_time\n\n start_filter_time = time.time()\n\n if args.HEAD_flag:\n ## use only embedding of the full-token word for each subword unit\n\n for i in range(len(bert_NER_toks)):\n end_index,j,k=0,0,0\n while(j<len(relevant_embeds[i])):\n end_index=end_index+len(bert_NER_toks[i][k])\n if relevant_annotations_mask[i][k]:\n cur_words.append((k,spacy_toks[i][k],(trace_batch[i][0], int(trace_batch[i][1]))))\n ## stack, mean, and numpy 'em\n temp = torch.mean(torch.stack(relevant_embeds[i][j:j+1]),0).cpu().numpy()\n cur_embeds.append(temp)\n j,k=end_index,k+1 \n\n else:\n # use mean of subwords units to calculate embeddings\n try: \n for i in range(len(bert_NER_toks)):\n end_index,j,k=0,0,0 \n while(j<len(relevant_embeds[i])):\n end_index=end_index+len(bert_NER_toks[i][k])\n # if (round_id > 799 and round_id < 803) or (round_id > 984 and round_id < 988):\n # print('i {}, k {}, len(bert_NER_toks[i]) {}, bert_NER_toks[i][k] {}'.format(i, k, len(bert_NER_toks[i]), bert_NER_toks[i][k]))\n # print('bert_NER_toks[i]: ', bert_NER_toks[i])\n if relevant_annotations_mask[i][k]:\n cur_words.append((k,spacy_toks[i][k],(trace_batch[i][0], int(trace_batch[i][1]))))\n ## stack, mean, and numpy 'em\n temp = torch.mean(torch.stack(relevant_embeds[i][j:end_index]),0).cpu().numpy() ##is this end_index or end_index+1\n cur_embeds.append(temp)\n j,k=end_index,k+1 \n except IndexError as e:\n print('\\n\\n---IndexError: list index out of range!---')\n print(e)\n print('round_id: ', round_id)\n print('i, k:', i, k)\n print('len(sentence_batch), len(trace_batch[0]): ', len(sentence_batch), len(trace_batch[0]))\n print('len(bert_NER_toks)', len(bert_NER_toks))\n print('len(bert_NER_toks[i]): ', len(bert_NER_toks[i]))\n # print('\\nbert_NER_toks[i]: ', bert_NER_toks[i])\n # print('\\nbert_NER_toks', bert_NER_toks)\n print('--end current error--\\n\\n')\n\n filter_time = (time.time() - start_filter_time)\n time_elapsed_filtering += filter_time\n\n # print('round %d Time elapsed filtering content words:\\t%s' % (round_id, time.strftime(\"%H:%M:%S\", time.gmtime(filter_time))))\n # except AttributeError:\n # print('\\n\\n---AttributeError----NoneType object has no attribute batch_encode_plus!')\n # print('spacy_toks: ')\n # print(spacy_toks)\n # print('trace_batch: ')\n # print(trace_batch)\n # print('sentence_batch: ')\n # print(sentence_batch)\n # print('print(list(sentence_batch)):')\n # print(list(sentence_batch))\n # print('---end of line---\\n\\n')\n\n if round_id % 100 == 0:\n print('finished batch {}. 
len(words): {} len(embeds): {}'.format(round_id, len(cur_words), len(cur_embeds)))\n\n return cur_words, cur_embeds\n\ndef embed_all_batches(batched_sentences, batched_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps):\n '''Iterates through giga_dict and batches sentences to send of embed_all_sentences().\n Args:\n Returns:\n Note:\n '''\n\n global args, total_nword_embeddings\n\n words, word_embeds = [], []\n\n batch_iter, trace_iter, spacy_toks_iter, spacy_pos_iter, spacy_deps_iter = iter(batched_sentences), iter(batched_trace_info), iter(batched_spacy_toks), iter(batched_spacy_pos), iter(batched_spacy_deps)\n\n for round_id, (sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) in enumerate(zip(batch_iter, trace_iter, spacy_toks_iter, spacy_pos_iter, spacy_deps_iter)):\n \n if round_id % 100 == 0:\n print('\\nprocessing embedding {}... percentage processed {}'.format(round_id, (round_id/len(batched_sentences))*100))\n\n cur_words, cur_embeds = embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) ## each batch is of size batch_size (see global var)\n\n words.extend(cur_words)\n word_embeds.extend(cur_embeds)\n\n total_nword_embeddings += len(cur_embeds)\n\n return words, word_embeds\n\n \ndef handle_batches(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps, words_dict, word_embeds_fname):\n\n global args, job_slices, time_elapsed_embedding, time_elapsed_filtering\n\n embed_time, filtering_time = 0, 0\n batch_size, outDir = args.batch_size, args.outDir\n print('size of batch: ', batch_size)\n\n ## Reads in gigaword file\n # sentences, trace = read_file(gigaword_fname)\n print('len(sentences), len(trace), len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps): ', len(cur_sentences), len(cur_trace), len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps))\n\n ## use pytorch library DataLoader to batch sentences and nlp annotations\n batched_sentences = DataLoader(cur_sentences, batch_size=batch_size)\n batched_trace_info = DataLoader(cur_trace, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_toks = DataLoader(cur_spacy_toks, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_pos = DataLoader(cur_spacy_pos, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_deps = DataLoader(cur_spacy_deps, batch_size=batch_size, collate_fn=custom_collate) \n \n print('DataLoader (batch_size %d): %d %d %d %d %d' %(batch_size, len(batched_sentences), len(batched_trace_info), len(batched_spacy_toks), len(batched_spacy_pos), len(batched_spacy_deps)))\n\n ## Embeds sentences from all batches\n words, word_embeds = embed_all_batches(batched_sentences, batched_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps) \n\n print('these lengths should match: len(words): {}, len(word_embeds): {}, total_nword_embeds_check: {} '.format(len(words), len(word_embeds), total_nword_embeddings))\n\n word_dict_start = time.time()\n words_iter = iter(words)\n idx_iter = range(len(words))\n words_dict.update([(idx,word) for idx,word in zip(idx_iter,words_iter)])\n words_dict.commit()\n words_dict.close()\n word_dict_time = time.time() - word_dict_start\n\n ## memmap word_embeds\n memmap_start = time.time()\n fp = np.memmap(word_embeds_fname, dtype='float32', mode='w+', shape=(len(word_embeds),768))\n fp[:] = word_embeds[:]\n del fp\n memmap_time = time.time() - memmap_start\n\n words_dict_fname = 
str(words_dict)[str(words_dict).index(\"(\")+1:str(words_dict).index(\")\")]\n \n ## write shapes of each word_embedding job to a file to create word index later\n with open(args.outDir+'shapes.txt','a') as fout:\n fout.write(word_embeds_fname+' '+str(len(word_embeds))+'\\n')\n fout.write(words_dict_fname+' '+str(len(words))+'\\n')\n fout.close()\n\n # print stats for sanity check\n print('\\n---stats---:')\n print('total time embeddings docs: %s' % (time.strftime(\"%H:%M:%S\", time.gmtime(time_elapsed_embedding))))\n print('total time filtering content words: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(time_elapsed_filtering))))\n print('total time creating word_sqlite_dict: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(word_dict_time))))\n print('total elapsed copying word_embeds to memmap: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(memmap_time))))\n\ndef create_query_matrix():\n\n print('creating query matrix...')\n global args\n\n ## xq files (query data)\n xq_fname = args.outDir+'xq.dat' ## mmep query word embeddings\n # qsents_fname = args.outDir+'qsents.pkl' ## sentences_dict\n # qwords_fname = args.outDir+'qwords.pkl' ## qwords_dict\n qsentences_dict, qwords_dict = SqliteDict(args.outDir+'qsentences.db'), SqliteDict(args.outDir+'qwords.db')\n\n batch_size = args.batch_size\n print('batch_size for query_matrix: ', batch_size)\n xq, q_words, q_sentences, q_trace = [], [], [], [] \n\n ## use len(query sentences as the batch_size)\n ## read in query sentences\n with open(args.query_sentences, 'r') as fin:\n for sent_id, line in enumerate(fin.read().splitlines()):\n q_sentences.append(line.strip())\n q_trace.append((args.query_sentences, sent_id))\n\n print('len(q_sentences) and len(q_trace): ', len(q_sentences), len(q_trace))\n\n spacy_docs = list(nlp.pipe(q_sentences)) ##no nead to clip len for spacy toks for the query matrix\n spacy_toks = [[tok.text for tok in doc] for doc in spacy_docs]\n spacy_pos = [[tok.pos_ for tok in doc] for doc in spacy_docs]\n spacy_deps = [[tok.dep_ for tok in doc] for doc in spacy_docs]\n\n ## use pytorch library DataLoader to batch sentences and helper func batchify to batch spacy annotations\n batched_q_sentences = DataLoader(q_sentences, batch_size=batch_size)\n batched_q_trace_info = DataLoader(q_trace, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_toks = DataLoader(spacy_toks, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_pos = DataLoader(spacy_pos, batch_size=batch_size, collate_fn=custom_collate)\n batched_spacy_deps = DataLoader(spacy_deps, batch_size=batch_size, collate_fn=custom_collate)\n\n print('DataLoader (batch_size %d): %d %d %d %d %d' %(batch_size, len(batched_q_sentences), len(batched_q_trace_info), len(batched_spacy_toks), len(batched_spacy_pos), len(batched_spacy_deps)))\n\n for round_id, (sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) in enumerate(zip(batched_q_sentences, batched_q_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps)):\n \n cur_words, cur_embeds = embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) ## each batch is of size batch_size (see global var)\n\n\n q_words.extend(cur_words)\n xq.extend([normalize([embed])[0] for embed in cur_embeds])\n\n print('xq.shape: ', len(xq), len(xq[0]))\n\n qwords_dict_fname = str(qwords_dict)[str(qwords_dict).index(\"(\")+1:str(qwords_dict).index(\")\")]\n\n with open(args.outDir+'shapes.txt','a') as fout:\n fout.write(xq_fname+' 
'+str(len(xq))+'\\n')\n fout.write(qwords_dict_fname+' '+str(len(q_words))+'\\n')\n fout.close()\n\n ## memmap qword_embeds\n fp = np.memmap(xq_fname, dtype='float32', mode='w+', shape=(len(xq),768))\n fp[:] = xq[:]\n del fp\n\n qsentences_dict.update([(idx,sent) for idx,sent in enumerate(q_sentences)])\n qwords_dict.update([(idx,qword) for idx,qword in enumerate(q_words)])\n\n qsentences_dict.commit()\n qwords_dict.commit()\n\n qsentences_dict.close()\n qwords_dict.close()\n\n print('finished processing query matrix...')\n\n # return xq_fname, qsents_fname, qwords_fname\n\n\n\n #################################################\n ########### HELPER FUNCTIONS #########\n #################################################\n\ndef get_partition_slice(it, size, section):\n '''I could change this to only return the start and end index of each subarray instead of all the indices for that partition\n '''\n it = iter(it)\n return list(iter(lambda: tuple(islice(it, size)), ()))[section]\n\ndef get_slice(it, size):\n '''I could change this to only return the start and end index of each subarray instead of all the indices for that partition\n '''\n it = iter(it)\n return list(iter(lambda: tuple(islice(it, size)), ()))\n\ndef custom_collate(x):\n return x \n\ndef batchify(sentences, batch_size):\n\n batched_items, this_batch, = [], []\n for cur_item in islice(sentences,None,None):\n this_batch.append(cur_item)\n if len(this_batch) == batch_size:\n batched_items.append(this_batch)\n this_batch = []\n if len(this_batch) > 0:\n batched_items.append(this_batch)\n\n return batched_items\n\n\ndef fast_read_from_sqlite_dict(sqlite_dict, start_index, end_index):\n\n sqlite_dict_db = sqlite3.connect(sqlite_dict)\n sqlite_dict_db_cursor = sqlite_dict_db.cursor()\n sqlite_dict_db_cursor.execute(\"SELECT value FROM unnamed WHERE CAST(key as INTEGER) >= ? AND CAST(key as INTEGER) <= ?;\", (start_index, end_index))\n \n return [pkl.loads(x) for x in itertools.chain.from_iterable(sqlite_dict_db_cursor.fetchall())]\n\n\n# import itertools\n# trace_iter_1, trace_iter_2 = itertools.tee(trace_iter)\n# cur_trace_data = [(value, key) for key, value in zip(trace_iter_1, fast_read_from_sqlite_dict(trace_data, trace_iter_2))]\n\n## do sanity check in ipython on loading these dictionaries and reading in using fast read, find out how to do cur_trace\n## be careful about the indexing, b/c it looks like whatever is indexed in fast read includes the end index, whereas in trace_iter = list(range(start, end)) end does not. So you might need to do +1 or -1\n\n #################################################\n ########### Main #########\n ################################################# \n\n\ndef main(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps):\n\n global args \n\n print('did you make it here?')\n\n ## xb files \n words_dict = SqliteDict(args.outDir+'words_job'+args.job_id+'.db')\n word_embeds_fname = args.outDir+'word_embeds_job'+args.job_id+'.dat'\n\n print('\\nprocessing files for job {}...'.format(args.job_id))\n\n start = time.time()\n\n ## Generates words and respective word_embeds for each partition of the sentence index \n ## and outputs them to outfolders to combine later for creating annoy index\n print('handling batches for job %s...' 
% (args.job_id))\n handle_batches(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps, words_dict, word_embeds_fname)\n\n handle_batches_time = time.time()-start\n\n print('time handling batches: %s' % (time.strftime(\"%H:%M:%S\", time.gmtime(handle_batches_time))))\n\n print('finished job {}'.format(args.job_id))\n\n\nif __name__ == '__main__':\n\n main_begin = time.time()\n\n print('---argparser---:')\n for arg in vars(args):\n print(arg, '\\t', getattr(args, arg), '\\t', type(arg))\n\n # run processing on GPU <gpu_id>\n cuda_idx = args.gpu_id\n\n with torch.cuda.device(cuda_idx):\n\n ## initialize bert_tokenizer and bert_model as global variable for all jobs\n if args.SBERT_flag:\n print('loading SBERT')\n ## Loads SBERT\n bert_tokenizer = None\n bert_model = SentenceTransformer('bert-base-nli-mean-tokens') ## model = SentenceTransformer('bert-base-nli-stsb-mean-tokens')\n bert_model = bert_model.cuda()\n else:\n print('loading regular BERT')\n ## Loads BERT-base uncased\n ## BERT-Base, Uncased: 12-layer, 768-hidden, 12-heads, 110M parameters\n bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n bert_model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True, output_attentions=True)\n bert_model = bert_model.cuda()\n # bert_model = apex.amp.initialize(bert_model, opt_level=\"O2\").to(device)\n\n if int(args.job_id) == 1:\n print('\\nOnly processing query matrix during job {}: '.format(args.job_id))\n create_query_matrix()\n\n # print(\"l\\nen(sent_data): {}, len(trace_data): {}, len(spacy_toks): {} len(spacy_pos): {} len(spacy_deps): {}\".format(len(sent_data), len(trace_data), len(spacy_toks), len(spacy_pos), len(spacy_deps)))\n\n ## get correct partition for this job\n start_index, end_index = job_slices[int(args.job_id)-1] \n print('\\njob {} - start index: {} end index: {} len(cur_partition): {}'.format(args.job_id, start_index, end_index, end_index-start_index))\n\n start = time.time()\n cur_sent_data = fast_read_from_sqlite_dict(args.outDir+args.sentences_dict, start_index, end_index)\n trace_iter = iter(list(range(start_index, end_index+1)))\n cur_trace_data = [(value, key) for key, value in zip(trace_iter, fast_read_from_sqlite_dict(args.outDir+args.trace_dict, start_index, end_index))]\n cur_spacy_toks = fast_read_from_sqlite_dict(args.outDir+args.spacy_toks_dict, start_index, end_index)\n cur_spacy_pos = fast_read_from_sqlite_dict(args.outDir+args.spacy_pos_dict, start_index, end_index)\n cur_spacy_deps = fast_read_from_sqlite_dict(args.outDir+args.spacy_deps_dict, start_index, end_index)\n retrieve_time = time.time() - start\n\n print('total elapsed time retrieving the current partition: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(retrieve_time))))\n\n\n print(\"\\nlen(cur_sent_data): {}, len(cur_trace_data): {}\".format(len(cur_sent_data), len(cur_trace_data)))\n print(\"len(cur_spacy_toks): {} len(cur_spacy_pos): {} len(cur_spacy_deps): {}\".format(len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps)))\n\n main(cur_sent_data, cur_trace_data, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps)\n\n main_end = time.time() - main_begin\n print('total time inside main: %s'% (time.strftime(\"%H:%M:%S\", time.gmtime(main_end))))\n\n # ## start job on partition of the sentence index\n # split_size = int(len(sent_data)/args.NUM_JOBS)\n # cur_partition = get_slice(list(range(len(sent_data))), split_size, (int(args.job_id)-1))\n # print('job {} - start index {} end index {}'.format(args.job_id, cur_partition[0], 
cur_partition[-1]))\n # if len(cur_partition)>=2:\n # i, j = cur_partition[0], cur_partition[-1]\n # main(sent_data[i:j+1], trace_data[i:j+1], spacy_toks[i:j+1], spacy_pos[i:j+1], spacy_deps[i:j+1])\n # else:\n # i = cur_partition[0]\n # main(sent_data[i:], trace_data[i:], spacy_toks[i:], spacy_pos[i:], spacy_deps[i:])\n\n\n'''\n## To run this file:\n## Create virtual environment on nlpgrid\npython3 -m venv <path_for_virtual_environment>\n\n## Reno example:\npython3 -m venv ~/venv3/ ##becca's venv: giga\n\n## Activate virtual environment\nsource <path_for_virtual_environment>/bin/activate\n\n## Reno example:\nsource ~/venv3/bin/activate \ne.g. ~/giga/bin/activate ##becca's venv: giga\n\n## Install packages necessary\npip install nltk\npip install numpy\npip install tqdm\npip install torch==1.4.0\npip install transformers\npip install annoy\npip install faiss-gpu (see section on installing faiss for more info)\npip install sklearn\npip install -U sentence-transformers\npip install ndjson\npip install spacy\npython3 -m spacy download en_core_web_lg\n\n## confirm torch version\npython3\n>>>import torch\n>>>print(torch.__version__) //should be 1.4.0\n\n## installing faiss\n to check which cuda version you have in nlpgrid\n cat /usr/local/cuda/version.txt\n for CPU version\n conda install faiss-cpu -c pytorch\n for GPU version\n conda install faiss-cpu -c pytorch\n conda install faiss-gpu cudatoolkit=8.0 -c pytorch # For CUDA8\n conda install faiss-gpu cudatoolkit=9.0 -c pytorch # For CUDA9\n conda install faiss-gpu cudatoolkit=10.0 -c pytorch # For CUDA10\n for nlpgrid gpus\n pip install faiss-gpu\n## confirm faiss\n python3\n >>>import faiss\n >>>import numpy as np\n## confirm annoy\n python3\n >>>import annoy\n >>>from annoy import AnnoyIndex\n \n'''\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.stack",
"torch.no_grad",
"torch.tensor",
"numpy.in1d",
"sklearn.preprocessing.normalize",
"numpy.array",
"torch.cuda.device"
]
] |
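The file above pulls contiguous key ranges out of SqliteDict databases with a raw SQL query (fast_read_from_sqlite_dict) rather than per-key lookups. A minimal sketch of that pattern, assuming sqlitedict's default table name "unnamed" and a hypothetical demo.db file; note the range is inclusive on both ends, matching the +1 handling in the source:

import pickle as pkl
import sqlite3
from sqlitedict import SqliteDict

# sqlitedict stores pickled values under TEXT keys in a table named "unnamed"
db = SqliteDict('demo.db')
db.update((str(i), 'sentence %d' % i) for i in range(10))
db.commit()
db.close()

conn = sqlite3.connect('demo.db')
cur = conn.cursor()
cur.execute(
    "SELECT value FROM unnamed "
    "WHERE CAST(key AS INTEGER) >= ? AND CAST(key AS INTEGER) <= ?;",
    (3, 6))
print([pkl.loads(row[0]) for row in cur.fetchall()])  # sentences 3..6, both ends included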
dantaslab/resfams_update | [
"982091818a299d316811fe98c7656762be7284fb"
] | [
"Analysis/Precision-Recall_Analysis/scripts/add_tp_seqs.py"
] | [
"import sys\nimport os\nimport pandas as pd\nimport csv\nimport argparse\nfrom collections import OrderedDict\nfrom io import StringIO\n\n\ndef main(argv):\n args = parse_arguments(argv)\n out = args.out_path\n file1 = args.file1\n file2 = args.file2\n file3 = args.file3\n\n\n ddf1 = addSeqs(file1,file2)\n ddf2 = removeSeqs(ddf1,file3)\n\n\n with open(out, 'w+') as output:\n for row in ddf2:\n print(\"\\t\".join(row))\n output.write(\"\\t\".join(row)+\"\\n\")\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser(\n prog = 'mapping.py',\n description = 'A program to map two files (csv of txt) to each other')\n parser.add_argument(\n '-f1', '--file1',\n help = 'Enter first file.',\n required = True\n )\n parser.add_argument(\n '-f2', '--file2',\n help = 'Enter fplist file.',\n required = True\n )\n parser.add_argument(\n '-f3', '--file3',\n help = 'Enter nhlist file.',\n required = True\n )\n parser.add_argument(\n '-o', '-outpath',\n dest = 'out_path',\n help = 'Enter path to dropped seqs file'\n )\n\n return parser.parse_args()\n\n\ndef addSeqs(file1, file2):\n df1 = pd.read_table(file1, sep=\"\\t\", names=['seq_name','dbID'])\n\n df2 = pd.read_table(file2, sep=\"\\t\", skiprows=2, usecols=[0,2], names=['seq_name','dbID'])\n\n\n ddf = pd.concat([df1,df2])\n ddf = ddf.groupby('seq_name')['dbID'].apply(list).map(set).str.join('|')\n ddf = ddf.reset_index()\n print(ddf.head())\n\n return ddf\n\n\ndef removeSeqs(ddf1, file3):\n\n data = ddf1.values.tolist()\n\n nhlist = []\n with open(file3) as f3:\n reader = csv.reader(f3, delimiter='\\t')\n next(reader, None)\n next(reader, None)\n for row in reader:\n # print(row)\n nhlist.append(row)\n\n\n ddf2 = []\n for row in data:\n if row[1] != None:\n rfids = str(row[1]).split(\"|\")\n else:\n rfids = []\n\n\n for seq in nhlist:\n id = seq[2]\n\n if row[0] == seq[0]:\n for rfid in rfids:\n if id == rfid:\n rfids.remove(id)\n\n array = [row[0],\"|\".join(rfids)]\n ddf2.append(array)\n\n return ddf2\n\n\n\n\n\nif __name__==\"__main__\":\n main(sys.argv[1:])\n"
] | [
[
"pandas.read_table",
"pandas.concat"
]
] |
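addSeqs above collapses duplicate seq_name rows into one pipe-joined set of database IDs with a groupby/apply/map/str.join chain. The same chain on made-up data, for reference (set iteration order, and hence ID order in the joined string, is arbitrary):

import pandas as pd

df = pd.DataFrame({'seq_name': ['s1', 's1', 's2'],
                   'dbID': ['RF001', 'RF002', 'RF001']})
merged = (df.groupby('seq_name')['dbID']
            .apply(list)    # s1 -> ['RF001', 'RF002']
            .map(set)       # drop duplicate IDs
            .str.join('|')  # s1 -> 'RF001|RF002'
            .reset_index())
print(merged)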
mendezr/MetPy | [
"0c75c14ac4af360b06ed7c4735b17709caef2449"
] | [
"metpy/io/tests/test_io_tools.py"
] | [
"# Copyright (c) 2016 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Test the `io.tools` module.\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom metpy.io._tools import hexdump, UnitLinker\nfrom metpy.io.cdm import Dataset\nfrom metpy.testing import assert_array_equal, ignore_deprecation\nfrom metpy.units import units\n\n\[email protected]()\n@ignore_deprecation\ndef test_var():\n \"\"\"Fixture to create a dataset and variable for tests.\"\"\"\n ds = Dataset()\n ds.createDimension('x', 5)\n var = ds.createVariable('data', 'f4', ('x',), 5)\n var[:] = np.arange(5)\n return var\n\n\ndef test_unit_linker(test_var):\n \"\"\"Test that UnitLinker successfully adds units.\"\"\"\n test_var.units = 'meters'\n new_var = UnitLinker(test_var)\n assert_array_equal(new_var[:], np.arange(5) * units.m)\n\n\ndef test_unit_linker_get_units(test_var):\n \"\"\"Test that we can get the units from UnitLinker.\"\"\"\n test_var.units = 'knots'\n new_var = UnitLinker(test_var)\n assert new_var.units == units('knots')\n\n\ndef test_unit_linker_missing(test_var):\n \"\"\"Test that UnitLinker works with missing units.\"\"\"\n new_var = UnitLinker(test_var)\n assert_array_equal(new_var[:], np.arange(5))\n\n\ndef test_unit_linker_bad(test_var):\n \"\"\"Test that UnitLinker ignores bad unit strings.\"\"\"\n test_var.units = 'badunit'\n new_var = UnitLinker(test_var)\n assert_array_equal(new_var[:], np.arange(5))\n\n\ndef test_unit_override(test_var):\n \"\"\"Test that we can override a variable's bad unit string.\"\"\"\n test_var.units = 'C'\n new_var = UnitLinker(test_var)\n new_var.units = 'degC'\n assert_array_equal(new_var[:], np.arange(5) * units.degC)\n\n\ndef test_unit_override_obj(test_var):\n \"\"\"Test that we can override with an object.\"\"\"\n test_var.units = 'C'\n new_var = UnitLinker(test_var)\n new_var.units = units.degC\n assert_array_equal(new_var[:], np.arange(5) * units.degC)\n\n\ndef test_attribute_forwarding(test_var):\n \"\"\"Test that we are properly able to access attributes from the variable.\"\"\"\n test_var.att = 'abc'\n new_var = UnitLinker(test_var)\n assert new_var.att == test_var.att\n\n\ndef test_hexdump():\n \"\"\"Test hexdump tool.\"\"\"\n data = bytearray([77, 101, 116, 80, 121])\n assert hexdump(data, 4, width=8) == '4D657450 79------ 0 0 MetPy'\n"
] | [
[
"numpy.arange"
]
] |
mcx/open_spiel | [
"062cbfc07621343e7d77209cb421ba690328142b"
] | [
"open_spiel/python/algorithms/double_oracle_test.py"
] | [
"# Copyright 2019 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for open_spiel.python.algorithms.double_oracle.\"\"\"\n\nfrom absl.testing import absltest\nimport numpy as np\n\nfrom open_spiel.python.algorithms import double_oracle\nimport pyspiel\n\n\nclass DoubleOracleTest(absltest.TestCase):\n\n def test_rock_paper_scissors(self):\n game = pyspiel.load_matrix_game(\"matrix_rps\")\n solver = double_oracle.DoubleOracleSolver(game)\n solution, iteration, value = solver.solve(initial_strategies=[[0], [0]])\n np.testing.assert_allclose(solution[0], np.ones(3)/3.)\n np.testing.assert_allclose(solution[1], np.ones(3)/3.)\n self.assertEqual(iteration, 3)\n self.assertAlmostEqual(value, 0.0)\n\n def test_single_step(self):\n game = pyspiel.load_matrix_game(\"matrix_rps\")\n solver = double_oracle.DoubleOracleSolver(game)\n solver.subgame_strategies = [[0], [0]]\n best_response, best_response_utility = solver.step()\n self.assertListEqual(best_response, [1, 1])\n self.assertListEqual(best_response_utility, [1.0, 1.0])\n\n def test_kuhn_poker(self):\n game = pyspiel.extensive_to_matrix_game(pyspiel.load_game(\"kuhn_poker\"))\n solver = double_oracle.DoubleOracleSolver(game)\n solution, iteration, value = solver.solve(initial_strategies=[[0], [0]])\n\n # check if solution is Nash\n exp_utilty = solution[0] @ solver.payoffs @ solution[1]\n self.assertAlmostEqual(max(solver.payoffs[0] @ solution[1]), exp_utilty[0])\n self.assertAlmostEqual(max(solution[0] @ solver.payoffs[1]), exp_utilty[1])\n\n self.assertEqual(iteration, 8)\n self.assertAlmostEqual(value, 0.0)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] | [
[
"numpy.ones"
]
] |
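The kuhn_poker test checks the Nash condition directly: against the opponent's mixed strategy, no pure strategy may earn more than the candidate solution's expected utility. The same check in plain NumPy on matching pennies (payoff matrices assumed, stacked player-first as in the solver):

import numpy as np

A = np.array([[1., -1.], [-1., 1.]])  # row player's payoffs; zero-sum game
payoffs = np.stack([A, -A])           # payoffs[i] is player i's matrix
x = np.array([0.5, 0.5])              # candidate equilibrium for the row player
y = np.array([0.5, 0.5])              # ... and for the column player

exp_utility = x @ payoffs @ y         # expected utility per player
assert np.isclose(max(payoffs[0] @ y), exp_utility[0])  # no profitable row deviation
assert np.isclose(max(x @ payoffs[1]), exp_utility[1])  # no profitable column deviation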
nan-wang/PaddleOCR | [
"09604c38e42591c240771edbbff43a6dd7ebf592",
"09604c38e42591c240771edbbff43a6dd7ebf592"
] | [
"tools/infer_table.py",
"ppocr/data/imaug/iaa_augment.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport os\nimport sys\nimport json\n\n__dir__ = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(__dir__)\nsys.path.append(os.path.abspath(os.path.join(__dir__, '..')))\n\nos.environ[\"FLAGS_allocator_strategy\"] = 'auto_growth'\n\nimport paddle\nfrom paddle.jit import to_static\n\nfrom ppocr.data import create_operators, transform\nfrom ppocr.modeling.architectures import build_model\nfrom ppocr.postprocess import build_post_process\nfrom ppocr.utils.save_load import init_model\nfrom ppocr.utils.utility import get_image_file_list\nimport tools.program as program\nimport cv2\n\ndef main(config, device, logger, vdl_writer):\n global_config = config['Global']\n\n # build post process\n post_process_class = build_post_process(config['PostProcess'],\n global_config)\n\n # build model\n if hasattr(post_process_class, 'character'):\n config['Architecture'][\"Head\"]['out_channels'] = len(\n getattr(post_process_class, 'character'))\n\n model = build_model(config['Architecture'])\n\n init_model(config, model, logger)\n\n # create data ops\n transforms = []\n use_padding = False\n for op in config['Eval']['dataset']['transforms']:\n op_name = list(op)[0]\n if 'Label' in op_name:\n continue\n if op_name == 'KeepKeys':\n op[op_name]['keep_keys'] = ['image']\n if op_name == \"ResizeTableImage\":\n use_padding = True\n padding_max_len = op['ResizeTableImage']['max_len']\n transforms.append(op)\n\n global_config['infer_mode'] = True\n ops = create_operators(transforms, global_config)\n\n model.eval()\n for file in get_image_file_list(config['Global']['infer_img']):\n logger.info(\"infer_img: {}\".format(file))\n with open(file, 'rb') as f:\n img = f.read()\n data = {'image': img}\n batch = transform(data, ops)\n images = np.expand_dims(batch[0], axis=0)\n images = paddle.to_tensor(images)\n preds = model(images)\n post_result = post_process_class(preds)\n res_html_code = post_result['res_html_code']\n res_loc = post_result['res_loc']\n img = cv2.imread(file)\n imgh, imgw = img.shape[0:2]\n res_loc_final = []\n for rno in range(len(res_loc[0])):\n x0, y0, x1, y1 = res_loc[0][rno]\n left = max(int(imgw * x0), 0)\n top = max(int(imgh * y0), 0)\n right = min(int(imgw * x1), imgw - 1)\n bottom = min(int(imgh * y1), imgh - 1)\n cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)\n res_loc_final.append([left, top, right, bottom])\n res_loc_str = json.dumps(res_loc_final)\n logger.info(\"result: {}, {}\".format(res_html_code, res_loc_final))\n logger.info(\"success!\")\n\n\nif __name__ == '__main__':\n config, device, logger, vdl_writer = program.preprocess()\n main(config, device, logger, vdl_writer)\n\n",
"# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis code is refer from:\nhttps://github.com/WenmuZhou/DBNet.pytorch/blob/master/data_loader/modules/iaa_augment.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport imgaug\nimport imgaug.augmenters as iaa\n\n\nclass AugmenterBuilder(object):\n def __init__(self):\n pass\n\n def build(self, args, root=True):\n if args is None or len(args) == 0:\n return None\n elif isinstance(args, list):\n if root:\n sequence = [self.build(value, root=False) for value in args]\n return iaa.Sequential(sequence)\n else:\n return getattr(iaa, args[0])(\n *[self.to_tuple_if_list(a) for a in args[1:]])\n elif isinstance(args, dict):\n cls = getattr(iaa, args['type'])\n return cls(**{\n k: self.to_tuple_if_list(v)\n for k, v in args['args'].items()\n })\n else:\n raise RuntimeError('unknown augmenter arg: ' + str(args))\n\n def to_tuple_if_list(self, obj):\n if isinstance(obj, list):\n return tuple(obj)\n return obj\n\n\nclass IaaAugment():\n def __init__(self, augmenter_args=None, **kwargs):\n if augmenter_args is None:\n augmenter_args = [{\n 'type': 'Fliplr',\n 'args': {\n 'p': 0.5\n }\n }, {\n 'type': 'Affine',\n 'args': {\n 'rotate': [-10, 10]\n }\n }, {\n 'type': 'Resize',\n 'args': {\n 'size': [0.5, 3]\n }\n }]\n self.augmenter = AugmenterBuilder().build(augmenter_args)\n\n def __call__(self, data):\n image = data['image']\n shape = image.shape\n\n if self.augmenter:\n aug = self.augmenter.to_deterministic()\n data['image'] = aug.augment_image(image)\n data = self.may_augment_annotation(aug, data, shape)\n return data\n\n def may_augment_annotation(self, aug, data, shape):\n if aug is None:\n return data\n\n line_polys = []\n for poly in data['polys']:\n new_poly = self.may_augment_poly(aug, shape, poly)\n line_polys.append(new_poly)\n data['polys'] = np.array(line_polys)\n return data\n\n def may_augment_poly(self, aug, img_shape, poly):\n keypoints = [imgaug.Keypoint(p[0], p[1]) for p in poly]\n keypoints = aug.augment_keypoints(\n [imgaug.KeypointsOnImage(\n keypoints, shape=img_shape)])[0].keypoints\n poly = [(p.x, p.y) for p in keypoints]\n return poly\n"
] | [
[
"numpy.expand_dims"
],
[
"numpy.array"
]
] |
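AugmenterBuilder in iaa_augment.py resolves augmenter names against the imgaug.augmenters module with getattr and converts list-valued args to the tuples imgaug expects for ranges. A condensed sketch of that dispatch (imgaug assumed installed):

import imgaug.augmenters as iaa

def build(spec):
    """Build iaa.Sequential from [{'type': ..., 'args': {...}}, ...]."""
    to_tuple = lambda v: tuple(v) if isinstance(v, list) else v
    return iaa.Sequential([
        getattr(iaa, s['type'])(**{k: to_tuple(v) for k, v in s['args'].items()})
        for s in spec
    ])

aug = build([{'type': 'Fliplr', 'args': {'p': 0.5}},
             {'type': 'Affine', 'args': {'rotate': [-10, 10]}}])  # rotate becomes (-10, 10)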
allisonchen23/Pensieve-PPO2 | [
"c4add70d3e1e28a6d2e90f2571ca53d1d35647e1"
] | [
"src/a2c.py"
] | [
"import tflearn\nimport math\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport time\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\nFEATURE_NUM = 128\nEPS = 1e-4\nGAMMA = 0.99\n\n\nclass Network():\n def CreateNetwork(self, inputs):\n with tf.variable_scope('actor'):\n split_0 = tflearn.fully_connected(\n inputs[:, 0:1, -1], FEATURE_NUM, activation='relu')\n split_1 = tflearn.fully_connected(\n inputs[:, 1:2, -1], FEATURE_NUM, activation='relu')\n split_2 = tflearn.conv_1d(\n inputs[:, 2:3, :], FEATURE_NUM, 4, activation='relu')\n split_3 = tflearn.conv_1d(\n inputs[:, 3:4, :], FEATURE_NUM, 4, activation='relu')\n split_4 = tflearn.conv_1d(\n inputs[:, 4:5, :self.a_dim], FEATURE_NUM, 4, activation='relu')\n split_5 = tflearn.fully_connected(\n inputs[:, 5:6, -1], FEATURE_NUM, activation='relu')\n\n split_2_flat = tflearn.flatten(split_2)\n split_3_flat = tflearn.flatten(split_3)\n split_4_flat = tflearn.flatten(split_4)\n\n merge_net = tflearn.merge(\n [split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')\n\n net = tflearn.fully_connected(\n merge_net, FEATURE_NUM, activation='relu')\n\n pi = tflearn.fully_connected(net, self.a_dim, activation='softmax')\n value = tflearn.fully_connected(net, 1, activation='linear')\n return pi, value\n\n def get_network_params(self):\n return self.sess.run(self.network_params)\n\n def set_network_params(self, input_network_params):\n self.sess.run(self.set_network_params_op, feed_dict={\n i: d for i, d in zip(self.input_network_params, input_network_params)\n })\n\n def __init__(self, sess, state_dim, action_dim, learning_rate):\n self.quality = 0\n self.s_dim = state_dim\n self.a_dim = action_dim\n self.lr_rate = learning_rate\n self.sess = sess\n self.outputs = tf.placeholder(tf.float32, [None, 1])\n self.inputs = tf.placeholder(\n tf.float32, [None, self.s_dim[0], self.s_dim[1]])\n self.acts = tf.placeholder(tf.float32, [None, self.a_dim])\n self.entropy_weight = tf.placeholder(tf.float32)\n self.pi, self.val = self.CreateNetwork(inputs=self.inputs)\n self.real_out = tf.clip_by_value(self.pi, EPS, 1. 
- EPS)\n self.log_prob = tf.log(tf.reduce_sum(tf.multiply(\n self.real_out, self.acts), reduction_indices=1, keepdims=True))\n self.entropy = tf.multiply(self.real_out, tf.log(self.real_out))\n\n # Get all network parameters\n self.network_params = \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')\n\n # Set all network parameters\n self.input_network_params = []\n for param in self.network_params:\n self.input_network_params.append(\n tf.placeholder(tf.float32, shape=param.get_shape()))\n self.set_network_params_op = []\n for idx, param in enumerate(self.input_network_params):\n self.set_network_params_op.append(\n self.network_params[idx].assign(param))\n\n self.loss = tflearn.mean_square(self.val, self.outputs) \\\n - tf.reduce_mean(self.log_prob * tf.stop_gradient(self.outputs - self.val)) \\\n + self.entropy_weight * tf.reduce_mean(self.entropy)\n\n self.optimize = tf.train.AdamOptimizer(\n self.lr_rate).minimize(self.loss)\n\n def predict(self, input):\n action = self.sess.run(self.real_out, feed_dict={\n self.inputs: input\n })\n return action[0]\n\n def get_entropy(self, step):\n if step < 20000:\n return 5.\n elif step < 50000:\n return 3.\n elif step < 70000:\n return 1.\n elif step < 90000:\n return 0.5\n elif step < 120000:\n return 0.3\n else:\n return 0.1\n\n def train(self, s_batch, a_batch, v_batch, epoch):\n # print s_batch.shape, a_batch.shape, v_batch.shape\n # s_batch, a_batch, v_batch = tflearn.data_utils.shuffle(\n # s_batch, a_batch, v_batch)\n self.sess.run(self.optimize, feed_dict={\n self.inputs: s_batch,\n self.acts: a_batch,\n self.outputs: v_batch,\n self.entropy_weight: self.get_entropy(epoch)\n })\n\n def compute_v(self, s_batch, a_batch, r_batch, terminal):\n ba_size = len(s_batch)\n\n R_batch = np.zeros([len(r_batch), 1])\n\n if terminal:\n R_batch[-1, 0] = 0 # terminal state\n else:\n v_batch = self.sess.run(self.val, feed_dict={\n self.inputs: s_batch\n })\n R_batch[-1, 0] = v_batch[-1, 0] # boot strap from last state\n for t in reversed(range(ba_size - 1)):\n R_batch[t, 0] = r_batch[t] + GAMMA * R_batch[t + 1, 0]\n\n return list(R_batch)\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.get_collection",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"tensorflow.train.AdamOptimizer",
"tensorflow.stop_gradient",
"tensorflow.clip_by_value",
"tensorflow.log"
]
] |
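compute_v above builds return targets by bootstrapping from the critic's value at the last state (unless terminal) and folding rewards backward with the discount. The recursion in isolation, on toy numbers:

import numpy as np

GAMMA = 0.99

def discounted_returns(rewards, bootstrap_value, terminal):
    R = np.zeros(len(rewards))
    # terminal episodes carry no future value; otherwise bootstrap from the critic
    R[-1] = 0.0 if terminal else bootstrap_value
    for t in reversed(range(len(rewards) - 1)):
        R[t] = rewards[t] + GAMMA * R[t + 1]
    return R

print(discounted_returns([1.0, 0.0, 2.0], bootstrap_value=0.5, terminal=False))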
coryschwartz/nebula-crawler | [
"34ebe1109a5117949b4f285891a065adcc0bae08"
] | [
"analysis/mixed/plot_churn.py"
] | [
"import psycopg2\nimport toml\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom lib import node_time, node_uptime\n\nconfig = toml.load(\"./db.toml\")['psql']\nconn = psycopg2.connect(\n host=config['host'],\n port=config['port'],\n database=config['database'],\n user=config['user'],\n password=config['password'],\n)\n\nstart, end = node_time.get_time_range(conn)\nuptimes = [i for i in node_uptime.get_node_uptime(conn, start, end) if i]\n\n# Plotting cdf of uptimes, code adapted from ../churn/cdf.py\nhist_values, bin_edges = np.histogram(\n uptimes, bins=len(uptimes), density=True\n)\n\n# Since we provided an integer to the bins parameter above. The edges are equal width.\n# This means the width between the first two elements is equal for all edges.\nedge_width = bin_edges[1] - bin_edges[0]\n\n# Integerate over histogram\ncumsum = np.cumsum(hist_values) * edge_width\n\n# build plot\nplt.plot(bin_edges[1:], cumsum, label=\"All sessions\")\n\n# Presentation logic\nplt.rc('font', size=8)\nplt.gca().xaxis.set_major_formatter(lambda x, pos=None: x / 3600)\nplt.xlabel(\"Hours\")\nplt.ylabel(\"Percentage of online peers\")\nplt.tight_layout()\nplt.xticks(np.arange(0, max(bin_edges[1:]), 3 * 60 * 60))\nplt.xlim(-60 * 60, 24 * 60 * 60)\nplt.grid(True)\nplt.legend()\nplt.title(\"Session cdf from %s to %s\" % (start.replace(microsecond=0), end.replace(microsecond=0)))\n\n# Finalize\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.cumsum",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
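The churn plot recovers an empirical CDF by integrating a density histogram: with equal-width bins, cumsum(hist) * bin_width rises to 1. A quick standalone check of that identity on synthetic uptimes:

import numpy as np

samples = np.random.exponential(scale=3600.0, size=10000)  # synthetic session lengths
hist, edges = np.histogram(samples, bins=100, density=True)
cdf = np.cumsum(hist) * (edges[1] - edges[0])  # equal-width bins
assert np.isclose(cdf[-1], 1.0)                # a probability density integrates to 1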
bendevera/DS-Unit-3-Sprint-2-SQL-and-Databases | [
"24290da3bdbfaaadfa87c23f6f4196e2220360ab"
] | [
"module2-sql-for-analysis/insert_titantic.py"
] | [
"import pandas as pd \nimport psycopg2\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\n\n# read in our data\ndf = pd.read_csv('./titanic.csv')\nprint(f\"DF shape: {df.shape}\")\n\n# create connection to db we want to move the data to\nconn = psycopg2.connect(\n host=os.getenv('DB_HOST'), \n dbname=os.getenv('DB_USER'), \n user=os.getenv('DB_USER'), \n password=os.getenv('DB_PASSWORD')\n)\ncur = conn.cursor()\n\n# ensure the table is fresh by dropping if exists and creating from scratch\nquery = \"select exists(select * from information_schema.tables where table_name='titantic')\"\ncur.execute(query)\n\nif cur.fetchone()[0]:\n print(\"dropping table...\")\n query = \"DROP TABLE titantic;\"\n cur.execute(query)\n\nprint(\"creating table...\")\nquery = \"\"\"\nCREATE TABLE titantic (\n id SERIAL PRIMARY KEY,\n survived BOOLEAN,\n class TEXT,\n name TEXT,\n sex TEXT,\n age INTEGER,\n siblings BOOLEAN,\n parents BOOLEAN,\n fare REAL\n)\n\"\"\"\ncur.execute(query)\n\ndef get_name(name):\n return name.replace(\"'\", \"\")\n\ndef get_row(row):\n return (bool(row[0]), row[1], get_name(row[2]), row[3], row[4], bool(row[5]), bool(row[6]), row[7])\n\n# for each row in the csv, add a row to the postgres db\nprint(\"adding rows...\")\nfor row in df.values:\n query = \"INSERT INTO titantic (survived, class, name, sex, age, siblings, parents, fare) VALUES \" + str(get_row(row)) + \";\"\n cur.execute(query)\n\nquery = \"SELECT * FROM titantic\"\ncur.execute(query)\nrows = cur.fetchall()\nprint(f\"Num rows: {len(rows)}\")\nconn.commit()\ncur.close()"
] | [
[
"pandas.read_csv"
]
] |
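insert_titantic.py interpolates each row into the INSERT statement with str(), which is why get_name has to strip apostrophes first. With psycopg2 the same load is safer through parameter placeholders, which quote values for you; a sketch with hypothetical connection credentials:

import psycopg2

conn = psycopg2.connect(host='localhost', dbname='demo', user='demo', password='demo')
cur = conn.cursor()
query = ("INSERT INTO titantic "
         "(survived, class, name, sex, age, siblings, parents, fare) "
         "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
rows = [(True, 'First', "O'Brien, Mr. Tim", 'male', 32, False, False, 7.25)]
cur.executemany(query, rows)  # the driver escapes the apostrophe in the name
conn.commit()
cur.close()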
cclauss/h2o4gpu | [
"9885416deb3285f5d0f33023d6c07373ac4fc0b7"
] | [
"src/interface_py/h2o4gpu/util/import_data.py"
] | [
"#- * - encoding : utf - 8 - * -\n\"\"\"\n:copyright: 2017-2018 H2O.ai, Inc.\n:license: Apache License Version 2.0 (see LICENSE for details)\n\"\"\"\n\n\ndef import_data(data_path,\n use_pandas=False,\n intercept=True,\n valid_fraction=0.2,\n classification=True):\n \"\"\"Import Data for H2O GPU Edition\n\n This function will read in data and prepare it for H2O4GPU's GLM solver.\n\n Note, the data is assumed to be all numeric,i.e.,\n categoricals are one hot encoded, etc.\n\n :param data_path : str\n A path to a dataset (The dataset needs to be all numeric)\n :param use_pandas : bool\n Indicate if Pandas should be used to parse\n :param intercept : bool\n Indicate if intercept term is needed\n :param valid_fraction : float\n Percentage of dataset reserved for a validation set\n :param classification : bool\n Classification problem?\n :returns\n If valid_fraction > 0 it will return the following:\n train_x: numpy array of train input variables\n train_y: numpy array of y variable\n valid_x: numpy array of valid input variables\n valid_y: numpy array of valid y variable\n family : string that would either be \"logistic\" if classification is set\n to True, otherwise \"elasticnet\"\n If valid_fraction == 0 it will return the following:\n train_x: numpy array of train input variables\n train_y: numpy array of y variable\n family : string that would either be \"logistic\" if classification is set\n to True, otherwise \"elasticnet\"\n \"\"\"\n #Can import data using pandas or feather.\n use_pandas = use_pandas\n\n data_file = data_path # If importing using pandas\n\n if use_pandas:\n print(\"Reading Data with Pandas\")\n import pandas as pd\n data = pd.read_csv(data_file)\n else:\n print(\"Reading Data with Feather\")\n import feather\n data = feather.read_dataframe(data_file)\n print(data.shape)\n import numpy as np\n data_x = np.array(\n data.iloc[:, :data.shape[1] - 1],\n dtype='float32',\n order='C',\n copy=False)\n data_y = np.array(\n data.iloc[:, data.shape[1] - 1], dtype='float32', order='C', copy=False)\n\n #Setup train / validation set split\n #(assuming form of mxn where m = row count and n = col count)\n morig = data_x.shape[0]\n norig = data_x.shape[1]\n print(\"Original m=%d n=%d\" % (morig, norig))\n import sys\n sys.stdout.flush()\n\n #Do train / valid split\n if valid_fraction > 0:\n valid_fraction = valid_fraction\n HO = int(valid_fraction * morig)\n H = morig - HO\n print(\"Size of Train rows=%d & valid rows=%d\" % (H, HO))\n sys.stdout.flush()\n train_x = data_x[0:H, :]\n train_y = data_y[0:H]\n valid_x = data_x[H:morig, :]\n valid_y = data_y[H:morig]\n print(\"Size of Train cols=%d valid cols=%d\" % (train_x.shape[1],\n valid_x.shape[1]))\n else:\n train_x = data_x\n train_y = data_y\n\n\n#Using intercept\n if intercept:\n train_x = np.hstack(\n [train_x,\n np.ones((train_x.shape[0], 1), dtype=train_x.dtype)])\n if valid_fraction > 0:\n valid_x = np.hstack(\n [valid_x,\n np.ones((valid_x.shape[0], 1), dtype=valid_x.dtype)])\n print(\"Size of Train cols=%d & valid cols=%d after adding \"\n \"intercept column\" % (train_x.shape[1], valid_x.shape[1]))\n else:\n print(\"Size of Train cols=%d after adding intercept column\" %\n (train_x.shape[1]))\n\n if classification:\n family = \"logistic\"\n else:\n family = \"elasticnet\"\n if valid_fraction > 0:\n return train_x, train_y, valid_x, valid_y, family\n\n return train_x, train_y, family\n"
] | [
[
"numpy.array",
"numpy.ones",
"pandas.read_csv"
]
] |
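The intercept branch above appends a column of ones so the bias term is fitted like any other coefficient. The transformation by itself:

import numpy as np

train_x = np.random.rand(5, 3).astype('float32')
train_x = np.hstack([train_x, np.ones((train_x.shape[0], 1), dtype=train_x.dtype)])
print(train_x.shape)  # (5, 4): the last column is the intercept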
geez0219/ARC | [
"f2176f0d442d4a2d6028f0770b1efc1a9ae982b8",
"f2176f0d442d4a2d6028f0770b1efc1a9ae982b8"
] | [
"source/meta_compare/language_modeling/sls_language_modeling.py",
"source/normal_compare/image_classification/base_lr.py"
] | [
"import os\n\nimport fastestimator as fe\nimport numpy as np\nimport sls\nimport torch\nimport torch.nn as nn\nimport wget\nfrom fastestimator.op.numpyop import NumpyOp\nfrom fastestimator.op.tensorop import TensorOp\nfrom fastestimator.op.tensorop.loss import CrossEntropy\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\n\n\ndef get_ptb(folder_path, seq_length=64):\n file_names = [\"ptb.train.txt\", \"ptb.valid.txt\", \"ptb.test.txt\"]\n urls = [\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt',\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt'\n ]\n # Read text\n texts = []\n for file_name, url in zip(file_names, urls):\n text = []\n file_path = os.path.join(folder_path, file_name)\n if not os.path.exists(file_path):\n wget.download(url, out=folder_path)\n with open(file_path, 'r') as f:\n for line in f:\n text.extend(line.split() + ['<eos>'])\n texts.append(text)\n # Build dictionary from training data\n vocab = sorted(set(texts[0]))\n word2idx = {u: i for i, u in enumerate(vocab)}\n #convert word to index and split the sequences and discard the last incomplete sequence\n data = [[word2idx[word] for word in text[:-(len(text) % seq_length)]] for text in texts]\n train_data, eval_data, test_data = [np.array(d).reshape(-1, seq_length) for d in data]\n return train_data, eval_data, test_data\n\n\nclass CreateInputAndTarget(NumpyOp):\n def forward(self, data, state):\n return data[:-1], data[1:]\n\n\nclass DimesionAdjust(TensorOp):\n def forward(self, data, state):\n x, y = data\n return x.T, y.T.reshape(-1)\n\n\nclass Perplexity(fe.trace.Trace):\n def on_epoch_end(self, data):\n ce = data[\"ce\"]\n data.write_with_log(self.outputs[0], np.exp(ce))\n\n\nclass BuildModel(nn.Module):\n def __init__(self, vocab_size=10000, embedding_dim=300, rnn_units=600):\n super().__init__()\n self.embed_layer = nn.Embedding(vocab_size, embedding_dim)\n self.lstm_layer = nn.LSTM(embedding_dim, rnn_units)\n self.dropout = nn.Dropout(0.5)\n self.fc = nn.Linear(rnn_units, vocab_size)\n\n nn.init.xavier_uniform_(self.lstm_layer.weight_ih_l0.data)\n nn.init.xavier_uniform_(self.lstm_layer.weight_hh_l0.data)\n\n def forward(self, x):\n x = self.embed_layer(x)\n x, _ = self.lstm_layer(x)\n x = x.view(x.size(0) * x.size(1), x.size(2))\n x = self.dropout(x)\n x = self.fc(x)\n return x\n\n\nclass DummpyUpdate(UpdateOp):\n def forward(self, data, state):\n pass\n\n\nclass SGDLinesSearch(fe.op.tensorop.TensorOp):\n def __init__(self, model, opt, loss_op, inputs, outputs, mode=\"train\"):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.model = model\n self.opt = opt\n self.loss_op = loss_op\n\n def forward(self, data, state):\n x, y = data\n closure = lambda: self.loss_op.forward((self.model(x), y), state=state)\n self.opt.zero_grad()\n loss = self.opt.step(closure=closure)\n return loss\n\n\nclass PrintLR(fe.trace.Trace):\n def __init__(self, opt):\n super().__init__(mode=\"train\")\n self.opt = opt\n\n def on_batch_end(self, data):\n if self.system.global_step % self.system.log_steps == 0 or self.system.global_step == 1:\n data.write_with_log(\"model_lr\", float(self.opt.state['step_size']))\n\n\ndef get_estimator(data_dir, epochs=98, batch_size=128, seq_length=20, vocab_size=10000):\n train_data, _, test_data = get_ptb(folder_path=data_dir, seq_length=seq_length + 1)\n pipeline = fe.Pipeline(train_data=fe.dataset.NumpyDataset(data={\"x\": 
train_data}),\n eval_data=fe.dataset.NumpyDataset(data={\"x\": test_data}),\n batch_size=batch_size,\n ops=CreateInputAndTarget(inputs=\"x\", outputs=(\"x\", \"y\")),\n drop_last=True)\n # step 2\n model = fe.build(model_fn=lambda: BuildModel(vocab_size, embedding_dim=300, rnn_units=600), optimizer_fn=\"sgd\")\n opt = sls.Sls(model.parameters())\n network = fe.Network(ops=[\n DimesionAdjust(inputs=(\"x\", \"y\"), outputs=(\"x\", \"y\")),\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\", mode=None),\n SGDLinesSearch(model=model,\n opt=opt,\n loss_op=CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\", form=\"sparse\", from_logits=True),\n inputs=(\"x\", \"y\"),\n outputs=\"ce\"),\n CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\", form=\"sparse\", from_logits=True, mode=\"eval\"),\n DummpyUpdate(model=model, loss_name=\"ce\")\n ])\n # step 3\n traces = [Perplexity(inputs=\"ce\", outputs=\"perplexity\", mode=\"eval\"), PrintLR(opt=opt)]\n estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)\n return estimator\n",
"import fastestimator as fe\nimport tensorflow as tf\nfrom fastestimator.op.numpyop.meta import Sometimes\nfrom fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\nfrom fastestimator.op.numpyop.univariate import CoarseDropout, Normalize\nfrom fastestimator.op.tensorop.loss import CrossEntropy\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.trace.metric import Accuracy\nfrom tensorflow.python.keras import layers\n\n\ndef residual(x, num_channel):\n x = layers.Conv2D(num_channel, 3, padding='same')(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n x = layers.Conv2D(num_channel, 3, padding='same')(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n return x\n\n\ndef my_model():\n # prep layers\n inp = layers.Input(shape=(32, 32, 3))\n x = layers.Conv2D(64, 3, padding='same')(inp)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n # layer1\n x = layers.Conv2D(128, 3, padding='same')(x)\n x = layers.MaxPool2D()(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n x = layers.Add()([x, residual(x, 128)])\n # layer2\n x = layers.Conv2D(256, 3, padding='same')(x)\n x = layers.MaxPool2D()(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n # layer3\n x = layers.Conv2D(512, 3, padding='same')(x)\n x = layers.MaxPool2D()(x)\n x = layers.BatchNormalization(momentum=0.8)(x)\n x = layers.LeakyReLU(alpha=0.1)(x)\n x = layers.Add()([x, residual(x, 512)])\n # layers4\n x = layers.GlobalMaxPool2D()(x)\n x = layers.Flatten()(x)\n x = layers.Dense(10)(x)\n x = layers.Activation('softmax', dtype='float32')(x)\n model = tf.keras.Model(inputs=inp, outputs=x)\n return model\n\n\ndef get_estimator(init_lr, epochs=30, batch_size=128):\n # step 1\n train_data, eval_data = fe.dataset.data.cifar10.load_data()\n\n pipeline = fe.Pipeline(\n train_data=train_data,\n eval_data=eval_data,\n batch_size=batch_size,\n ops=[\n Normalize(inputs=\"x\", outputs=\"x\", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),\n PadIfNeeded(min_height=40, min_width=40, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n RandomCrop(32, 32, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n Sometimes(HorizontalFlip(image_in=\"x\", image_out=\"x\", mode=\"train\")),\n CoarseDropout(inputs=\"x\", outputs=\"x\", mode=\"train\", max_holes=1)\n ])\n # step 2\n model = fe.build(model_fn=my_model, optimizer_fn=lambda: tf.optimizers.Adam(init_lr)) # 1e-2, 1e-3, 1e-4\n network = fe.Network(ops=[\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\"),\n CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\"),\n UpdateOp(model=model, loss_name=\"ce\")\n ])\n # step 3\n traces = [Accuracy(true_key=\"y\", pred_key=\"y_pred\")]\n estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)\n return estimator\n"
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.nn.LSTM",
"torch.nn.Linear",
"torch.nn.Embedding",
"numpy.exp",
"numpy.array",
"torch.nn.Dropout"
],
[
"tensorflow.python.keras.layers.Add",
"tensorflow.python.keras.layers.MaxPool2D",
"tensorflow.python.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.python.keras.layers.Activation",
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.keras.layers.GlobalMaxPool2D",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.layers.Flatten",
"tensorflow.optimizers.Adam",
"tensorflow.python.keras.layers.LeakyReLU",
"tensorflow.python.keras.layers.Conv2D"
]
] |
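SGDLinesSearch wraps the forward pass and loss in a closure because line-search optimizers such as SLS must re-evaluate the loss at trial step sizes inside step(). The same closure pattern shown with PyTorch's built-in LBFGS so it runs without the sls package (LBFGS additionally requires backward() inside the closure, which SLS handles internally):

import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.LBFGS(model.parameters(), lr=0.1)
x, y = torch.randn(8, 4), torch.randn(8, 1)

def closure():
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()       # re-run for every trial point of the line search
    return loss

loss = opt.step(closure)  # step() may call the closure several times
print(float(loss))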
longyearxuk/sokrg | [
"001fcf8275eb158765de4e99e0d442b1712aa061"
] | [
"visualize_normal_score_transfers.py"
] | [
"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom numpy.random import randn\nfrom krg_utils import transform_normal_scores\n\nr = randn(10000)\nslip_sc = pd.read_csv('slip_nscore_transform_table.csv')\n\nslip = transform_normal_scores(r, slip_sc)\n\navg_slip = slip_sc['x'].sum() / len(slip_sc['x'])\navg_score = slip_sc['nscore'].sum() / len(slip_sc['nscore'])\nprint(avg_slip, slip.mean())\n\n# visualize\nfig, ax = plt.subplots()\nax.plot(slip_sc['x'], slip_sc['nscore'])\nax.axvline(x=avg_slip, linestyle='--', color='black')\nplt.show()"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.show",
"numpy.random.randn",
"matplotlib.pyplot.subplots"
]
] |
rajayalamanchili/image_caption_generator | [
"0d1b8a3262b3dcf9329c4685d9f4026bdf7274db"
] | [
"src/visualization/visualize.py"
] | [
"from src.data.datasets import FlickrDataset\nfrom src.config import config\n\nimport matplotlib.pyplot as plt\nimport torch\nfrom PIL import Image\n\ndef display_img_FlickrDataset(dataset, index=0, predicted_caption=None):\n \n image = Image.open(dataset.images_directory / dataset.image_ids[index])\n caption_txt = \"\\n\".join(dataset.img_caption_dict[dataset.image_ids[index]])\n \n fig = plt.figure(figsize=(30, 12))\n ax = fig.add_subplot(1, 2, 1)\n ax.imshow(image)\n ax.axis(\"off\")\n ax = fig.add_subplot(1, 2, 2)\n ax.text(0,0.1,\"Actual:\", fontsize=15, verticalalignment=\"top\", weight=\"bold\")\n ax.text(0,0.15,caption_txt, fontsize=15, verticalalignment=\"top\", weight=\"bold\")\n ax.text(0,0.4,\"Predicted:\", fontsize=15, verticalalignment=\"top\", weight=\"bold\")\n ax.text(0,0.45,caption_txt, fontsize=15, verticalalignment=\"top\", weight=\"bold\")\n ax.axis(\"off\")\n ax.invert_yaxis()\n \nif __name__ == \"__main__\":\n\n training_dataset = FlickrDataset(file_name=config.CAPTIONS_TRAIN_FILE, dtype=\"train\")\n \n display_img_FlickrDataset(training_dataset, 100)\n "
] | [
[
"matplotlib.pyplot.figure"
]
] |
fhmjones/ocgy-dv-fjversion | [
"176a47d28daabc93821f37decb38fff320491885"
] | [
"dashdir/parse-csv.py"
] | [
"# any work with the data file\n# make a nicer csv to pull from\nimport pandas as pd\nimport gsw\n\n\n# all of the parameters from the full data: 'Longitude [degrees_east]', 'Latitude [degrees_north]',\n# 'PRESSURE [dbar]', 'DEPTH [m]', 'CTDTMP [deg C]', 'CTDSAL', 'SALINITY_D_CONC_BOTTLE', 'SALINITY_D_CONC_PUMP',\n# 'SALINITY_D_CONC_FISH', 'SALINITY_D_CONC_UWAY', 'NITRATE_D_CONC_BOTTLE [umol/kg]', 'NITRATE_D_CONC_PUMP [umol/kg]',\n# 'NITRATE_D_CONC_FISH [umol/kg]', 'NITRATE_D_CONC_UWAY [umol/kg]', 'NITRATE_LL_D_CONC_BOTTLE [umol/kg]',\n# 'NITRATE_LL_D_CONC_FISH [umol/kg]', 'NO2+NO3_D_CONC_BOTTLE [umol/kg]', 'NO2+NO3_D_CONC_FISH [umol/kg]',\n# 'Fe_D_CONC_BOTTLE [nmol/kg]', 'Fe_D_CONC_FISH [nmol/kg]', 'Fe_II_D_CONC_BOTTLE [nmol/kg]', 'Fe_II_D_CONC_FISH [nmol/kg]',\n# 'Fe_S_CONC_BOTTLE [nmol/kg]', 'Fe_S_CONC_FISH [nmol/kg]'\n\ndef average_data(cruise_data):\n # from https://stackoverflow.com/questions/48830324/pandas-average-columns-with-same-value-in-other-columns\n cruise_data = cruise_data.groupby(['Latitude', 'Longitude', 'Station', 'Depth'], as_index=False).mean()\n return cruise_data\n\n\n# removes specifically empty iron data.\ndef remove_empty_data(cruise_data):\n grouped_data = cruise_data.groupby(['Latitude', 'Longitude', 'Station'])\n for name, group in grouped_data:\n if (group['Iron'].isna().values.all()):\n cruise_data = cruise_data.drop(grouped_data.get_group(name).index)\n return cruise_data\n\n\n# add data for [Nitrate] : [Fe] or [Nitrate]/[Fe]\ndef get_nitrate(cruise_data, index, row):\n current_depth = row['Depth']\n min = None\n max = None\n if row['Depth'] <= 100:\n min, max = current_depth - 5, current_depth + 5\n elif row['Depth'] > 100:\n min, max = current_depth - 10, current_depth + 10\n\n avg_nitrate = cruise_data['Nitrate'][((cruise_data.Depth <= max) & (cruise_data.Depth >= min))].mean()\n return avg_nitrate\n\n\ndef add_ratio_data(cruise_data):\n ratio = []\n\n for index, row in cruise_data.iterrows():\n if row['Iron'] is None:\n ratio.append(None)\n else:\n nitrate = get_nitrate(cruise_data, index, row)\n ratio.append(nitrate / row['Iron'])\n\n cruise_data['Ratio'] = ratio\n\n''' \ndef add_ratio_data(cruise_data):\n nit = cruise_data['Nitrate']\n iron = cruise_data['Iron']\n ratio = nit / iron\n\n cruise_data['Ratio'] = ratio\n'''\n\ndef add_density_data(cruise_data):\n #from: http://www.teos-10.org/pubs/gsw/html/gsw_sigma0.html\n practical_salinity = cruise_data['Salinity']\n pressure = cruise_data['Pressure']\n longitude = cruise_data['Longitude']\n latitude = cruise_data['Latitude']\n absolute_salinity = gsw.SA_from_SP(practical_salinity, pressure, longitude, latitude)\n temperature = cruise_data['Temperature']\n sigma0 = gsw.sigma0(absolute_salinity, temperature)\n\n cruise_data['Density'] = sigma0\n\n'''\ndef add_test_density_data(cruise_data):\n practical_salinity = cruise_data['Salinity']\n temperature = cruise_data['Temperature']\n sigma0 = gsw.sigma0(practical_salinity, temperature)\n\n cruise_data['Density'] = sigma0\n'''\n\n# read in original data\nGA03_data = pd.read_csv(\"./data/GA03w.csv\")\nGIPY05_data = pd.read_csv(\"./data/GIPY05e.csv\")\nGP02_data = pd.read_csv(\"./data/GP02w.csv\")\nGIPY04_data = pd.read_csv(\"./data/GIPY04.csv\")\n#TEST_data = pd.read_csv(\"./data/TestCruise.csv\")\n\nheaders = ['Station', 'Latitude', 'Longitude', 'Depth', 'Temperature', 'Salinity', 'Nitrate', 'Iron', 'Pressure']\n\n'''\n#make TEST dataframe\ndata = [TEST_data['Station'], TEST_data['Latitude [degrees_north]'], TEST_data['Longitude [degrees_east]'],\n 
TEST_data['DEPTH [m]'], TEST_data['CTDTMP [deg C]'], TEST_data['CTDSAL'], TEST_data['NO2+NO3_D_CONC_BOTTLE [umol/kg]'],\n TEST_data['Fe_D_CONC_BOTTLE [nmol/kg]'], TEST_data['PRESSURE [dbar]']]\nTEST = pd.concat(data, axis=1, keys=headers)\nadd_density_data(TEST)\nTEST.to_csv('TEST_filtered.csv', index=False)\n'''\n\n\n# make GA03 dataframe and csv\ndata = [GA03_data['Station'], GA03_data['Latitude [degrees_north]'], GA03_data['Longitude [degrees_east]'],\n GA03_data['DEPTH [m]'], GA03_data['CTDTMP [deg C]'], GA03_data['CTDSAL'], GA03_data['NITRATE_D_CONC_BOTTLE [umol/kg]'],\n GA03_data['Fe_D_CONC_BOTTLE [nmol/kg]'], GA03_data['PRESSURE [dbar]']]\nGA03 = pd.concat(data, axis=1, keys=headers)\n# remove unwanted lons and lats\nGA03 = GA03[((GA03.Longitude <= 360 - 60) & (GA03.Longitude >= 360 - 65)) | (GA03.Longitude >= 360 - 25)]\n#GA03 = average_data(GA03)\nadd_ratio_data(GA03)\nadd_density_data(GA03)\nGA03 = remove_empty_data(GA03)\nGA03 = GA03[(GA03.Depth <= 500)]\n\nstations = []\npositions = []\nfor i in range(len(GA03)):\n station = GA03['Station'].values[i]\n lat = GA03['Latitude'].values[i]\n lon = GA03['Longitude'].values[i]\n if len(positions) == 0 or [lat, lon] != positions[-1]:\n positions.append([lat, lon])\n stations.append(station)\n# print(stations)\nfor i in [4]: # choosing specific profiles\n GA03 = GA03.drop(GA03[(GA03.Latitude == positions[i][0]) & (GA03.Longitude == positions[i][1])].index)\nGA03.to_csv('./data/GA03_filtered.csv', index=False)\n\n# make GIPY05 dataframe and csv\ndata = [GIPY05_data['Station'], GIPY05_data['Latitude [degrees_north]'], GIPY05_data['Longitude [degrees_east]'],\n GIPY05_data['DEPTH [m]'],\n GIPY05_data['CTDTMP [deg C]'], GIPY05_data['CTDSAL'], GIPY05_data['NO2+NO3_D_CONC_BOTTLE [umol/kg]'],\n GIPY05_data['Fe_D_CONC_BOTTLE [nmol/kg]'], GIPY05_data['PRESSURE [dbar]']]\nGIPY05 = pd.concat(data, axis=1, keys=headers)\n# remove unwanted lons and lats\nGIPY05 = GIPY05[(GIPY05.Latitude >= -45) | (GIPY05.Latitude <= -65)]\n#GIPY05 = average_data(GIPY05)\nadd_ratio_data(GIPY05)\nadd_density_data(GIPY05)\nGIPY05 = remove_empty_data(GIPY05)\nGIPY05 = GIPY05[(GIPY05.Depth <= 500)]\n\npositions = []\nstations = []\nfor i in range(len(GIPY05)):\n station = GIPY05['Station'].values[i]\n lat = GIPY05['Latitude'].values[i]\n lon = GIPY05['Longitude'].values[i]\n if len(positions) == 0 or [lat, lon] != positions[-1]:\n positions.append([lat, lon])\n stations.append(station)\n# print(stations)\n# for i in []: #choosing specific profiles\n# GIPY05 = GIPY05.drop(GIPY05[(GIPY05.Latitude == positions[i][0]) & (GIPY05.Longitude == positions[i][1])].index)\nGIPY05.to_csv('./data/GIPY05_filtered.csv', index=False)\n\n# make GP02 dataframe and csv\ndata = [GP02_data['Station'], GP02_data['Latitude [degrees_north]'], GP02_data['Longitude [degrees_east]'],\n GP02_data['DEPTH [m]'],\n GP02_data['CTDTMP [deg C]'], GP02_data['CTDSAL'], GP02_data['NO2+NO3_D_CONC_BOTTLE [umol/kg]'],\n GP02_data['Fe_D_CONC_BOTTLE [nmol/kg]'], GP02_data['PRESSURE [dbar]']]\nGP02 = pd.concat(data, axis=1, keys=headers)\n# remove unwanted lons and lats\nGP02 = GP02[(GP02.Longitude <= 155) | (GP02.Longitude >= 180)]\n# GP02 = average_data(GP02)\nadd_ratio_data(GP02)\nadd_density_data(GP02)\nGP02 = remove_empty_data(GP02)\nGP02 = GP02[(GP02.Depth <= 500)]\n\npositions = []\nstations = []\nfor i in range(len(GP02)):\n station = GP02['Station'].values[i]\n lat = GP02['Latitude'].values[i]\n lon = GP02['Longitude'].values[i]\n if len(positions) == 0 or [lat, lon] != positions[-1]:\n 
positions.append([lat, lon])\n stations.append(station)\n# print(stations)\n# for i in []: #choosing specific profiles\n# GP02 = GP02.drop(GP02[(GP02.Latitude == positions[i][0]) & (GP02.Longitude == positions[i][1])].index)\nGP02.to_csv('./data/GP02_filtered.csv', index=False)\n\n# make GIPY04 dataframe and csv\ndata = [GIPY04_data['Station'], GIPY04_data['Latitude [degrees_north]'], GIPY04_data['Longitude [degrees_east]'],\n GIPY04_data['DEPTH [m]'],\n GIPY04_data['CTDTMP [deg C]'], GIPY04_data['CTDSAL'], GIPY04_data['NITRATE_D_CONC_BOTTLE [umol/kg]'],\n GIPY04_data['Fe_D_CONC_BOTTLE [nmol/kg]'], GIPY04_data['PRESSURE [dbar]']]\nGIPY04 = pd.concat(data, axis=1, keys=headers)\n# remove unwanted lons and lats\nGIPY04 = GIPY04[(GIPY04.Latitude >= -45)]\n# GIPY04 = average_data(GIPY04)\nadd_ratio_data(GIPY04)\nadd_density_data(GIPY04)\nGIPY04 = remove_empty_data(GIPY04)\nGIPY04 = GIPY04[(GIPY04.Depth <= 500)]\n\npositions = []\nstations = []\nfor i in range(len(GIPY04)):\n station = GIPY04['Station'].values[i]\n lat = GIPY04['Latitude'].values[i]\n lon = GIPY04['Longitude'].values[i]\n if len(positions) == 0 or [lat, lon] != positions[-1]:\n positions.append([lat, lon])\n stations.append(station)\n# print(stations)\nfor i in [0, 2, 4]: # choosing specific profiles\n GIPY04 = GIPY04.drop(GIPY04[(GIPY04.Latitude == positions[i][0]) & (GIPY04.Longitude == positions[i][1])].index)\nGIPY04.to_csv('./data/GIPY04_filtered.csv', index=False)\n"
] | [
[
"pandas.read_csv",
"pandas.concat"
]
] |
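add_density_data converts practical salinity to absolute salinity before computing the potential density anomaly sigma0. Worth noting: the file feeds CTD temperature straight into gsw.sigma0, which strictly expects Conservative Temperature; the full TEOS-10 sequence on single made-up values would be:

import gsw

SP, t, p = 35.0, 15.0, 100.0  # practical salinity, in-situ temperature [degC], pressure [dbar]
lon, lat = -30.0, 40.0
SA = gsw.SA_from_SP(SP, p, lon, lat)  # absolute salinity [g/kg]
CT = gsw.CT_from_t(SA, t, p)          # conservative temperature [degC]
print(float(gsw.sigma0(SA, CT)))      # potential density anomaly [kg/m^3]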
agtoever/twixtbot-ui | [
"366d7bef33fdbaa260ea8b3330fa9ab29ad05f03"
] | [
"src/plot.py"
] | [
"\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport constants as ct\n\n\nclass ThreeBarPlot():\n\n def __init__(self, canvas, bar_color):\n self.bar_color = bar_color\n self.prepare(canvas)\n\n def update(self, values=None, xmax=None):\n\n # clear the subplot\n ax1 = self.sub_plot\n ax1.clear()\n ax1.invert_yaxis()\n\n if values is not None:\n ind = np.arange(3)\n labels = [str(m).upper() for m in values[\"moves\"]]\n if \"Y\" in values:\n ax1.set_xlim(xmin=0, xmax=xmax)\n ax1.barh(ind, values[\"Y\"],\n color=self.bar_color, tick_label=labels)\n offset = xmax * 0.02\n for i, v in enumerate(values[\"Y\"]):\n ax1.text(v + offset, i + 0.27, str(v),\n color=ct.PLOT_LABEL_COLOR,\n fontfamily=ct.PLOT_LABEL_FONT[0],\n fontsize=ct.PLOT_LABEL_FONT[1])\n\n plt.subplots_adjust(left=None, bottom=None,\n right=None, top=None, wspace=0, hspace=0)\n self.agg.draw()\n\n def prepare(self, canvas):\n\n fig, ax1 = plt.subplots(figsize=(2.4, 0.7))\n\n ax1.tick_params(axis='x', which='major', labelcolor=\"black\", top=False,\n labeltop=False, labelbottom=False, bottom=False)\n ax1.tick_params(axis='y', which='major', labelcolor=\"black\",\n labelleft=True, labelsize=8, pad=.8)\n\n ax1.spines['bottom'].set_visible(False)\n ax1.spines['top'].set_visible(False)\n ax1.spines['right'].set_visible(False)\n\n agg = FigureCanvasTkAgg(fig, canvas)\n agg.get_tk_widget().pack()\n\n self.sub_plot = ax1\n self.agg = agg\n\n\nclass EvalHistPlot():\n def __init__(self, canvas, stgs):\n self.sub_plot = None\n self.agg = None\n self.prepare(canvas)\n self.stgs = stgs\n\n def sc_to_color(self, sc):\n if sc > 0:\n return self.stgs.get(ct.K_COLOR[1])\n return self.stgs.get(ct.K_COLOR[2])\n\n def update(self, values=None):\n # clear the subplot\n ax1 = self.sub_plot\n ax1.clear()\n\n if values is not None:\n ax1.bar(values.keys(), values.values(),\n color=list(map(self.sc_to_color, values.values())))\n\n xmax = max(10, len(values))\n plt.xlim(-1, xmax)\n plt.xticks(np.arange(0, xmax, xmax // 6))\n plt.ylim([-1, 1])\n\n plt.subplots_adjust(left=None, bottom=0.3,\n right=None, top=0.9, wspace=0, hspace=0)\n self.agg.draw()\n\n def prepare(self, canvas):\n fig, ax1 = plt.subplots(figsize=(2.4, 0.7))\n\n ax1.tick_params(axis='x', which='major', labelcolor=\"black\",\n labelsize=8, pad=.8, top=False, bottom=False)\n ax1.tick_params(axis='y', which='major', labelcolor=\"black\",\n labelsize=8, pad=.8)\n ax1.autoscale(True, axis='x', tight=True)\n\n ax1.spines['bottom'].set_visible(False)\n ax1.spines['top'].set_visible(False)\n ax1.spines['right'].set_visible(False)\n\n agg = FigureCanvasTkAgg(fig, canvas)\n agg.get_tk_widget().pack()\n\n self.sub_plot = ax1\n self.agg = agg\n"
] | [
[
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"numpy.arange",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylim"
]
] |
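Both plot classes above push Matplotlib figures into a Tk canvas through FigureCanvasTkAgg and redraw in place by clearing and refilling the same axes. The embed-and-redraw skeleton without the twixtbot specifics:

import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = tk.Tk()
fig, ax = plt.subplots(figsize=(2.4, 0.7))
agg = FigureCanvasTkAgg(fig, root)
agg.get_tk_widget().pack()

ax.clear()                    # reuse the same axes on every update
ax.barh(range(3), [3, 1, 2])
agg.draw()                    # render the new frame into the Tk widget
root.mainloop()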
hanouticelina/reinforcement-learning | [
"c7c6765486ea9546bbd8ce75e6032a408a1410cf"
] | [
"TME 8. DDPG/utils.py"
] | [
"import time\nimport subprocess\nfrom collections import namedtuple,defaultdict\nimport logging\nimport json\nimport os\nimport yaml\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nimport threading\nimport numpy as np\nimport gym\nfrom collections import deque\nimport random\nimport torch.autograd\nfrom torch.autograd import Variable\n\nclass OUNoise(object):\n def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):\n self.mu = mu\n self.theta = theta\n self.sigma = max_sigma\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.decay_period = decay_period\n self.action_dim = action_space.shape[0]\n self.low = action_space.low\n self.high = action_space.high\n self.reset()\n \n def reset(self):\n self.state = np.ones(self.action_dim) * self.mu\n \n def evolve_state(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)\n self.state = x + dx\n return self.state\n \n def get_action(self, action, t=0):\n ou_state = self.evolve_state()\n self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)\n return np.clip(action + ou_state, self.low, self.high)\n\n\n\ndef loadTensorBoard(outdir):\n t = threading.Thread(target=launchTensorBoard, args=([outdir]))\n t.start()\n\ndef launchTensorBoard(tensorBoardPath):\n print('tensorboard --logdir=' + tensorBoardPath)\n ret=os.system('tensorboard --logdir=' + tensorBoardPath)\n if ret!=0:\n syspath = os.path.dirname(sys.executable)\n print(os.path.dirname(sys.executable))\n ret = os.system(syspath+\"/\"+'tensorboard --logdir=' + tensorBoardPath)\n return\n\nclass Orn_Uhlen:\n def __init__(self, n_actions, mu=0, theta=0.15, sigma=0.2):\n self.n_actions = n_actions\n self.X = np.ones(n_actions) * mu\n self.mu = mu\n self.sigma = sigma\n self.theta = theta\n\n def reset(self):\n self.X = np.ones(self.n_actions) * self.mu\n\n def sample(self):\n dX = self.theta * (self.mu - self.X)\n dX += self.sigma * np.random.randn(self.n_actions)\n self.X += dX\n return torch.FloatTensor(self.X)\n\nclass FeatureExtractor(object):\n def __init__(self):\n super().__init__()\n\n def getFeatures(self,obs):\n pass\n\nclass NothingToDo(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n ob=env.reset()\n self.outSize=len(ob)\n\n def getFeatures(self,obs):\n return obs\n\n###### Pour Gridworld #############################\"\n\nclass MapFromDumpExtractor(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n outSize = env.start_grid_map.reshape(1, -1).shape[1]\n self.outSize=outSize\n\n def getFeatures(self, obs):\n #prs(obs)\n return obs.reshape(1,-1)\n\nclass MapFromDumpExtractor2(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n outSize=env.start_grid_map.reshape(1, -1).shape[1]\n self.outSize=outSize*3\n\n def getFeatures(self, obs):\n state=np.zeros((3,np.shape(obs)[0],np.shape(obs)[1]))\n state[0]=np.where(obs == 2,1,state[0])\n state[1] = np.where(obs == 4, 1, state[1])\n state[2] = np.where(obs == 6, 1, state[2])\n return state.reshape(1,-1)\n\n\n\n\nclass DistsFromStates(FeatureExtractor):\n def __init__(self,env):\n super().__init__()\n self.outSize=16\n\n def getFeatures(self, obs):\n #prs(obs)\n #x=np.loads(obs)\n x=obs\n #print(x)\n astate = list(map(\n lambda x: x[0] if len(x) > 0 else None,\n np.where(x == 2)\n ))\n astate=np.array(astate)\n a3=np.where(x == 3)\n d3=np.array([0])\n if len(a3[0])>0:\n astate3 = 
np.concatenate(a3).reshape(2,-1).T\n d3=np.power(astate-astate3,2).sum(1).min().reshape(1)\n\n #d3 = np.array(d3).reshape(1)\n a4 = np.where(x == 4)\n d4 = np.array([0])\n if len(a4[0]) > 0:\n astate4 = np.concatenate(a4).reshape(2,-1).T\n d4 = np.power(astate - astate4, 2).sum(1).min().reshape(1)\n #d4 = np.array(d4)\n a5 = np.where(x == 5)\n d5 = np.array([0])\n #prs(a5)\n if len(a5[0]) > 0:\n astate5 = np.concatenate(a5).reshape(2,-1).T\n d5 = np.power(astate - astate5, 2).sum(1).min().reshape(1)\n #d5 = np.array(d5)\n a6 = np.where(x == 6)\n d6 = np.array([0])\n if len(a6[0]) > 0:\n astate6 = np.concatenate(a6).reshape(2,-1).T\n d6 = np.power(astate - astate6, 2).sum(1).min().reshape(1)\n #d6=np.array(d6)\n\n #prs(\"::\",d3,d4,d5,d6)\n ret=np.concatenate((d3,d4,d5,d6)).reshape(1,-1)\n ret=np.dot(ret.T,ret)\n return ret.reshape(1,-1)\n\n#######################################################################################\n\n\n\n\n\n\n# class Qfunction(nn.Module):\n# def __init__(self):\n# super(Qfunction,self).__init__()\n#\n# def setcuda(self, device):\n#\n# #FeatureExtractor.floatTensor = torch.cuda.FloatTensor(1, device=device)\n# #FeatureExtractor.longTensor = torch.cuda.LongTensor(1, device=device)\n# self.cuda(device=device)\n\n\n\n\n\n\nclass convMDP(nn.Module):\n def __init__(self, inSize, outSize, layers=[], convs=None, finalActivation=None, batchNorm=False,init_batchNorm=False,activation=torch.tanh):\n super(convMDP, self).__init__()\n #print(inSize,outSize)\n\n self.inSize=inSize\n self.outSize=outSize\n self.batchNorm=batchNorm\n self.init_batchNorm = init_batchNorm\n self.activation=activation\n\n self.convs=None\n if convs is not None:\n self.convs = nn.ModuleList([])\n for x in convs:\n self.convs.append(nn.Conv2d(x[0], x[1], x[2], stride=x[3]))\n inSize = np.sqrt(inSize / x[0])\n inSize=((inSize-x[2])/x[3])+1\n inSize=inSize*inSize*x[1]\n #print(inSize)\n\n self.layers = nn.ModuleList([])\n self.bn = nn.ModuleList([])\n i=0\n if batchNorm or init_batchNorm:\n self.bn.append(nn.BatchNorm1d(num_features=inSize))\n for x in layers:\n self.layers.append(nn.Linear(inSize, x))\n if batchNorm:\n self.bn.append(nn.BatchNorm1d(num_features=x))\n\n #nn.init.xavier_uniform_(self.layers[i].weight)\n nn.init.normal_(self.layers[i].weight.data, 0.0, 0.02)\n nn.init.normal_(self.layers[i].bias.data,0.0,0.02)\n i+=1\n inSize = x\n self.layers.append(nn.Linear(inSize, outSize))\n\n #nn.init.uniform_(self.layers[-1].weight)\n nn.init.normal_(self.layers[-1].weight.data, 0.0, 0.02)\n nn.init.normal_(self.layers[-1].bias.data, 0.0, 0.02)\n self.finalActivation=finalActivation\n\n def setcuda(self, device):\n self.cuda(device=device)\n\n\n\n def forward(self, x):\n #print(\"d\", x.size(),self.inSize)\n x=x.view(-1,self.inSize)\n\n if self.convs is not None:\n\n n=x.size()[0]\n i=0\n for c in self.convs:\n if i==0:\n w=np.sqrt(x.size()[1])\n x=x.view(n,c.in_channels,w,w)\n x=c(x)\n x=self.activation(x)\n i+=1\n x=x.view(n,-1)\n\n #print(x.size())\n if self.batchNorm or self.init_batchNorm:\n x=self.bn[0](x)\n x = self.layers[0](x)\n\n\n for i in range(1, len(self.layers)):\n x = self.activation(x)\n #if self.drop is not None:\n # x = nn.drop(x)\n if self.batchNorm:\n x = self.bn[i](x)\n x = self.layers[i](x)\n\n\n if self.finalActivation is not None:\n x=self.finalActivation(x)\n #print(\"f\",x.size())\n return x\n\nclass NN(nn.Module):\n def __init__(self, inSize, outSize, layers=[]):\n super(NN, self).__init__()\n self.layers = nn.ModuleList([])\n for x in layers:\n 
self.layers.append(nn.Linear(inSize, x))\n inSize = x\n self.layers.append(nn.Linear(inSize, outSize))\n\n def setcuda(self, device):\n self.cuda(device=device)\n\n def forward(self, x):\n x = self.layers[0](x)\n for i in range(1, len(self.layers)):\n x = torch.tanh(x)\n x = self.layers[i](x)\n\n return x\n\n\nclass Critic(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Critic, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, output_size)\n\n def forward(self, state, action):\n \"\"\"\n Params state and actions are torch tensors\n \"\"\"\n x = torch.cat([state, action], 1)\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n\n return x\n\nclass Actor(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, learning_rate = 3e-4):\n super(Actor, self).__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, output_size)\n \n def forward(self, state):\n \"\"\"\n Param state is a torch tensor\n \"\"\"\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = torch.tanh(self.linear3(x))\n\n return x\n\nclass LogMe(dict):\n def __init__(self,writer,term=True):\n self.writer = writer\n self.dic = defaultdict(list)\n self.term = term\n def write(self,i):\n if len(self.dic)==0: return\n s=f\"Epoch {i} : \"\n for k,v in self.dic.items():\n self.writer.add_scalar(k,sum(v)*1./len(v),i)\n s+=f\"{k}:{sum(v)*1./len(v)} -- \"\n self.dic.clear()\n if self.term: logging.info(s)\n def update(self,l):\n for k,v in l:\n self.add(k,v)\n def direct_write(self,k,v,i):\n self.writer.add_scalar(k,v,i)\n def add(self,k,v):\n self.dic[k].append(v)\n\ndef save_src(path):\n current_dir = os.getcwd()\n package_dir = current_dir.split('RL', 1)[0]\n #path = os.path.abspath(path)\n os.chdir(package_dir)\n #print(package_dir)\n src_files = subprocess.Popen(('find', 'RL', '-name', '*.py', '-o', '-name', '*.yaml'),\n stdout=subprocess.PIPE)\n #print(package_dir,path)\n #path=os.path.abspath(path)\n\n\n #print(str(src_files))\n\n subprocess.check_output(('tar', '-zcf', path+\"/arch.tar\", '-T', '-'), stdin=src_files.stdout, stderr=subprocess.STDOUT)\n src_files.wait()\n os.chdir(current_dir)\n\n\n\ndef prs(*args):\n st = \"\"\n for s in args:\n st += str(s)\n print(st)\n\n\nclass DotDict(dict):\n \"\"\"dot.notation access to dictionary attributes (Thomas Robert)\"\"\"\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\ndef load_yaml(path):\n with open(path, 'r') as stream:\n opt = yaml.load(stream,Loader=yaml.Loader)\n return DotDict(opt)\n\ndef write_yaml(file,dotdict):\n d=dict(dotdict)\n with open(file, 'w', encoding='utf8') as outfile:\n yaml.dump(d, outfile, default_flow_style=False, allow_unicode=True)\n\n\nclass EpsilonGreedyDecay:\n def __init__(self, epsilon, eta, epsilon_min):\n self.eta = eta\n self.epsilon=epsilon\n self.epsilon_min=epsilon_min\n def act(self, episode, q_values):\n decay = self.epsilon / (1 + (self.eta * episode))\n if decay<self.epsilon_min:\n decay=self.epsilon_min\n if np.random.random() > decay:\n _,action = torch.max(q_values,0) # we take the action that maximize the q_value\n return action.item()\n return np.random.randint(len(q_values))"
] | [
[
"numpy.ones",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.max",
"torch.cat",
"torch.nn.BatchNorm1d",
"numpy.concatenate",
"torch.nn.init.normal_",
"torch.tanh",
"numpy.where",
"numpy.power",
"numpy.array",
"torch.FloatTensor",
"torch.nn.Linear",
"numpy.random.randn",
"numpy.random.random",
"numpy.clip",
"numpy.shape",
"numpy.sqrt",
"numpy.dot"
]
] |
dhrubokarmaker/RoeBot | [
"fbd86c6c2e5930b0ec41be1b6001ad182cb8e49c"
] | [
"main.py"
] | [
"import nltk \nfrom nltk.stem.lancaster import LancasterStemmer\nfrom nltk.tokenize import word_tokenize\nfrom tensorflow.python.ops.gen_array_ops import expand_dims_eager_fallback\n\nstemmer = LancasterStemmer()\n\nimport numpy\nimport tflearn\nimport random\nimport json\nimport tensorflow as tf\nimport pickle\nimport discord\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nwith open(\"intents.json\") as file:\n data = json.load(file)\ntry:\n with open(\"data.pickle\",\"rb\") as f:\n words,labels,training,output = pickle.load(f)\nexcept:\n words = []\n labels = []\n docs_x = []\n docs_y = []\n\n for intent in data[\"intents\"]:\n for pattern in intent[\"patterns\"]:\n wrds = nltk.word_tokenize(pattern)\n words.extend(wrds)\n docs_x.append(wrds)\n docs_y.append(intent[\"tag\"])\n\n if intent[\"tag\"] not in labels:\n labels.append(intent[\"tag\"])\n\n words = [stemmer.stem(w.lower()) for w in words if w != \"?\"]\n words = sorted(list(set(words)))\n\n labels = sorted(labels)\n\n training = []\n output = []\n\n out_empty = [0 for _ in range(len(labels))]\n\n\n for x,doc in enumerate(docs_x):\n bag = []\n wrds = [stemmer.stem(w) for w in doc]\n for w in words:\n if w in wrds:\n bag.append(1)\n else:\n bag.append(0)\n \n output_row = out_empty[:]\n\n output_row[labels.index(docs_y[x])] = 1\n\n training.append(bag)\n output.append(output_row)\n\n training = numpy.array(training)\n output = numpy.array(output)\n\n with open(\"data.pickle\",\"wb\") as f:\n pickle.dump((words,labels,training,output),f)\n\n\ntf.compat.v1.reset_default_graph()\nnet = tflearn.input_data(shape=[None,len(training[0])])\nnet = tflearn.fully_connected(net,8)\nnet = tflearn.fully_connected(net,8)\nnet = tflearn.fully_connected(net,len(output[0]),activation=\"softmax\")\nnet = tflearn.regression(net)\nmodel = tflearn.DNN(net)\n\ntry:\n model.load(\"model.tflearn\")\nexcept:\n model.fit(training,output,n_epoch=1000,batch_size=8,show_metric=True)\n model.save(\"model.tflearn\") \n \n \ndef bag_of_words(s,words):\n bag = [0 for _ in range(len(words))]\n s_words = nltk.word_tokenize(s)\n s_words = [stemmer.stem(word.lower()) for word in s_words]\n\n for s_word in s_words:\n for i,w in enumerate(words):\n if w == s_word:\n bag[i] = 1\n\n return numpy.array(bag)\n\ndef chat(message):\n result = model.predict([bag_of_words(message,words)])[0]\n result_index = numpy.argmax(result)\n if result[result_index] > 0.80:\n tag = labels[result_index]\n for intent in data[\"intents\"]:\n if intent[\"tag\"] == tag:\n responses = intent[\"responses\"]\n \n return random.choice(responses)\n else:\n return \"Didn't get that :(\"\n\n\nclient = discord.Client()\n\[email protected]\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('$roe'):\n text = message.content.lstrip('$roe')\n await message.channel.send(chat(text))\n \nclient.run(os.getenv('TOKEN'))\n\n\n"
] | [
[
"tensorflow.compat.v1.reset_default_graph",
"numpy.array",
"numpy.argmax"
]
] |
joegle/hrv-biofeedback | [
"08152889798d41bd9246c4550174377bf3eaa8f1"
] | [
"python-heart/examples/record.py"
] | [
"#!/usr/bin/env python2\nfrom __future__ import print_function\nimport heart\nimport datetime\nimport time\nimport sys\nimport numpy as np\nimport argparse\nimport os\nimport stat\n\nclass recorder(heart.Heart_Monitor):\n \"\"\"Command line tool that records the Arduino heart beat data into timestamped file\"\"\"\n\n def __init__(self, args):\n heart.Heart_Monitor.__init__(self,args.source)\n now = datetime.datetime.now()\n \n start_time = now.strftime('%Y-%m-%d-%H:%M:%S')\n stat_mode = os.stat(args.source).st_mode\n if stat.S_ISREG(stat_mode) or args.test:\n print(\"TESTING Not writing data to anywhere\")\n self.datafile = open(\"/dev/null\",\"w\")\n else:\n self.datafile = open(start_time+\".txt\",\"w\")\n print(\"Writing data to '%s'\" % (self.datafile.name))\n \n #self.datafile.write(\"# %s\"%(start_time))\n self.datafile.write(\"# R wave intervals in milliseconds per line\\n\")\n if args.message:\n self.log(\"annotation: \" + args.message)\n\n def fifteen(self):\n # Fifteen minute mark\n print(\"$\",end='')\n \n def log(self, message):\n self.datafile.write(\"# %s\\n\"%(message))\n \n def start(self):\n \"\"\"Begins the infinite loop of detecting heart beats\"\"\"\n sys.stderr.write(\"Starting monitor (Control-C to quit)\\n\")\n self.datafile.write(\"# start time: %s, %s\\n\"%(datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'),time.time()))\n while True:\n self.listen_for_beat()\n\n def on_beat(self):\n #self.stream.write(chr(self.beat_time%255))\n print(self.beat_time, file=self.datafile)\n char = \".\"\n \n if np.sum(self.RR_intervals)/60000 >= 15:\n char = '$'\n \n print(char, end=\"\")\n sys.stdout.flush()\n\n def session_summary(self):\n print(\"\\n= Session Summary =\")\n \n print(\"File: {0}\".format(self.datafile.name))\n print(\"Beats: {0:>6}\".format(len(self.RR_intervals)))\n print(\"Time: {0:>7} minutes\".format(round(np.sum(self.RR_intervals)/60000,2)))\n print(\"Mean: {0:>7}\".format(round(np.average(self.RR_intervals), 2)))\n print(\"STD: {0:>8}\".format(round(np.std(self.RR_intervals), 2)))\n print(\"BPM: {0:>8}\".format(round(60000/np.average(self.RR_intervals), 2)))\n\n def on_quit(self):\n self.datafile.write(\"# end time: %s, %s\\n\"%(datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'),time.time()))\n sys.stderr.write(\"Quitting monitor\\n\") \n self.session_summary()\n self.datafile.close()\n\nguide = \"\"\"# Examples\n./record.py sample.txt\n./record.py /dev/ttyUSB0\n./record.py /dev/usbmodem1411\n\"\"\"\n\nparser = argparse.ArgumentParser(description='Record heart beat intervals',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=guide)\n\nparser.add_argument('-m','--message', help='Log a message')\nparser.add_argument('-t','--test', help=\"Test run\", action='store_true')\nparser.add_argument('source',help=\"Serial device or test data file (/dev/ttyUSB0, /dev/tty.usbmodem1411, sample.txt)\")\n\nargs = parser.parse_args()\n\n\ndef main():\n session = recorder(args)\n session.start()\n\nif __name__ == \"__main__\":\n main()\n\n"
] | [
[
"numpy.sum",
"numpy.std",
"numpy.average"
]
] |
jasonleeinf/nmtlab | [
"122b70cc226d9ce17ad106a3bd3a5318bd3b359f"
] | [
"nmtlab/trainers/hvd_utils.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport torch\nimport horovod.torch as hvd\n\n\ndef broadcast_optimizer_state(optimizer, root_rank):\n \"\"\"\n This function is copied from the newest horovod version.\n But the newest version has to be compiled with gcc7\n \"\"\"\n if isinstance(optimizer, torch.optim.LBFGS):\n # TODO(travis): L-BFGS cannot be easily supported without serializing\n # the entire state_dict, as its structure is deeply nested and contains\n # None type parameter values\n raise ValueError('cannot broadcast torch.optim.LBFGS state')\n\n state_dict = optimizer.state_dict()\n\n # Newly created optimizers will not have their state initialized, so\n # do that initialization here\n if len(state_dict['state']) == 0:\n for group in optimizer.param_groups:\n for p in group['params']:\n p.grad = torch.autograd.Variable(\n p.data.new(p.size()).zero_())\n optimizer.step()\n state_dict = optimizer.state_dict()\n\n params = []\n callbacks = {}\n occurrences = collections.defaultdict(int)\n\n # Some optimizer parameters may be represented as scalars instead of\n # tensors. In such cases, we need to wrap the scalar in a tensor, then\n # broadcast, then update the appropriate value in the state_dict with the\n # new unwrapped scalar value via a callback.\n def _create_callback(pid, name, t, p):\n def _from_tensor():\n state_dict['state'][pid][name] = t(p.numpy()[0])\n return _from_tensor\n\n # Groups are unordered, but their params will be distinct\n for group in state_dict['param_groups']:\n # The params list here is ordered by the layers in the model\n for pid in group['params']:\n if pid not in state_dict['state']:\n continue\n param_state = state_dict['state'][pid]\n for name, p in param_state.items():\n # Some parameter names may appear more than once, in which\n # case we ensure they have a unique identifier defined by\n # their order\n occurrences[name] += 1\n key = '%s.%d' % (str(name), occurrences[name])\n\n if not torch.is_tensor(p):\n # Wrap the scalar in a FloatTensor, and remember its type\n # so we can cast it back after unwrapping\n t = type(p)\n p = torch.Tensor([p])\n callbacks[key] = _create_callback(pid, name, t, p)\n\n params.append((key, p))\n\n # Synchronized broadcast of all parameters\n hvd.broadcast_parameters(params, root_rank)\n\n # Post-broadcast clenaup for non-tensor parameters\n for key, p in params:\n if key in callbacks:\n callbacks[key]()\n"
] | [
[
"torch.is_tensor",
"torch.Tensor"
]
] |
YuehChuan/nnom | [
"68af27a0631244f2bb78cd4e4f2da916f122991a"
] | [
"examples/keyword_spotting/model/mfcc.py"
] | [
"\n\nfrom python_speech_features import mfcc\nimport scipy.io.wavfile as wav\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nimport os\nimport random\n\ndef load_noise(path='dat/_background_noise_/'):\n noise = []\n files = os.listdir(path)\n for f in files:\n filename = f\n if ('wav' not in filename):\n continue\n f = os.path.join(path, f)\n (rate, sig) = wav.read(f)\n noise.append(sig)\n return noise\n\ndef generate_mfcc(sig, rate, sig_len, noise=None, noise_weight=0.1, winlen=0.03125, winstep=0.03125/2, numcep=13, nfilt=26, nfft=512, lowfreq=20, highfreq=4000, winfunc=np.hanning, ceplifter=0, preemph=0.97):\n if(len(sig) != sig_len):\n if(len(sig)< sig_len):\n sig = np.pad(sig, (0, sig_len - len(sig)), 'constant')\n if(len(sig) >sig_len):\n sig = sig[0:sig_len]\n # i dont know, 'tensorflow' normalization\n sig = sig.astype('float') / 32768\n\n if(noise is not None):\n noise = noise[random.randint(0, len(noise)-1)] # pick a noise\n start = random.randint(0, len(noise)-sig_len) # pick a sequence\n noise = noise[start:start+sig_len]\n noise = noise.astype('float')/32768\n sig = sig * (1-noise_weight) + noise * noise_weight\n #wav.write('noise_test.wav', rate, sig)\n mfcc_feat = mfcc(sig, rate, winlen=winlen, winstep=winstep, numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,\n highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)\n mfcc_feat = mfcc_feat.astype('float32')\n return mfcc_feat\n\ndef merge_mfcc_file(input_path='dat/', mix_noise=True, sig_len=16000, winlen=0.03125, winstep=0.03125/2, numcep=13, nfilt=26, nfft=512,\n lowfreq=20, highfreq=4000, winfunc=np.hanning, ceplifter=0, preemph=0.97):\n\n train_data = []\n test_data = []\n validate_data = []\n train_lable = []\n test_label = []\n validate_label =[]\n\n if mix_noise:\n noise = load_noise()\n else:\n noise = None\n\n with open(input_path + 'testing_list.txt', 'r') as f:\n test_list = f.read()\n with open(input_path + 'validation_list.txt', 'r') as f:\n validate_list = f.read()\n\n files = os.listdir(input_path)\n for fi in files:\n fi_d = os.path.join(input_path, fi)\n # folders of each cmd\n if os.path.isdir(fi_d):\n label = fi_d.split('/')[1] # get the label from the dir\n print(label)\n # noise in training\n if 'noise' in label:\n for f in os.listdir(fi_d):\n filename = f\n if('wav' not in filename):\n continue\n f = os.path.join(fi_d, f)\n (rate, sig) = wav.read(f)\n for i in range(0, len(sig), sig_len):\n data = generate_mfcc(sig[i:i+sig_len], rate, sig_len, winlen=winlen, winstep=winstep, numcep=numcep,\n nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,\n highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)\n data = np.array(data) # ?? no idea why this works\n train_data.append(data)\n train_lable.append('noise')\n\n continue\n # dataset\n for f in os.listdir(fi_d):\n filename = f\n f = os.path.join(fi_d, f)\n (rate, sig) = wav.read(f)\n data = generate_mfcc(sig, rate, sig_len, noise=noise, winlen=winlen, winstep=winstep, numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,\n highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)\n data = np.array(data) # ?? 
no idea why this works\n\n # split dataset into train, test, validate\n if filename in test_list:\n test_data.append(data)\n test_label.append(label)\n elif filename in validate_list:\n validate_data.append(data)\n validate_label.append(label)\n else:\n train_data.append(data)\n train_lable.append(label)\n\n # finalize\n train_data = np.array(train_data)\n test_data = np.array(test_data)\n validate_data = np.array(validate_data)\n\n return (train_data, train_lable), (test_data, test_label), (validate_data, validate_label)\n\n\nif __name__ == \"__main__\":\n\n # test\n (x_train, y_train), (x_test, y_test), (x_val, y_val) = merge_mfcc_file()\n\n np.save('train_data.npy', x_train)\n np.save('train_label.npy', y_train)\n np.save('test_data.npy', x_test)\n np.save('test_label.npy', y_test)\n np.save('val_data.npy', x_val)\n np.save('val_label.npy', y_val)\n\n print('x_train shape:', x_train.shape, 'max', x_train.max(), 'min', x_train.min())\n\n mfcc_feat = x_train[3948]\n mfcc_feat = np.swapaxes(mfcc_feat, 0, 1)\n ig, ax = plt.subplots()\n cax = ax.imshow(mfcc_feat, interpolation='nearest', origin='lower', aspect='auto')\n ax.set_title('MFCC')\n plt.show()\n"
] | [
[
"numpy.save",
"numpy.swapaxes",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"scipy.io.wavfile.read",
"numpy.array"
]
] |
brjdenis/qaserver | [
"93a4c3272cf38199e7ef67d1285a9ffacef46883"
] | [
"pyqaserver/picketfence_module.py"
] | [
"import sys\nimport os\nimport tempfile\nfrom multiprocessing import Pool\nimport datetime\nimport numpy as np\nimport matplotlib.style\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib.figure import Figure\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n# To revert back to matplotlib 1.0 style\nmatplotlib.style.use('classic')\n\nfrom pylinac.core.profile import SingleProfile as PylinacSingleProfile\n\nparent_module = sys.modules['.'.join(__name__.split('.')[:-1]) or '__main__']\nif __name__ == '__main__' or parent_module.__name__ == '__main__':\n #sys.path.append(os.path.abspath(os.path.realpath(\"python_packages\")))\n import config\n from python_packages.bottlepy.bottle import Bottle, request, TEMPLATE_PATH, template, redirect, response\n import general_functions\n import RestToolbox_modified as RestToolbox\n from python_packages import mpld3\nelse:\n from . import config\n from .python_packages.bottlepy.bottle import Bottle, request, TEMPLATE_PATH, template, redirect, response\n from . import general_functions\n from . import RestToolbox_modified as RestToolbox\n from .python_packages import mpld3\n\nCUR_DIR = os.path.realpath(os.path.dirname(__file__))\n\n# Path to Bottle templates\nTEMPLATE_PATH.insert(0, os.path.join(CUR_DIR, 'views'))\n\n# Url to some mpld3 library\nD3_URL = config.D3_URL\nMPLD3_URL = config.MPLD3_URL\n\nPI = np.pi\n\n# MLC type for PicketFence analysis:\nLEAF_TYPE = [\"Varian_120\", \"Varian_120HD\", \"Varian_80\", \"Elekta_80\", \"Elekta_160\"]\n\n# Here starts the bottle server\npf_app = Bottle()\n\n@pf_app.route('/picket_fence', method=\"POST\")\ndef picket_fence():\n\n displayname = request.forms.hidden_displayname\n username = request.get_cookie(\"account\", secret=config.SECRET_KEY)\n if not username:\n redirect(\"/login\")\n try:\n variables = general_functions.Read_from_dcm_database()\n variables[\"displayname\"] = displayname\n except ConnectionError:\n return template(\"error_template\", {\"error_message\": \"Orthanc is refusing connection.\"})\n variables[\"LEAF_TYPE\"] = LEAF_TYPE\n response.set_cookie(\"account\", username, secret=config.SECRET_KEY, samesite=\"lax\")\n return template(\"picket_fence\", variables)\n\ndef picket_fence_helperf_catch_error(args):\n try:\n return picket_fence_helperf(args)\n except Exception as e:\n return template(\"error_template\", {\"error_message\": str(e)})\n\ndef picket_fence_helperf(args):\n '''This function is used in order to prevent memory problems'''\n temp_folder = args[\"temp_folder\"]\n file_path = args[\"file_path\"]\n clip_box = args[\"clip_box\"]\n py_filter = args[\"py_filter\"]\n num_pickets = args[\"num_pickets\"]\n sag = args[\"sag\"]\n mlc = args[\"mlc\"]\n invert = args[\"invert\"]\n orientation = args[\"orientation\"]\n w = args[\"w\"]\n imgdescription = args[\"imgdescription\"]\n station = args[\"station\"]\n displayname = args[\"displayname\"]\n acquisition_datetime = args[\"acquisition_datetime\"]\n general_functions.set_configuration(args[\"config\"]) # Transfer to this process\n \n # Chose module:\n if mlc in [\"Varian_80\", \"Elekta_80\", \"Elekta_160\"]:\n use_original_pylinac = \"False\"\n else:\n use_original_pylinac = \"True\"\n \n # Collect data for \"save results\"\n dicomenergy = general_functions.get_energy_from_imgdescription(imgdescription)\n user_machine, user_energy = general_functions.get_user_machine_and_energy(station, dicomenergy)\n machines_and_energies = general_functions.get_machines_and_energies(general_functions.get_treatmentunits_picketfence())\n 
tolerances = general_functions.get_tolerance_user_machine_picketfence(user_machine) # If user_machne has specific tolerance\n if not tolerances:\n action_tolerance, tolerance, generate_pdf_report = general_functions.get_settings_picketfence()\n else:\n action_tolerance, tolerance, generate_pdf_report = tolerances[0]\n\n tolerance = float(tolerance)\n action_tol = float(action_tolerance)\n \n save_results = {\n \"user_machine\": user_machine,\n \"user_energy\": user_energy,\n \"machines_and_energies\": machines_and_energies,\n \"displayname\": displayname\n }\n\n # Import either original pylinac module or the modified module\n if use_original_pylinac == \"True\":\n from pylinac import PicketFence as PicketFence # Original pylinac analysis\n else:\n if __name__ == '__main__' or parent_module.__name__ == '__main__':\n from python_packages.pylinac.picketfence_modified import PicketFence as PicketFence\n else:\n from .python_packages.pylinac.picketfence_modified import PicketFence as PicketFence\n\n try:\n pf = PicketFence(file_path, filter=py_filter)\n except Exception as e:\n return template(\"error_template\", {\"error_message\": \"Module PicketFence cannot calculate. \"+str(e)})\n\n # Here we force pixels to background outside of box:\n if clip_box != 0:\n try:\n pf.image.check_inversion_by_histogram(percentiles=[4, 50, 96]) # Check inversion otherwise this might not work\n general_functions.clip_around_image(pf.image, clip_box)\n except Exception as e:\n return template(\"error_template\", {\"error_message\": \"Unable to apply clipbox. \"+str(e)})\n\n # Now invert if needed\n if invert:\n try:\n pf.image.invert()\n except Exception as e:\n return template(\"error_template\", {\"error_message\": \"Unable to invert the image. \"+str(e)})\n \n # Now analyze\n try:\n if use_original_pylinac == \"True\":\n hdmlc = True if mlc==\"Varian_120HD\" else False\n pf.analyze(tolerance=tolerance, action_tolerance=action_tol, hdmlc=hdmlc, sag_adjustment=float(sag), num_pickets=num_pickets,\n orientation=orientation)\n else:\n pf.analyze(tolerance=tolerance, action_tolerance=action_tol, mlc_type=mlc, sag_adjustment=float(sag), num_pickets=num_pickets,\n orientation=orientation)\n except Exception as e:\n return template(\"error_template\", {\"error_message\": \"Picket fence module cannot analyze. \"+str(e)})\n \n # Added an if clause to tell if num of mlc's are not the same on all pickets:\n\n num_mlcs = len(pf.pickets[0].mlc_meas)\n for p in pf.pickets:\n if len(p.mlc_meas) != num_mlcs:\n return template(\"error_template\", {\"error_message\": \"Not all pickets have the same number of leaves. \"+\n \"Probably your image si too skewed. Rotate your collimator a bit \"+\n \"and try again. 
Use the jaws perpendicular to MLCs to set the right \"+\n \"collimator angle.\"})\n error_array = np.array([])\n max_error = []\n max_error_leaf = []\n passed_tol = []\n picket_offsets = []\n picket_nr = pf.num_pickets\n for k in pf.pickets.pickets:\n error_array = np.concatenate((error_array, k.error_array))\n max_error.append(k.max_error)\n max_err_leaf_ind = np.argmax(k.error_array)\n\n max_error_leaf.append(max_err_leaf_ind)\n passed_tol.append(\"Passed\" if k.passed else \"Failed\")\n picket_offsets.append(k.dist2cax)\n\n # Plot images\n if pf.settings.orientation == \"Left-Right\":\n fig_pf = Figure(figsize=(9, 10), tight_layout={\"w_pad\":0})\n else:\n fig_pf = Figure(figsize=(9.5, 7), tight_layout={\"w_pad\":0})\n\n img_ax = fig_pf.add_subplot(1,1,1)\n img_ax.imshow(pf.image.array, cmap=matplotlib.cm.gray, interpolation=\"none\", aspect=\"equal\", origin='upper')\n\n # Taken from pylinac: leaf_error_subplot:\n tol_line_height = [pf.settings.tolerance, pf.settings.tolerance]\n tol_line_width = [0, max(pf.image.shape)]\n # make the new axis\n divider = make_axes_locatable(img_ax)\n if pf.settings.orientation == 'Up-Down':\n axtop = divider.append_axes('right', 1.75, pad=0.2, sharey=img_ax)\n else:\n axtop = divider.append_axes('bottom', 1.75, pad=0.5, sharex=img_ax)\n\n # get leaf positions, errors, standard deviation, and leaf numbers\n pos, vals, err, leaf_nums = pf.pickets.error_hist()\n\n # Changed leaf_nums to sequential numbers:\n leaf_nums = list(np.arange(0, len(leaf_nums), 1))\n\n # plot the leaf errors as a bar plot\n if pf.settings.orientation == 'Up-Down':\n axtop.barh(pos, vals, xerr=err, height=pf.pickets[0].sample_width * 2, alpha=0.4, align='center')\n # plot the tolerance line(s)\n axtop.plot(tol_line_height, tol_line_width, 'r-', linewidth=3)\n if pf.settings.action_tolerance is not None:\n tol_line_height_action = [pf.settings.action_tolerance, pf.settings.action_tolerance]\n tol_line_width_action = [0, max(pf.image.shape)]\n axtop.plot(tol_line_height_action, tol_line_width_action, 'y-', linewidth=3)\n\n # reset xlims to comfortably include the max error or tolerance value\n axtop.set_xlim([0, max(max(vals), pf.settings.tolerance) + 0.1])\n else:\n axtop.bar(pos, vals, yerr=err, width=pf.pickets[0].sample_width * 2, alpha=0.4, align='center')\n axtop.plot(tol_line_width, tol_line_height, 'r-', linewidth=3)\n if pf.settings.action_tolerance is not None:\n tol_line_height_action = [pf.settings.action_tolerance, pf.settings.action_tolerance]\n tol_line_width_action = [0, max(pf.image.shape)]\n axtop.plot(tol_line_width_action, tol_line_height_action, 'y-', linewidth=3)\n axtop.set_ylim([0, max(max(vals), pf.settings.tolerance) + 0.1])\n\n # add formatting to axis\n axtop.grid(True)\n axtop.set_title(\"Average Error (mm)\")\n\n # add tooltips if interactive\n # Copied this from previous version of pylinac\n interactive = True\n if interactive:\n if pf.settings.orientation == 'Up-Down':\n labels = [['Leaf pair: {0} <br> Avg Error: {1:3.3f} mm <br> Stdev: {2:3.3f} mm'.format(leaf_num, err, std)]\n for leaf_num, err, std in zip(leaf_nums, vals, err)]\n voffset = 0\n hoffset = 20\n else:\n labels = [['Leaf pair: {0}, Avg Error: {1:3.3f} mm, Stdev: {2:3.3f} mm'.format(leaf_num, err, std)]\n for leaf_num, err, std in zip(leaf_nums, vals, err)]\n\n if pf.settings.orientation == 'Up-Down':\n for num, patch in enumerate(axtop.axes.patches):\n ttip = mpld3.plugins.PointHTMLTooltip(patch, labels[num], voffset=voffset, hoffset=hoffset)\n mpld3.plugins.connect(fig_pf, ttip)\n 
mpld3.plugins.connect(fig_pf, mpld3.plugins.MousePosition(fontsize=14))\n else:\n for num, patch in enumerate(axtop.axes.patches):\n ttip = mpld3.plugins.PointLabelTooltip(patch, labels[num], location='top left')\n mpld3.plugins.connect(fig_pf, ttip)\n mpld3.plugins.connect(fig_pf, mpld3.plugins.MousePosition(fontsize=14))\n\n for p_num, picket in enumerate(pf.pickets):\n picket.add_guards_to_axes(img_ax.axes)\n for idx, mlc_meas in enumerate(picket.mlc_meas):\n mlc_meas.plot2axes(img_ax.axes, width=1.5)\n\n # plot CAX\n img_ax.plot(pf.settings.image_center.x, pf.settings.image_center.y, 'r+', ms=12, markeredgewidth=3)\n\n # tighten up the plot view\n img_ax.set_xlim([0, pf.image.shape[1]])\n img_ax.set_ylim([pf.image.shape[0], 0])\n img_ax.axis('off')\n img_ax.set_xticks([])\n img_ax.set_yticks([])\n \n # Histogram of all errors and average profile plot\n upper_bound = pf.settings.tolerance\n upper_outliers = np.sum(error_array.flatten()>=upper_bound)\n fig_pf2 = Figure(figsize=(10, 4), tight_layout={\"w_pad\":2})\n ax2 = fig_pf2.add_subplot(1,2,1)\n ax3 = fig_pf2.add_subplot(1,2,2)\n n, bins = np.histogram(error_array.flatten(), density=False, bins=10, range=(0, upper_bound))\n ax2.bar(bins[0:-1], n, width=np.diff(bins)[0], facecolor='green', alpha=0.75)\n ax2.bar([upper_bound,upper_bound*1.1], upper_outliers, width=0.1*upper_bound, facecolor='red', alpha=0.75)\n ax2.plot([pf.settings.action_tolerance,pf.settings.action_tolerance], [0,max(n)/2] , color=\"orange\")\n ax2.annotate(\"Action Tol.\", (pf.settings.action_tolerance, 1.05*max(n)/2), color='black',\n fontsize=6, ha='center', va='bottom')\n ax2.plot([pf.settings.tolerance,pf.settings.tolerance], [0,max(n)/2] , color=\"darkred\")\n ax2.annotate(\"Tol.\", (pf.settings.tolerance, 1.05*max(n)/2), color='black',\n fontsize=6, ha='center', va='bottom')\n\n # Plot mean inplane profile and calculate FWHM:\n mlc_mean_profile = pf.pickets.image_mlc_inplane_mean_profile\n ax3.plot(mlc_mean_profile.values, \"b-\")\n picket_fwhm = []\n fwhm_mean = 0\n try:\n peaks = mlc_mean_profile.find_peaks(max_number=picket_nr, min_distance=0.02, threshold=0.5)\n peaks = np.sort(peaks)\n ax3.plot(peaks, mlc_mean_profile[peaks], \"ro\")\n\n separation = int(np.mean(np.diff(peaks))/3)\n mmpd = 1/pf.image.dpmm\n # Get valleys\n valleys = []\n for p in np.arange(0, len(peaks)-1, 1):\n prof_partial = mlc_mean_profile[peaks[p]: peaks[p+1]]\n valleys.append(peaks[p]+np.argmin(prof_partial))\n edge_points = [peaks[0]-separation] + valleys + [peaks[-1]+separation]\n ax3.plot(edge_points, mlc_mean_profile[edge_points], \"yo\")\n\n for k in np.arange(0, len(edge_points)-1, 1):\n pr = PylinacSingleProfile(mlc_mean_profile[edge_points[k]:edge_points[k+1]])\n left = pr[0]\n right = pr[-1]\n amplitude = mlc_mean_profile[peaks[k]]\n if left < right:\n x = 100*((amplitude-left)*0.5 +left-right)/(amplitude-right)\n a = pr._penumbra_point(x=50, side=\"left\", interpolate=True)\n b = pr._penumbra_point(x=x, side=\"right\", interpolate=True)\n else:\n x = 100*((amplitude-right)*0.5 +right-left)/(amplitude-left)\n a = pr._penumbra_point(x=x, side=\"left\", interpolate=True)\n b = pr._penumbra_point(x=50, side=\"right\", interpolate=True)\n left_point = edge_points[k]+a\n right_point = edge_points[k]+b\n ax3.plot([left_point, right_point], [np.interp(left_point, np.arange(0, len(mlc_mean_profile.values), 1), mlc_mean_profile.values),\n np.interp(right_point, np.arange(0, len(mlc_mean_profile.values), 1), mlc_mean_profile.values)], \"-k\", alpha=0.5)\n 
picket_fwhm.append(np.abs(a-b)*mmpd)\n \n fwhm_mean = np.mean(picket_fwhm)\n except:\n picket_fwhm = [np.nan]*picket_nr\n fwhm_mean = np.nan\n if len(picket_fwhm) != picket_nr:\n fwhm_mean = np.mean(picket_fwhm)\n picket_fwhm = [np.nan]*picket_nr\n\n ax2.set_xlim([-0.025, pf.settings.tolerance*1.15])\n ax3.set_xlim([0, pf.image.shape[1]])\n ax2.set_title(\"Leaf error\")\n ax3.set_title(\"MLC mean profile\")\n ax2.set_xlabel(\"Error [mm]\")\n ax2.set_ylabel(\"Counts\")\n ax3.set_xlabel(\"Pixel\")\n ax3.set_ylabel(\"Grey value\")\n\n passed = \"Passed\" if pf.passed else \"Failed\"\n\n script = mpld3.fig_to_html(fig_pf, d3_url=D3_URL, mpld3_url=MPLD3_URL)\n script2 = mpld3.fig_to_html(fig_pf2, d3_url=D3_URL, mpld3_url=MPLD3_URL)\n variables = {\n \"script\": script,\n \"script2\": script2,\n \"passed\": passed,\n \"max_error\": max_error,\n \"max_error_leaf\": max_error_leaf,\n \"passed_tol\": passed_tol,\n \"picket_nr\": picket_nr,\n \"tolerance\": pf.settings.tolerance,\n \"perc_passing\": pf.percent_passing,\n \"max_error_all\": pf.max_error,\n \"max_error_picket_all\": pf.max_error_picket,\n \"max_error_leaf_all\": pf.max_error_leaf,\n \"median_error\": pf.abs_median_error,\n \"spacing\": pf.pickets.mean_spacing,\n \"picket_offsets\": picket_offsets,\n \"fwhm_mean\": fwhm_mean,\n \"picket_fwhm\": picket_fwhm,\n \"pdf_report_enable\": generate_pdf_report,\n \"save_results\": save_results,\n \"acquisition_datetime\": acquisition_datetime\n }\n\n # Generate pylinac report:\n if generate_pdf_report == \"True\":\n pdf_file = tempfile.NamedTemporaryFile(delete=False, prefix=\"PicketFence_\", suffix=\".pdf\", dir=config.PDF_REPORT_FOLDER)\n metadata = RestToolbox.GetInstances(config.ORTHANC_URL, [w])\n try:\n patient = metadata[0][\"PatientName\"]\n except:\n patient = \"\"\n try:\n stationname = metadata[0][\"StationName\"]\n except:\n stationname = \"\"\n try:\n date_time = RestToolbox.get_datetime(metadata[0])\n date_var = datetime.datetime.strptime(date_time[0], \"%Y%m%d\").strftime(\"%d/%m/%Y\")\n except:\n date_var = \"\"\n pf.publish_pdf(pdf_file, notes=[\"Date = \"+date_var, \"Patient = \"+patient, \"Station = \"+stationname])\n\n variables[\"pdf_report_filename\"] = os.path.basename(pdf_file.name)\n #gc.collect()\n\n general_functions.delete_files_in_subfolders([temp_folder]) # Delete image\n return template(\"picket_fence_results\", variables)\n\n@pf_app.route('/picket_fence_calculate/<w>', method=\"POST\")\ndef picket_fence_calculate(w):\n # w is the image, m is the mlc type\n \n temp_folder, file_path = RestToolbox.GetSingleDcm(config.ORTHANC_URL, w)\n clip_box = float(request.forms.hidden_clipbox)*10.0\n py_filter = int(request.forms.hidden_filter)\n py_filter = None if py_filter==0 else py_filter\n num_pickets = int(request.forms.hidden_peaks)\n num_pickets = None if num_pickets==0 else num_pickets\n sag = float(request.forms.hidden_sag)\n mlc = request.forms.hidden_mlc\n invert = True if request.forms.hidden_invert==\"true\" else False\n orientation = request.forms.hidden_orientation\n orientation = None if orientation==\"Automatic\" else orientation\n imgdescription = request.forms.hidden_imgdescription\n station = request.forms.hidden_station\n displayname = request.forms.hidden_displayname\n acquisition_datetime = request.forms.hidden_datetime\n\n args = {\"temp_folder\": temp_folder, \"file_path\": file_path, \"clip_box\": clip_box, \"py_filter\":py_filter,\n \"num_pickets\":num_pickets, \"sag\": sag, \"mlc\":mlc, \"invert\":invert, \"orientation\":orientation,\n \"w\":w, 
\"imgdescription\": imgdescription,\"station\": station, \"displayname\": displayname,\n \"acquisition_datetime\": acquisition_datetime, \"config\": general_functions.get_configuration()}\n p = Pool(1)\n data = p.map(picket_fence_helperf_catch_error, [args])\n p.close()\n p.join()\n return data\n"
] | [
[
"numpy.sort",
"numpy.diff",
"numpy.argmin",
"matplotlib.figure.Figure",
"numpy.abs",
"numpy.argmax",
"matplotlib.style.use",
"matplotlib.use",
"numpy.concatenate",
"numpy.array",
"numpy.mean"
]
] |
lgeiger/tensorboard | [
"6b012202689ae3c55e27c3690455e47f8d18c54d"
] | [
"tensorboard/loader.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TensorBoard data ingestion module.\n\nWARNING: This module is currently EXPERIMENTAL.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport functools\nimport inspect\nimport locale\nimport logging\nimport os\nimport re\nimport sys\nimport threading\nimport time\nimport types # pylint: disable=unused-import\n\nimport six\nimport tensorflow as tf\n\nfrom tensorboard import db\nfrom tensorboard import util\n\n\nclass Record(collections.namedtuple('Record', ('record', 'offset'))):\n \"\"\"Value class for a record returned by RecordReader.\n\n Fields:\n record: The byte string record that was read.\n offset: The byte offset in the file *after* this record was read.\n\n :type record: str\n :type offset: int\n \"\"\"\n __slots__ = () # Enforces use of only tuple fields.\n\n\[email protected]\[email protected]_2_unicode_compatible\nclass RecordReader(object):\n \"\"\"Pythonic veneer around PyRecordReader.\"\"\"\n\n def __init__(self, path, start_offset=0):\n \"\"\"Creates new instance.\n\n Args:\n path: Path of file. This can be on a remote file system if the\n TensorFlow build supports it.\n start_offset: Byte offset to seek in file once it's opened.\n\n :type path: str\n :type start_offset: int\n \"\"\"\n self.path = tf.compat.as_text(path)\n self._offset = start_offset\n self._size = -1\n self._reader = None # type: tf.pywrap_tensorflow.PyRecordReader\n self._is_closed = False\n self._lock = threading.Lock()\n\n def get_size(self):\n \"\"\"Returns byte length of file.\n\n This is guaranteed to return a number greater than or equal to the\n offset of the last record returned by get_next_record().\n\n This method can be called after the instance has been closed.\n\n Raises:\n IOError: If file has shrunk from last read offset, or start\n offset, or last read size.\n\n :rtype: int\n \"\"\"\n size = tf.gfile.Stat(self.path).length\n minimum = max(self._offset, self._size)\n if size < minimum:\n raise IOError('File shrunk: %d < %d: %s' % (size, minimum, self.path))\n self._size = size\n return size\n\n def get_next_record(self):\n \"\"\"Reads record from file.\n\n Returns:\n A Record or None if no more were available.\n\n Raises:\n IOError: On open or read error, or if close was called.\n tf.errors.DataLossError: If corruption was encountered in the\n records file.\n\n :rtype: Record\n \"\"\"\n if self._is_closed:\n raise IOError('%s is closed' % self)\n if self._reader is None:\n self._reader = self._open()\n try:\n if not inspect.getargspec(self._reader.GetNext).args[1:]: # pylint: disable=deprecated-method\n self._reader.GetNext()\n else:\n # GetNext() expects a status argument on TF <= 1.7\n with tf.errors.raise_exception_on_not_ok_status() as status:\n self._reader.GetNext(status)\n except tf.errors.OutOfRangeError:\n # We ignore partial read exceptions, because 
a record may be truncated.\n # PyRecordReader holds the offset prior to the failed read, so retrying\n # will succeed.\n return None\n self._offset = self._reader.offset()\n return Record(self._reader.record(), self._offset)\n\n def close(self):\n \"\"\"Closes record reader if open.\n\n Further reads are not permitted after this method is called.\n \"\"\"\n if self._is_closed:\n return\n if self._reader is not None:\n self._reader.Close()\n self._is_closed = True\n self._reader = None\n\n def _open(self):\n with tf.errors.raise_exception_on_not_ok_status() as status:\n return tf.pywrap_tensorflow.PyRecordReader_New(\n tf.resource_loader.readahead_file_path(tf.compat.as_bytes(self.path)),\n self._offset, tf.compat.as_bytes(''), status)\n\n def __str__(self):\n return u'RecordReader{%s}' % self.path\n\n\[email protected]\[email protected]_2_unicode_compatible\nclass BufferedRecordReader(object):\n \"\"\"Wrapper around RecordReader that does threaded read-ahead.\n\n This class implements the same interface as RecordReader. It prevents\n remote file systems from devastating loader performance. It does not\n degrade throughput on local file systems.\n\n The thread is spawned when the first read operation happens. The\n thread will diligently try to buffer records in the background. Its\n goal is to sleep as much as possible without blocking read operations.\n\n This class is thread safe. It can be used from multiple threads\n without any need for external synchronization.\n \"\"\"\n\n READ_AHEAD_AGGRESSION = 2.3 # Does full replenish when ~40% full.\n READ_AHEAD_BYTES = 16 * 1024 * 1024\n STAT_INTERVAL_SECONDS = 4.0\n\n def __init__(self, path,\n start_offset=0,\n read_ahead=READ_AHEAD_BYTES,\n stat_interval=STAT_INTERVAL_SECONDS,\n clock=time.time,\n record_reader_factory=RecordReader):\n \"\"\"Creates new instance.\n\n The i/o thread is not started until the first read happens.\n\n Args:\n path: Path of file. This can be on a remote file system if the\n TensorFlow build supports it.\n start_offset: Byte offset to seek in file once it's opened.\n read_ahead: The number of record bytes to buffer into memory\n before the thread starts blocking. This value must be >0 and\n the default is BufferedRecordReader.READ_AHEAD_BYTES.\n stat_interval: A float with the minimum number of seconds between\n stat calls, to determine the file size. 
If this is 0.0 then\n the thread will stat after every re-buffer, but never be\n woken up in order to stat.\n clock: Function returning a float with the number of seconds\n since the UNIX epoch in zulu time.\n record_reader_factory: The RecordReader constructor, which can be\n changed for testing.\n\n :type path: str\n :type start_offset: int\n :type read_ahead: int\n :type clock: () -> float\n :type record_reader_factory: (str, int) -> RecordReader\n \"\"\"\n self.path = tf.compat.as_text(path)\n self._read_ahead = read_ahead\n self._stat_interval = stat_interval\n self._clock = clock\n self._is_closed = False\n self._has_reached_end = False\n self._offset = 0\n self._size = -1\n self._last_stat = 0.0\n self._buffered = 0\n self._reader = record_reader_factory(self.path, start_offset)\n self._records = collections.deque() # type: collections.deque[Record]\n self._read_exception = \\\n None # type: tuple[BaseException, BaseException, types.TracebackType]\n self._close_exception = \\\n None # type: tuple[BaseException, BaseException, types.TracebackType]\n self._lock = threading.Lock()\n self._wake_up_producer = threading.Condition(self._lock)\n self._wake_up_consumers = threading.Condition(self._lock)\n self._thread = threading.Thread(target=self._run,\n name=_shorten_event_log_path(self.path))\n\n def get_size(self):\n \"\"\"Returns byte length of file.\n\n This is guaranteed to return a number greater than or equal to the\n offset of the last record returned by get_next_record().\n\n In the average case, this method will not block. However, if the\n i/o thread has not yet computed this value, then this method will\n block on a stat call.\n\n This method can be called after the instance has been closed.\n\n Returns:\n The byte length of file, which might increase over time, but is\n guaranteed to never decrease. It's also guaranteed that it will\n be greater than or equal to the offset field of any Record.\n\n :rtype: int\n \"\"\"\n with self._lock:\n if self._should_stat():\n self._stat()\n return self._size\n\n def get_next_record(self):\n \"\"\"Reads one record.\n\n When this method is first called, it will spawn the thread and\n block until a record is read. Once the thread starts, it will queue\n up records which can be read without blocking. The exception is\n when we reach the end of the file, in which case each repeated call\n will be synchronous. There is no background polling. If new data is\n appended to the file, new records won't be buffered until this\n method is invoked again. The caller should take care to meter calls\n to this method once it reaches the end of file, lest they impact\n performance.\n\n Returns:\n A Record object, or None if there are no more records available\n at the moment.\n\n Raises:\n IOError: If this instance has been closed.\n tf.errors.DataLossError: If corruption was encountered in the\n records file.\n Exception: To propagate any exceptions that may have been thrown\n by the read operation in the other thread. 
If an exception is\n thrown, then all subsequent calls to this method will rethrow\n that same exception.\n\n :rtype: Record\n \"\"\"\n with self._lock:\n if self._is_closed:\n raise IOError('%s is closed' % self)\n if not self._thread.is_alive():\n self._thread.start()\n else:\n record = self._get_record()\n if record is not None:\n if self._should_wakeup():\n self._wake_up_producer.notify()\n return record\n self._has_reached_end = False\n self._wake_up_producer.notify()\n while not (self._read_exception or\n self._has_reached_end or\n self._records):\n self._wake_up_consumers.wait()\n return self._get_record()\n\n def close(self):\n \"\"\"Closes event log reader if open.\n\n If the i/o thread is running, this method blocks until it has been\n shut down.\n\n Further reads are not permitted after this method is called.\n\n Raises:\n Exception: To propagate any exceptions that may have been thrown\n by the close operation in the other thread. If an exception\n is thrown, then all subsequent calls to this method will\n rethrow that same exception.\n \"\"\"\n with self._lock:\n if not self._is_closed:\n self._is_closed = True\n if not self._thread.is_alive():\n self._reader = None\n return\n self._wake_up_producer.notify()\n while self._reader is not None:\n self._wake_up_consumers.wait()\n if self._close_exception is not None:\n six.reraise(*self._close_exception)\n\n def _get_record(self):\n if self._read_exception is not None:\n six.reraise(*self._read_exception)\n if not self._records:\n return None\n record = self._records.popleft()\n self._buffered -= len(record.record)\n return record\n\n @util.guarded_by('_lock')\n def _should_wakeup(self):\n return (self._is_closed or\n self._read_exception is None and\n (self._should_rebuffer() or\n (self._stat_interval and self._should_stat())))\n\n @util.guarded_by('_lock')\n def _should_rebuffer(self):\n return (not self._has_reached_end and\n (float(self._buffered) <\n self._read_ahead / BufferedRecordReader.READ_AHEAD_AGGRESSION))\n\n @util.guarded_by('_lock')\n def _should_stat(self):\n return (self._read_exception is None and\n (self._offset > self._size or\n self._last_stat <= self._clock() - self._stat_interval))\n\n @util.guarded_by('_lock')\n def _stat(self):\n try:\n now = self._clock()\n self._size = self._reader.get_size()\n self._last_stat = now\n except Exception as e: # pylint: disable=broad-except\n tf.logging.debug('Stat failed: %s', e)\n self._read_exception = sys.exc_info()\n\n def _run(self):\n while True:\n with self._lock:\n while not self._should_wakeup():\n self._wake_up_producer.wait()\n if self._is_closed:\n try:\n self._reader.close()\n tf.logging.debug('Closed')\n except Exception as e: # pylint: disable=broad-except\n self._close_exception = sys.exc_info()\n tf.logging.debug('Close failed: %s', e)\n self._reader = None\n self._wake_up_consumers.notify_all()\n return\n if self._buffered >= self._read_ahead:\n tf.logging.debug('Waking up to stat')\n self._stat()\n continue\n # Calculate a good amount of data to read outside the lock.\n # The less we have buffered, the less re-buffering we'll do.\n # We want to minimize wait time in the other thread. 
See the\n # following contour plot: https://goo.gl/HTBcCU\n x = float(self._buffered)\n y = BufferedRecordReader.READ_AHEAD_AGGRESSION\n c = float(self._read_ahead)\n want = int(min(c - x, y/c * x**y + 1))\n # Perform re-buffering outside lock.\n self._rebuffer(want)\n\n def _rebuffer(self, want):\n tf.logging.debug('Waking up to read %s bytes', _localize_int(want))\n records = []\n read_exception = self._read_exception\n if read_exception is None:\n try:\n while want > 0:\n record = self._reader.get_next_record()\n if record is None:\n break\n self._offset = record.offset\n records.append(record)\n want -= len(record.record)\n except Exception as e: # pylint: disable=broad-except\n tf.logging.debug('Read failed: %s', e)\n read_exception = sys.exc_info()\n with self._lock:\n self._read_exception = read_exception\n if self._should_stat():\n self._stat()\n if not self._read_exception:\n if not records:\n self._has_reached_end = True\n else:\n for record in records:\n self._records.append(record)\n self._buffered += len(record.record)\n self._wake_up_consumers.notify_all()\n\n def __str__(self):\n return u'BufferedRecordReader{%s}' % self.path\n\n\nclass RateCounter(object):\n \"\"\"Utility class for tracking how much a number increases each second.\n\n The rate is calculated by averaging of samples within a time window,\n which weights recent samples more strongly.\n \"\"\"\n\n def __init__(self, window, clock=time.time):\n \"\"\"Creates new instance.\n\n Args:\n window: The maximum number of seconds across which rate is\n averaged. In practice, the rate might be averaged over a time\n period greater than window if set_value is being called less\n frequently than window.\n clock: Function returning a float with the number of seconds\n since the UNIX epoch in zulu time.\n\n :type window: float\n :type clock: () -> float\n \"\"\"\n self._window = window\n self._clock = clock\n self._points = collections.deque()\n self._last_value = None # type: float\n self._last_time = None # type: float\n\n def get_rate(self):\n \"\"\"Determines rate of increase in value per second averaged over window.\n\n Returns:\n An integer representing the rate or None if not enough\n information has been collected yet.\n\n :rtype: int\n \"\"\"\n points = []\n total_elapsed = 0.0\n total_weight = 0.0\n for rate, elapsed, _ in self._points:\n weight = 1.0 / (total_elapsed + 1) * elapsed\n total_elapsed += elapsed\n total_weight += weight\n points.append((rate, weight))\n if not total_weight:\n return 0\n return int(sum(w / total_weight * r for r, w in points))\n\n def set_value(self, value):\n \"\"\"Sets number state.\n\n This method adds a delta between value and the value of the last\n time this method was called. 
Therefore the first invocation does\n not add a delta.\n\n Raises:\n ValueError: If value is less than the last value.\n\n :type value: float\n \"\"\"\n value = float(value)\n now = self._clock()\n if self._last_value is None:\n self._last_value = value\n self._last_time = now\n return\n if value < self._last_value:\n raise ValueError('%f < %f' % (value, self._last_value))\n delta = value - self._last_value\n elapsed = now - self._last_time\n if not elapsed:\n return\n self._points.appendleft((delta / elapsed, elapsed, now))\n self._last_time = now\n self._last_value = value\n self._remove_old_points()\n\n def bump(self):\n \"\"\"Makes time since last set_value count for nothing.\"\"\"\n self._last_time = self._clock()\n\n def _remove_old_points(self):\n threshold = self._clock() - self._window\n while self._points:\n r, e, t = self._points.pop()\n if t > threshold:\n self._points.append((r, e, t))\n break\n\n\[email protected]\nclass Progress(object):\n \"\"\"Terminal UI for displaying job progress in terms of bytes.\n\n On teletypes, this class will display a nice ephemeral unicode\n progress bar. Otherwise it just emits periodic log messages.\n\n This class keeps track of the rate at which input is processed, as\n well as the rate it grows. These values are represented to the user\n using the DELTA and NABLA symbols.\n\n An alarm is displayed if the consumption rate falls behind the\n production rate. In order for this to be calculated properly, the\n sleep method of this class should be used rather than time.sleep.\n \"\"\"\n\n BAR_INTERVAL_SECONDS = 0.25\n BAR_LOGGER = logging.getLogger('tensorflow' + util.LogHandler.EPHEMERAL)\n BAR_WIDTH = 45\n BLOCK_DARK = u'\\u2593'\n BLOCK_LIGHT = u'\\u2591'\n DELTA = u'\\u2206'\n LOG_INTERVAL_SECONDS = 5.0\n NABLA = u'\\u2207'\n RATE_WINDOW = 20.0\n\n def __init__(self, clock=time.time,\n sleep=time.sleep,\n log_callback=tf.logging.info,\n bar_callback=BAR_LOGGER.info,\n rate_counter_factory=RateCounter):\n \"\"\"Creates new instance.\n\n Args:\n clock: Function returning a float with the number of seconds\n since the UNIX epoch in zulu time.\n sleep: Injected time.sleep function.\n log_callback: Callback for emitting normal log records.\n bar_callback: Callback for emitting ephemeral bar records.\n rate_counter_factory: Constructor to RateCounter, which can be\n swapped out for testing.\n\n :type clock: () -> float\n :type sleep: (float) -> None\n :type rate_counter_factory: (float) -> RateCounter\n \"\"\"\n self._clock = clock\n self._sleep = sleep\n self._log_callback = log_callback\n self._bar_callback = bar_callback\n self._initialized = False\n self._offset = 0\n self._size = 0\n self._last_log_time = 0.0\n self._last_bar_time = 0.0\n self._last_log_offset = -1\n self._last_bar_offset = -1\n self._rate_offset = rate_counter_factory(Progress.RATE_WINDOW)\n self._rate_size = rate_counter_factory(Progress.RATE_WINDOW)\n\n def set_progress(self, offset, size):\n \"\"\"Updates the progress bar state.\n\n This method will cause progress information to be occasionally\n written out.\n\n Args:\n offset: The number of bytes processed so far.\n size: The total number of bytes. 
This is allowed to increase or\n decrease, but it must remain at least offset.\n\n Raises:\n ValueError: If offset is greater than size, or offset or size\n decreased from the last invocation.\n\n :type offset: int\n :type size: int\n \"\"\"\n if offset > size:\n raise ValueError('offset (%d) can not exceed size (%d)' % (offset, size))\n self._rate_offset.set_value(offset)\n self._rate_size.set_value(size)\n self._offset = offset\n self._size = size\n now = self._clock()\n if not self._initialized:\n self._last_log_time = now\n self._last_bar_time = now\n self._initialized = True\n return\n elapsed = now - self._last_log_time\n if elapsed >= Progress.LOG_INTERVAL_SECONDS:\n self._last_log_time = now\n self._show_log()\n elapsed = now - self._last_bar_time\n if elapsed >= Progress.BAR_INTERVAL_SECONDS:\n self._last_bar_time = now\n self._show_bar()\n\n def close(self):\n \"\"\"Forces progress to be written to log.\n\n This method exists because we don't want the progress bar to say\n something like 98% once the file is done loading.\n \"\"\"\n self._show_log(can_stall=False)\n self._show_bar(can_stall=False)\n # Instructs util.LogHandler to clear the ephemeral logging state.\n self._bar_callback('')\n\n def sleep(self, seconds):\n \"\"\"Sleeps for a given number of seconds.\n\n Time spent sleeping in this method does not have a detrimental\n impact on the consumption rate.\n\n :type seconds: float\n \"\"\"\n self._sleep(seconds)\n self._rate_offset.bump()\n\n def _show_log(self, can_stall=True):\n is_stalled = can_stall and self._offset == self._last_log_offset\n self._last_log_offset = self._offset\n self._log_callback('Loaded %s', self._get_message(is_stalled))\n\n def _show_bar(self, can_stall=True):\n is_stalled = can_stall and self._offset == self._last_bar_offset\n self._last_bar_offset = self._offset\n sofar = int(self._get_fraction() * Progress.BAR_WIDTH)\n bar = (Progress.BLOCK_DARK * sofar +\n Progress.BLOCK_LIGHT * (Progress.BAR_WIDTH - sofar))\n self._bar_callback(u'%s %s ', bar, self._get_message(is_stalled))\n\n def _get_message(self, is_stalled):\n rate_offset = self._rate_offset.get_rate() # summary processing speed\n rate_size = self._rate_size.get_rate() # summary production speed\n message = u'%d%% of %s%s%s' % (\n int(self._get_fraction() * 100.0),\n _localize_int(self._size),\n self._get_rate_suffix(Progress.DELTA, rate_offset),\n self._get_rate_suffix(Progress.NABLA, rate_size))\n if rate_offset and rate_size and rate_offset < rate_size:\n # If TensorFlow is writing summaries to disk faster than we can\n # insert them into the database, that's kind of problematic.\n message += u' ' + self._make_red(u'[meltdown]')\n elif is_stalled:\n message += u' %s[stalled]%s' % (util.Ansi.BOLD, util.Ansi.RESET)\n return message\n\n def _get_fraction(self):\n if not self._size:\n return 0.0\n else:\n return float(self._offset) / self._size\n\n def _get_rate_suffix(self, symbol, rate):\n if not rate:\n return u''\n return u' %s %sB/s' % (symbol, _localize_int(rate))\n\n def _make_red(self, text):\n return (util.Ansi.BOLD +\n util.Ansi.RED +\n (util.Ansi.FLIP if self._offset % 2 == 0 else u'') +\n text +\n util.Ansi.RESET)\n\n\[email protected]\[email protected]_ordering\[email protected]_2_unicode_compatible\nclass EventLogReader(object):\n \"\"\"Helper class for reading from event log files.\n\n This class is a wrapper around BufferedRecordReader that operates on\n record files containing tf.Event protocol buffers.\n\n Fields:\n rowid: An integer primary key in EventLogs table, or 0 
if unknown.\n path: A string with the path of the event log on the local or\n remote file system.\n timestamp: An integer of the number of seconds since the UNIX epoch\n in UTC according to hostname at the time when the event log\n file was created.\n hostname: A string with the FQDN of the machine that wrote this\n event log file.\n \"\"\"\n\n def __init__(self, path,\n start_offset=0,\n record_reader_factory=BufferedRecordReader):\n \"\"\"Creates new instance.\n\n Args:\n path: Path of event log file.\n start_offset: Byte offset to seek in file once it's opened.\n record_reader_factory: A reference to the constructor of a class\n that implements the same interface as RecordReader.\n\n :type path: str\n :type record_reader_factory: (str, int) -> RecordReader\n \"\"\"\n self.rowid = 0\n self.path = tf.compat.as_text(path)\n m = _EVENT_LOG_PATH_PATTERN.search(self.path)\n if not m:\n raise ValueError('Bad event log path: ' + self.path)\n self.timestamp = int(m.group('timestamp'))\n self.hostname = m.group('hostname')\n self._offset = start_offset\n self._reader_factory = record_reader_factory\n self._reader = self._reader_factory(self.path, start_offset)\n self._key = (os.path.dirname(self.path), self.timestamp, self.hostname)\n\n def get_next_event(self):\n \"\"\"Reads an event proto from the file.\n\n Returns:\n A tf.Event or None if no more records exist in the file. Please\n note that the file remains open for subsequent reads in case more\n are appended later.\n\n :rtype: tf.Event\n \"\"\"\n record = self._reader.get_next_record()\n if record is None:\n return None\n event = tf.Event()\n event.ParseFromString(record.record)\n self._offset = record.offset\n return event\n\n def set_offset(self, offset):\n \"\"\"Sets byte offset in file.\n\n :type offset: int\n \"\"\"\n if offset == self._offset:\n return\n self._reader.close()\n self._reader = self._reader_factory(self.path, offset)\n self._offset = offset\n\n def get_offset(self):\n \"\"\"Returns current byte offset in file.\n\n :rtype: int\n \"\"\"\n return self._offset\n\n def get_size(self):\n \"\"\"Returns byte length of file.\n\n :rtype: int\n \"\"\"\n return self._reader.get_size()\n\n def save_progress(self, db_conn):\n \"\"\"Saves current offset to DB.\n\n The rowid property must be set beforehand.\n\n :type db_conn: db.Connection\n \"\"\"\n with contextlib.closing(db_conn.cursor()) as c:\n c.execute(\n 'UPDATE EventLogs SET offset = ? WHERE rowid = ? 
AND offset < ?',\n (self._offset, self.rowid, self._offset))\n\n def close(self):\n \"\"\"Closes event log reader if open.\n\n Further i/o is not permitted after this method is called.\n \"\"\"\n if self._reader is not None:\n self._reader.close()\n self._reader = None\n\n def __hash__(self):\n return hash(self._key)\n\n def __eq__(self, other):\n return self._key == other._key\n\n def __lt__(self, other):\n return self._key < other._key\n\n def __str__(self):\n offset = self.get_offset()\n if offset:\n return u'EventLogReader{path=%s, offset=%d}' % (self.path, offset)\n else:\n return u'EventLogReader{%s}' % self.path\n\n\n@util.closeable\n@functools.total_ordering\n@six.python_2_unicode_compatible\nclass RunReader(object):\n \"\"\"Utility for loading event logs into the DB.\n\n This class merges the chain of event log files into one meaningful\n stream of events, ordered by step or timestamp.\n\n Fields:\n rowid: The primary key of the corresponding row in Runs.\n name: Display name of this run.\n \"\"\"\n\n def __init__(self, rowid, name):\n \"\"\"Creates new instance.\n\n Args:\n rowid: Primary key of run in `Runs` table, which should already\n be inserted. This is a bit-packed int made by db.RUN_ROWID.\n name: Display name of run.\n\n :type rowid: int\n :type name: str\n \"\"\"\n self.rowid = db.RUN_ROWID.check(rowid)\n self.run_id = db.RUN_ROWID.parse(rowid)[1]\n self.name = tf.compat.as_text(name)\n self._mark = -1\n self._logs = [] # type: list[EventLogReader]\n self._index = 0\n self._entombed_progress = 0\n self._saved_events = \\\n collections.deque() # type: collections.deque[tf.Event]\n self._prepended_events = \\\n collections.deque() # type: collections.deque[tf.Event]\n\n def add_event_log(self, db_conn, log):\n \"\"\"Adds event log to run loader.\n\n Event logs must be added monotonically, based on the timestamp in\n the filename. Please note that calling this method could cause a\n current batch of reads to fast forward.\n\n Args:\n db_conn: A PEP 249 Connection object.\n log: An EventLogReader instance.\n\n Returns:\n True if log was actually added.\n\n :type db_conn: db.Connection\n :type log: EventLogReader\n :rtype: bool\n \"\"\"\n if self._logs and log <= self._logs[-1]:\n return False\n with contextlib.closing(db_conn.cursor()) as c:\n c.execute(\n 'SELECT rowid, offset FROM EventLogs WHERE run_id = ? 
AND path = ?',\n (self.run_id, log.path))\n row = c.fetchone()\n if row:\n log.rowid = row[0]\n log.set_offset(row[1])\n else:\n event_log_id = db.EVENT_LOG_ID.generate()\n log.rowid = db.EVENT_LOG_ROWID.create(self.run_id, event_log_id)\n c.execute(\n ('INSERT INTO EventLogs (rowid, run_id, path, offset)'\n ' VALUES (?, ?, ?, 0)'),\n (log.rowid, self.run_id, log.path))\n tf.logging.debug('Adding %s', log)\n self._logs.append(log)\n # Skip over event logs we've already read.\n if log.get_offset() > 0 and not self._prepended_events:\n self._index = len(self._logs) - 1\n self._cleanup()\n return True\n\n def get_next_event(self):\n \"\"\"Returns next tf.Event from event logs or None if stalled.\n\n :rtype: tf.Event\n \"\"\"\n event = None\n if self._prepended_events:\n event = self._prepended_events.popleft()\n elif self._index < len(self._logs):\n while True:\n log = self._logs[self._index]\n event = log.get_next_event()\n if event is not None:\n break\n if self._index == len(self._logs) - 1:\n break\n self._index += 1\n self._cleanup()\n if event is not None and self._mark != -1:\n self._saved_events.append(event)\n return event\n\n def mark_peek_reset(self):\n \"\"\"Returns next event without advancing.\n\n Note: This method sets the mark to the current position.\n\n :rtype: tf.Event\n \"\"\"\n self.mark()\n result = self.get_next_event()\n self.reset()\n return result\n\n def get_offset(self):\n \"\"\"Returns number of bytes read across all event log files.\n\n :rtype: int\n \"\"\"\n if self._mark != -1:\n return self._mark\n return self._get_offset()\n\n def _get_offset(self):\n return sum(el.get_offset() for el in self._logs) + self._entombed_progress\n\n def get_size(self):\n \"\"\"Returns sum of byte lengths of event log files.\n\n :rtype: int\n \"\"\"\n return sum(el.get_size() for el in self._logs) + self._entombed_progress\n\n def save_progress(self, db_conn):\n \"\"\"Saves current offsets of all open event logs to DB.\n\n This should be called after the mark has been advanced.\n\n :type db_conn: db.Connection\n \"\"\"\n n = 0\n while self._index >= n < len(self._logs):\n self._logs[n].save_progress(db_conn)\n n += 1\n\n def mark(self):\n \"\"\"Marks current position in file so reset() can be called.\"\"\"\n if self._prepended_events:\n raise ValueError('mark() offsets must be monotonic')\n self._mark = self._get_offset()\n self._saved_events.clear()\n\n def reset(self):\n \"\"\"Resets read state to where mark() was called.\"\"\"\n if self._mark == -1:\n return\n self._prepended_events.extend(self._saved_events)\n self._saved_events.clear()\n\n def close(self):\n \"\"\"Closes all event log readers.\n\n This method may be called multiple times, but further operations\n are not permitted.\n\n Raises:\n Exception: To propagate the most recent exception thrown by the\n EventLogReader close method. Suppressed exceptions are\n logged.\n \"\"\"\n util.close_all(self._logs)\n self._index = len(self._logs)\n self._mark = -1\n self._prepended_events.clear()\n self._saved_events.clear()\n\n def _cleanup(self):\n # Last event log has to be preserved so we can continue enforcing\n # monotonicity. 
We entomb offset because that also has to be\n # monotonic, but the size does not.\n if 0 < self._index < len(self._logs):\n deleted = self._logs[:self._index]\n self._logs = self._logs[self._index:]\n self._index = 0\n self._entombed_progress += sum(l.get_offset() for l in deleted)\n util.close_all(deleted)\n\n def _skip_to_event_log(self, i):\n should_mark = self._mark != -1 and i > self._index\n self._index = i\n if should_mark:\n self._prepended_events.clear()\n self.mark()\n\n def __hash__(self):\n return hash(self.rowid)\n\n def __eq__(self, other):\n return self.rowid == other.rowid\n\n def __lt__(self, other):\n return self.rowid < other.rowid\n\n def __str__(self):\n offset = self.get_offset()\n if offset:\n return u'RunReader{name=%s, offset=%d}' % (self.name, offset)\n else:\n return u'RunReader{%s}' % self.name\n\n\ndef _get_basename(path):\n \"\"\"Gets base name of path.\n\n This is the same as os.path.basename, however it may potentially do\n i/o to handle a few edge cases, which would otherwise cause the\n result to be less meaningful, e.g. \".\" and \"..\".\n\n :type path: str\n :rtype: str\n \"\"\"\n return os.path.basename(os.path.normpath(os.path.join(_get_cwd(), path)))\n\n\ndef _get_cwd():\n \"\"\"Returns current directory and try not to expand symlinks.\n\n :rtype: str\n \"\"\"\n result = os.environ.get('PWD')\n if not result:\n result = os.getcwd()\n return result\n\n\ndef get_event_logs(directory):\n \"\"\"Walks directory tree for EventLogReader files.\n\n Args:\n directory: Path of directory.\n\n Returns:\n List of EventLogReader objects, ordered by directory name and\n timestamp.\n\n :type directory: str\n :rtype: list[EventLogReader]\n \"\"\"\n logs = []\n for dirname, _, filenames in tf.gfile.Walk(directory):\n for filename in filenames:\n if is_event_log_file(filename):\n logs.append(EventLogReader(os.path.join(dirname, filename)))\n logs.sort()\n return logs\n\n\n_EVENT_LOG_PATH_PATTERN = re.compile(\n r'\\.tfevents\\.(?P<timestamp>\\d+).(?P<hostname>[-.0-9A-Za-z]+)$')\n\n\ndef is_event_log_file(path):\n \"\"\"Returns True if path appears to be an event log file.\n\n :type path: str\n :rtype: bool\n \"\"\"\n return bool(_EVENT_LOG_PATH_PATTERN.search(path))\n\n\n_SHORTEN_EVENT_LOG_PATH_PATTERN = re.compile(r'(?:[^/\\\\]+[/\\\\])?(?:[^/\\\\]+)$')\n\n\ndef _shorten_event_log_path(path):\n \"\"\"Makes an event log path more human readable.\n\n Returns:\n Path containing only basename and the first parent directory name,\n if there is one.\n\n :type path: str\n :rtype: str\n \"\"\"\n m = _SHORTEN_EVENT_LOG_PATH_PATTERN.search(path)\n return m.group(0) if m else None\n\n\ndef _localize_int(n):\n \"\"\"Adds locale specific thousands group separators.\n\n :type n: int\n :rtype: str\n \"\"\"\n return locale.format('%d', n, grouping=True)\n"
] | [
[
"tensorflow.errors.raise_exception_on_not_ok_status",
"tensorflow.gfile.Stat",
"tensorflow.compat.as_bytes",
"tensorflow.compat.as_text",
"tensorflow.logging.debug",
"tensorflow.Event",
"tensorflow.gfile.Walk"
]
] |
jw9730/clova-speech-hackathon | [
"72cc2e31b0ec18a6486ddc746835a472bf6577fe"
] | [
"test.py"
] | [
"import wavio\nimport torch\nimport numpy as np\nfrom specaugment import spec_augment_pytorch, melscale_pytorch\nimport matplotlib.pyplot as plt\n\nPAD = 0\nN_FFT = 512\nSAMPLE_RATE = 16000\n\ndef trim(data, threshold_attack=0.01, threshold_release=0.05, attack_margin=5000, release_margin=5000):\n data_size = len(data)\n cut_head = 0\n cut_tail = data_size\n\n plt.subplot(5,1,1)\n plt.plot(data)\n\n # Square\n w = np.power(np.divide(data, np.max(data)), 2)\n\n plt.subplot(5,1,2)\n plt.plot(w)\n\n # Gaussian kernel\n sig = 20000\n time = np.linspace(-40000, 40000)\n kernel = np.exp(-np.square(time)/2/sig/sig)\n\n # Smooth and normalize\n w = np.convolve(w, kernel, mode='same')\n w = np.divide(w, np.max(w))\n\n plt.subplot(5,1,3)\n plt.plot(w)\n\n\n # Detect crop sites\n for sample in range(data_size):\n sample_num = sample\n sample_amp = w[sample_num]\n if sample_amp > threshold_attack:\n cut_head = np.max([sample_num - attack_margin, 0])\n break\n\n for sample in range(data_size):\n sample_num = data_size-sample-1\n sample_amp = w[sample_num]\n if sample_amp > threshold_release:\n cut_tail = np.min([sample_num + release_margin, data_size])\n break\n\n print(cut_head)\n print(cut_tail)\n plt.subplot(5,1,4)\n plt.plot(data[cut_head:cut_tail])\n\n data_copy = data[cut_head:cut_tail]\n del w, time, kernel, data\n\n plt.subplot(5,1,5)\n plt.plot(data_copy)\n #plt.show()\n\n return data_copy\n\n\ndef get_spectrogram_feature(filepath, train_mode=False):\n (rate, width, sig) = wavio.readwav(filepath)\n wavio.writewav24(\"test.wav\", rate=rate, data=sig)\n sig = sig.ravel()\n sig = trim(sig)\n\n stft = torch.stft(torch.FloatTensor(sig),\n N_FFT,\n hop_length=int(0.01*SAMPLE_RATE),\n win_length=int(0.030*SAMPLE_RATE),\n window=torch.hamming_window(int(0.030*SAMPLE_RATE)),\n center=False,\n normalized=False,\n onesided=True)\n\n stft = (stft[:,:,0].pow(2) + stft[:,:,1].pow(2)).pow(0.5)\n\n amag = stft.clone().detach()\n\n amag = amag.view(-1, amag.shape[0], amag.shape[1]) # reshape spectrogram shape to [batch_size, time, frequency]\n mel = melscale_pytorch.mel_scale(amag, sample_rate=SAMPLE_RATE, n_mels=N_FFT//2+1) # melspec with same shape\n\n plt.subplot(1,2,1)\n plt.imshow(mel.transpose(1,2).squeeze(), cmap='jet')\n\n p = 1 # always augment\n randp = np.random.uniform(0, 1)\n do_aug = p > randp\n if do_aug & train_mode: # apply augment\n print(\"augment image\")\n mel = spec_augment_pytorch.spec_augment(mel, time_warping_para=80, frequency_masking_para=54,\n time_masking_para=50, frequency_mask_num=1, time_mask_num=1)\n feat = mel.view(mel.shape[1], mel.shape[2]) # squeeze back to [frequency, time]\n feat = feat.transpose(0, 1).clone().detach()\n\n plt.subplot(1,2,2)\n plt.imshow(feat, cmap='jet')\n plt.show() # display it\n\n del stft, amag, mel\n return feat\n\n\nfilepath = \"./sample_dataset/train/train_data/wav_007.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)\n\nfilepath = \"./sample_dataset/train/train_data/wav_002.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)\n\nfilepath = \"./sample_dataset/train/train_data/wav_006.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)\n\nfilepath = \"./sample_dataset/train/train_data/wav_016.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)\n\nfilepath = \"./sample_dataset/train/train_data/wav_040.wav\"\nfeat = get_spectrogram_feature(filepath, train_mode=True)"
] | [
[
"numpy.random.uniform",
"torch.FloatTensor",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.plot",
"numpy.convolve",
"numpy.square",
"numpy.linspace"
]
] |
risteon/tf_quat2rot | [
"f308cab83e552300c274b733dd6cc5609269feb4"
] | [
"tf_quat2rot/check.py"
] | [
"# -*- coding: utf-8 -*-\n\n__author__ = \"\"\"Christoph Rist\"\"\"\n__email__ = \"[email protected]\"\n\nimport tensorflow as tf\n\n\ndef assert_normalized_quaternion(quaternion: tf.Tensor):\n with tf.control_dependencies(\n [\n tf.debugging.assert_near(\n tf.ones_like(quaternion[..., 0]),\n tf.linalg.norm(quaternion, axis=-1),\n message=\"Input quaternions are not normalized.\",\n )\n ]\n ):\n return tf.identity(quaternion)\n\n\ndef assert_valid_rotation(rotation_matrix: tf.Tensor):\n r = rotation_matrix\n with tf.control_dependencies(\n [\n tf.debugging.assert_near(\n tf.ones_like(rotation_matrix[..., 0, 0]),\n tf.linalg.det(rotation_matrix),\n message=\"Invalid rotation matrix.\",\n ),\n tf.debugging.assert_near(\n tf.linalg.matmul(r, r, transpose_a=True),\n tf.eye(3, batch_shape=tf.shape(r)[:-2], dtype=r.dtype),\n message=\"Invalid rotation matrix.\",\n ),\n ]\n ):\n return tf.identity(r)\n"
] | [
[
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.linalg.matmul",
"tensorflow.linalg.norm",
"tensorflow.identity",
"tensorflow.linalg.det"
]
] |
MichaelXcc/seldon-core | [
"e304ba28b9ef14bbda4f357bb145db2732a9e0a5"
] | [
"testing/scripts/test_benchmark.py"
] | [
"import json\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom google.protobuf import json_format\n\nfrom seldon_e2e_utils import post_comment_in_pr, run_benchmark_and_capture_results\n\n\[email protected]\[email protected](\"argo_worfklows\")\ndef test_service_orchestrator():\n\n sort_by = [\"apiType\", \"disableOrchestrator\"]\n\n data_size = 1_000\n data = [100.0] * data_size\n\n data_tensor = {\"data\": {\"tensor\": {\"values\": data, \"shape\": [1, data_size]}}}\n\n df = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n disable_orchestrator_list=[\"false\", \"true\"],\n image_list=[\"seldonio/seldontest_predict:1.10.0-dev\"],\n benchmark_data=data_tensor,\n )\n df = df.sort_values(sort_by)\n\n result_body = \"# Benchmark results - Testing Service Orchestrator\\n\\n\"\n\n orch_mean = all(\n (\n df[df[\"disableOrchestrator\"] == \"false\"][\"mean\"].values\n - df[df[\"disableOrchestrator\"] == \"true\"][\"mean\"].values\n )\n < 3\n )\n result_body += f\"* Orch added mean latency under 4ms: {orch_mean}\\n\"\n orch_nth = all(\n (\n df[df[\"disableOrchestrator\"] == \"false\"][\"95th\"].values\n - df[df[\"disableOrchestrator\"] == \"true\"][\"95th\"].values\n )\n < 5\n )\n result_body += f\"* Orch added 95th latency under 5ms: {orch_nth}\\n\"\n orch_nth = all(\n (\n df[df[\"disableOrchestrator\"] == \"false\"][\"99th\"].values\n - df[df[\"disableOrchestrator\"] == \"true\"][\"99th\"].values\n )\n < 10\n )\n result_body += f\"* Orch added 99th latency under 10ms: {orch_nth}\\n\"\n\n # We have to set no errors to 1 as the tools for some reason have 1 as base\n no_err = all(df[\"errors\"] <= 1)\n result_body += f\"* No errors: {no_err}\\n\"\n\n result_body += \"\\n### Results table\\n\\n\"\n result_body += str(df.to_markdown())\n post_comment_in_pr(result_body)\n\n assert orch_mean\n assert orch_nth\n\n\[email protected]\[email protected](\"argo_worfklows\")\ndef test_workers_performance():\n\n sort_by = [\"apiType\", \"serverWorkers\"]\n\n data_size = 10\n data = [100.0] * data_size\n\n data_tensor = {\"data\": {\"tensor\": {\"values\": data, \"shape\": [1, data_size]}}}\n\n df = run_benchmark_and_capture_results(\n api_type_list=[\"grpc\", \"rest\"],\n server_workers_list=[\"1\", \"5\", \"10\"],\n benchmark_concurrency_list=[\"10\", \"100\", \"1000\"],\n parallelism=\"1\",\n requests_cpu_list=[\"4000Mi\"],\n limits_cpu_list=[\"4000Mi\"],\n image_list=[\"seldonio/seldontest_predict:1.10.0-dev\"],\n benchmark_data=data_tensor,\n )\n df = df.sort_values(sort_by)\n\n result_body = \"# Benchmark results - Testing Workers Performance\\n\\n\"\n\n result_body += \"\\n### Results table\\n\\n\"\n result_body += str(df.to_markdown())\n post_comment_in_pr(result_body)\n\n\[email protected]\[email protected](\"argo_worfklows\")\ndef test_python_wrapper_v1_vs_v2_iris():\n\n sort_by = [\"concurrency\", \"apiType\"]\n benchmark_concurrency_list = [\"1\", \"50\", \"150\"]\n\n result_body = \"\"\n result_body += \"\\n# Benchmark Results - Python Wrapper V1 vs V2\\n\\n\"\n\n # Using single worker as fastapi also uses single worker\n df_pywrapper = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n protocol=\"seldon\",\n server_list=[\"SKLEARN_SERVER\"],\n benchmark_concurrency_list=benchmark_concurrency_list,\n model_uri_list=[\"gs://seldon-models/v1.12.0-dev/sklearn/iris\"],\n benchmark_data={\"data\": {\"ndarray\": [[1, 2, 3, 4]]}},\n )\n df_pywrapper = df_pywrapper.sort_values(sort_by)\n\n conc_idx = df_pywrapper[\"concurrency\"] == 1\n # 
Python V1 Wrapper Validations\n # Ensure base mean performance latency below 10 ms\n v1_latency_mean = all((df_pywrapper[conc_idx][\"mean\"] < 10))\n result_body += f\"* V1 base mean performance latency under 10ms: {v1_latency_mean}\\n\"\n # Ensure 99th percentiles are not spiking above 10ms\n v1_latency_nth = all(df_pywrapper[conc_idx][\"99th\"] < 10)\n result_body += f\"* V1 base 99th performance latency under 10ms: {v1_latency_nth}\\n\"\n # Ensure throughput is above 180 rps for REST\n v1_rps_rest = all(\n df_pywrapper[(df_pywrapper[\"apiType\"] == \"rest\") & conc_idx][\n \"throughputAchieved\"\n ]\n > 180\n )\n result_body += f\"* V1 base throughput above 180rps: {v1_rps_rest}\\n\"\n # Ensure throughput is above 250 rps for GRPC\n v1_rps_grpc = all(\n df_pywrapper[(df_pywrapper[\"apiType\"] == \"grpc\") & conc_idx][\n \"throughputAchieved\"\n ]\n > 250\n )\n result_body += f\"* V1 base throughput above 250rps: {v1_rps_grpc}\\n\"\n # Validate latency added by the service orchestrator is lower than 4ms\n\n # TODO: Validate equivalent of parallel workers in MLServer\n df_mlserver = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n model_name=\"classifier\",\n protocol=\"kfserving\",\n server_list=[\"SKLEARN_SERVER\"],\n model_uri_list=[\"gs://seldon-models/sklearn/iris-0.23.2/lr_model\"],\n benchmark_concurrency_list=benchmark_concurrency_list,\n benchmark_data={\n \"inputs\": [\n {\n \"name\": \"predict\",\n \"datatype\": \"FP32\",\n \"shape\": [1, 4],\n \"data\": [[1, 2, 3, 4]],\n }\n ]\n },\n benchmark_grpc_data_override={\n \"model_name\": \"classifier\",\n \"inputs\": [\n {\n \"name\": \"predict\",\n \"datatype\": \"FP32\",\n \"shape\": [1, 4],\n \"contents\": {\"fp32_contents\": [1, 2, 3, 4]},\n }\n ],\n },\n )\n # First we sort the dataframes to ensure they are compared correctly\n df_mlserver = df_mlserver.sort_values(sort_by)\n\n # Python V2 MLServer Validations\n\n conc_idx = df_mlserver[\"concurrency\"] == 1\n # Ensure all mean performance latency below 5 ms\n v2_latency_mean = all(df_mlserver[conc_idx][\"mean\"] < 5)\n result_body += f\"* V2 mean performance latency under 5ms: {v2_latency_mean}\\n\"\n # Ensure 99th percentiles are not spiking above 10ms\n v2_latency_nth = all(df_mlserver[conc_idx][\"99th\"] < 10)\n result_body += f\"* V2 99th performance latency under 10ms: {v2_latency_nth}\\n\"\n # Ensure throughput is above 250 rps for REST\n v2_rps_rest = all(\n df_mlserver[(df_mlserver[\"apiType\"] == \"rest\") & conc_idx][\"throughputAchieved\"]\n > 250\n )\n result_body += f\"* V2 REST throughput above 250rps: {v2_rps_rest}\\n\"\n # Ensure throughput is above 250 rps for GRPC\n v2_rps_grpc = all(\n df_mlserver[(df_mlserver[\"apiType\"] == \"grpc\") & conc_idx][\"throughputAchieved\"]\n > 250\n )\n result_body += f\"* V2 GRPC throughput above 250rps: {v2_rps_grpc}\\n\"\n\n result_body += \"\\n### Python V1 Wrapper Results table\\n\\n\"\n result_body += str(df_pywrapper.to_markdown())\n result_body += \"\\n\\n\\n### Python V2 MLServer Results table\\n\\n\"\n result_body += str(df_mlserver.to_markdown())\n\n post_comment_in_pr(result_body)\n\n assert v1_latency_mean\n assert v1_latency_nth\n assert v1_rps_rest\n assert v1_rps_grpc\n assert v2_latency_mean\n assert v2_latency_nth\n assert v2_rps_rest\n assert v2_rps_grpc\n\n\n@pytest.mark.benchmark\n@pytest.mark.usefixtures(\"argo_worfklows\")\ndef test_v1_seldon_data_types():\n\n sort_by = [\"concurrency\", \"apiType\"]\n\n # 10000 element array\n data_size = 10_000\n data = [100.0] * data_size\n\n 
benchmark_concurrency_list = [\"1\", \"50\", \"150\"]\n\n image_list = [\"seldonio/seldontest_predict:1.10.0-dev\"]\n\n data_ndarray = {\"data\": {\"ndarray\": data}}\n data_tensor = {\"data\": {\"tensor\": {\"values\": data, \"shape\": [1, data_size]}}}\n\n array = np.array(data)\n tftensor_proto = tf.make_tensor_proto(array)\n tftensor_json_str = json_format.MessageToJson(tftensor_proto)\n tftensor_dict = json.loads(tftensor_json_str)\n data_tftensor = {\"data\": {\"tftensor\": tftensor_dict}}\n\n df_ndarray = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n image_list=image_list,\n benchmark_concurrency_list=benchmark_concurrency_list,\n benchmark_data=data_ndarray,\n )\n df_ndarray = df_ndarray.sort_values(sort_by)\n\n df_tensor = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n image_list=image_list,\n benchmark_concurrency_list=benchmark_concurrency_list,\n benchmark_data=data_tensor,\n )\n df_tensor = df_tensor.sort_values(sort_by)\n\n df_tftensor = run_benchmark_and_capture_results(\n api_type_list=[\"rest\", \"grpc\"],\n image_list=image_list,\n benchmark_concurrency_list=benchmark_concurrency_list,\n benchmark_data=data_tftensor,\n )\n df_tftensor = df_tftensor.sort_values(sort_by)\n\n result_body = \"# Benchmark results - Testing Seldon V1 Data Types\\n\\n\"\n\n result_body += \"\\n### Results for NDArray\\n\\n\"\n result_body += str(df_ndarray.to_markdown())\n result_body += \"\\n### Results for Tensor\\n\\n\"\n result_body += str(df_tensor.to_markdown())\n result_body += \"\\n### Results for TFTensor\\n\\n\"\n result_body += str(df_tftensor.to_markdown())\n post_comment_in_pr(result_body)\n"
] | [
[
"numpy.array",
"tensorflow.make_tensor_proto"
]
] |
MATHplus-Young-Academy/P2-Cardiac-Motion | [
"844995e8e5760f981c425d13c0bd7f2f3bb8baec"
] | [
"NN_segmentation/tst_dataset.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 3 17:32:08 2020\n\n@author: apramanik\n\"\"\"\n\n\n\nimport numpy as np\nimport SimpleITK as sitk \nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport matplotlib.pyplot as plt\n\n\n\n#%% Functions\ndef normalize_img(img):\n img = img.copy().astype(np.float32)\n img -= np.mean(img)\n img /= np.std(img)\n return img\n\ndef crop_img(img):\n sizex = 144\n sizey = 144\n sizez = 8\n img = img.copy()\n sh = img.shape\n midptx = int(sh[2]/2)\n midpty = int(sh[3]/2)\n if sh[1]<8:\n residue=8-sh[1]\n a=np.zeros((sh[0],int(residue),144,144),dtype=np.float32)\n img=img[:,:,midptx-int(sizex/2):midptx+int(sizex/2),midpty-int(sizey/2):midpty+int(sizey/2)]\n img=np.concatenate((img,a),axis=1)\n else:\n midptz = int(sh[1]/2)\n img = img[:,midptz-int(sizez/2):midptz+int(sizez/2),midptx-int(sizex/2):midptx+int(sizex/2),midpty-int(sizey/2):midpty+int(sizey/2)]\n return img\n \ndef crop_label(img):\n sizex = 144\n sizey = 144\n sizez = 8\n img = img.copy()\n sh = img.shape\n midptx = int(sh[1]/2)\n midpty = int(sh[2]/2)\n if sh[0]<8:\n residue=8-sh[0]\n a=np.zeros((int(residue),144,144),dtype=np.float32)\n img=img[:,midptx-int(sizex/2):midptx+int(sizex/2),midpty-int(sizey/2):midpty+int(sizey/2)]\n img=np.concatenate((img,a),axis=0)\n else:\n midptz = int(sh[0]/2)\n img = img[midptz-int(sizez/2):midptz+int(sizez/2),midptx-int(sizex/2):midptx+int(sizex/2),midpty-int(sizey/2):midpty+int(sizey/2)]\n return img\n \ndef crop_img_paper(img):\n sizez = 8\n img = img.copy()\n sh = img.shape\n if sh[1]<8:\n residue=8-sh[1]\n a=np.zeros((sh[0],int(residue),sh[2],sh[3]),dtype=np.float32)\n img=np.concatenate((img,a),axis=1)\n else:\n midptz = int(sh[1]/2)\n img = img[:,midptz-int(sizez/2):midptz+int(sizez/2),:,:]\n return img\n \ndef crop_label_paper(img):\n sizez = 8\n img = img.copy()\n sh = img.shape\n if sh[0]<8:\n residue=8-sh[0]\n a=np.zeros((int(residue),sh[1],sh[2]),dtype=np.float32)\n img=np.concatenate((img,a),axis=0)\n else:\n midptz = int(sh[0]/2)\n img = img[midptz-int(sizez/2):midptz+int(sizez/2),:,:]\n return img\n\n#%%paths\n\n\n#%%dataset preparation\n\nclass cardiacdata_old(Dataset):\n\n def __init__(self, img_dir = \"./Datasets/patient005/patient005_4d.nii.gz\", label_dir = r'./Datasets/patient005/patient005_frame01_gt.nii.gz'): \n IMG_DIR = \"./Datasets\"\n \n ptnum=str(5).zfill(3) \n img_dir = IMG_DIR + '/patient'+ptnum+'/patient'+ptnum+'_4d.nii.gz'\n dummy_img = sitk.GetArrayFromImage(sitk.ReadImage(img_dir))\n dummy_img = crop_img(dummy_img)\n \n file = open(IMG_DIR + '/patient'+ptnum+'/'+\"Info.cfg\",\"r\")\n es=int(file.read().split(\"\\n\")[1].split(\":\")[1])\n es_str=str(es).zfill(2)\n gt_dir_es = IMG_DIR + '/patient'+ptnum+'/patient'+ptnum+'_frame'+es_str+'_gt.nii.gz'\n es_label = sitk.GetArrayFromImage(sitk.ReadImage(gt_dir_es))\n es_label = crop_label(es_label)\n\n file = open(IMG_DIR + '/patient'+ptnum+'/'+\"Info.cfg\",\"r\")\n ed=int(file.read().split(\"\\n\")[0].split(\":\")[1])\n ed_str=str(ed).zfill(2)\n gt_dir_ed = IMG_DIR + '/patient'+ptnum+'/patient'+ptnum+'_frame'+ed_str+'_gt.nii.gz'\n ed_label = sitk.GetArrayFromImage(sitk.ReadImage(gt_dir_ed))\n ed_label = crop_label(ed_label)\n\n a = dummy_img[ed-1:ed]\n b = dummy_img[es-1:es]\n dummy_img = np.concatenate((a,b),axis=0)\n dummy_img = normalize_img(dummy_img)\n\n ed_label = np.expand_dims(ed_label,axis=0)\n es_label = np.expand_dims(es_label,axis=0)\n dummy_gt = np.concatenate((ed_label,es_label),axis=0)\n\n \n self.img = 
np.expand_dims(np.reshape(dummy_img,[dummy_img.shape[0]*dummy_img.shape[1],dummy_img.shape[2],dummy_img.shape[3]]),axis=0) \n self.gt = np.expand_dims(np.reshape(dummy_gt,[dummy_gt.shape[0]*dummy_gt.shape[1],dummy_gt.shape[2],dummy_gt.shape[3]]),axis=0)\n self.len = self.img.shape[0]\n \n return\n \n def __len__(self):\n return self.len\n \n def __getitem__(self, i):\n \n img = self.img[i]\n gt = self.gt[i]\n \n img = torch.from_numpy(img.astype(np.float32)).unsqueeze(0)\n gt = torch.from_numpy(gt.astype(np.float32)).long()\n\n return img,gt\n \nclass cardiacdata(Dataset):\n\n def __init__(self, img_dir = \"./Datasets/patient005/patient005_4d.nii.gz\", label_dir = r'./Datasets/patient005/patient005_frame01_gt.nii.gz'): \n dummy_img = sitk.GetArrayFromImage(sitk.ReadImage(img_dir))\n dummy_img = np.squeeze(dummy_img)\n # print(dummy_img.shape)\n dummy_img = crop_img(dummy_img)\n # print(dummy_img.shape)\n dummy_img = normalize_img(dummy_img)\n \n if not label_dir is None:\n dummy_gt = sitk.GetArrayFromImage(sitk.ReadImage(label_dir))\n # print(dummy_gt.shape)\n dummy_gt = np.squeeze(dummy_gt)\n dummy_gt = crop_img(dummy_gt)\n \n \n \n self.img = np.expand_dims(np.reshape(dummy_img,[dummy_img.shape[0]*dummy_img.shape[1],dummy_img.shape[2],dummy_img.shape[3]]),axis=0) \n if not label_dir is None:\n self.gt = np.expand_dims(np.reshape(dummy_gt,[dummy_gt.shape[0]*dummy_gt.shape[1],dummy_gt.shape[2],dummy_gt.shape[3]]),axis=0)\n else:\n self.gt = np.zeros(self.img.shape)\n self.len = self.img.shape[0]\n \n return\n \n def __len__(self):\n return self.len\n \n def __getitem__(self, i):\n \n img = self.img[i]\n gt = self.gt[i]\n \n img = torch.from_numpy(img.astype(np.float32)).unsqueeze(0)\n gt = torch.from_numpy(gt.astype(np.float32)).long()\n\n return img,gt\n \n \nif __name__ == \"__main__\":\n dataset = cardiacdata()\n loader = DataLoader(dataset, shuffle=False, batch_size=1)\n count=0\n for step, (img, gt) in enumerate(loader):\n count=count+1\n print('img shape is:', img.shape)\n print('gt shape is:', gt.shape)\n fig, axes = plt.subplots(1,2)\n pos = axes[0].imshow(img[0,0,2,])\n pos = axes[1].imshow(gt[0,2,])\n plt.show()\n #break\n "
] | [
[
"torch.utils.data.DataLoader",
"numpy.squeeze",
"numpy.zeros",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"numpy.expand_dims",
"matplotlib.pyplot.show",
"numpy.std",
"numpy.concatenate",
"numpy.mean"
]
] |
AndreaCossu/ContinualLearning_RecurrentNetworks | [
"8cbc247f1f660f7acb94868696d128e538ad72f4"
] | [
"clutils/datasets/QuickDraw.py"
] | [
"import torch\nimport numpy as np\nimport os\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom .utils import collate_sequences\n\n\nNORMALIZER = { # (mu, std) per class computed on the concatenation of both features (discarding the binary feature)\n 'hot dog': (1.3554527691145501, 55.15028414343622),\n 'palm tree': (-0.7063322505461493, 49.02700047706162),\n 'moon': (0.8036297226693486, 41.345375756324735),\n 'envelope': (4.900210171097034, 69.4196392054246),\n 'dumbbell': (2.0407119932197504, 47.695996108391235),\n 'microwave': (2.9699868328411974, 66.93104801889736),\n 'onion': (1.2284401861051968, 45.21653229074296),\n 'nail': (1.6172277953943177, 62.21706638258232),\n 'paper clip': (0.6436449511025123, 33.32139677497804),\n 'soccer ball': (0.6708907017116656, 39.52546034271634),\n 'drill': (1.1185769827821401, 48.722276882610934),\n 'telephone': (1.3498681969396034, 40.76261400934935),\n 'airplane': (1.0251489388319772, 53.19602656498733),\n 'dishwasher': (2.2191710394266084, 61.508456849155735),\n 'chair': (4.016188671169509, 62.53028260788498),\n 'grass': (3.376122464598659, 69.31003265138725),\n 'rhinoceros': (1.2215264458448767, 48.80834840225656),\n 'octopus': (0.8146148966002359, 40.89244147955804),\n 'cloud': (0.35621641218733063, 29.409365110585483),\n 'bicycle': (0.46389958129146036, 48.99489500128756),\n 'swan': (1.049680167563788, 42.94216649535794),\n 'picture frame': (1.5065118700885085, 63.253773000519175),\n 'shorts': (1.9229859470161206, 52.22414095061445),\n 'flying saucer': (1.5281540318557478, 55.091686319872025),\n 'basketball': (1.6894072481767088, 57.72410547176846),\n 'harp': (2.906289433329914, 78.44568624724737),\n 'beard': (0.9775846803866044, 40.10763763299041),\n 'binoculars': (0.808846681416587, 47.034367710374035),\n 'tiger': (0.9438875155470355, 50.66921493109194),\n 'book': (2.5133103399080188, 65.60820901501357),\n 'scissors': (0.6647841622339, 40.07199951607956),\n 'raccoon': (0.7915126973835728, 43.36239169880799),\n 'peanut': (0.5509739234166906, 30.524261515788336),\n 'wheel': (0.7686023820927692, 47.607169012136815),\n 'trombone': (1.0112428613309314, 52.411164111718705),\n 'diamond': (2.395434500084604, 59.73484161759297),\n 'parachute': (2.056040072916103, 60.77205525434674),\n 'tractor': (0.5855071624918279, 50.5522849539403),\n 'windmill': (0.24800006974356498, 52.12209721342569),\n 'alarm clock': (0.391438978240927, 41.44493991046053),\n 'clarinet': (1.2795783017970905, 49.905620294236705),\n 'spider': (0.6505395210719399, 51.18743252881025),\n 'violin': (0.8724565090414226, 52.533813964768754),\n 'clock': (1.6654012441543409, 37.33134444355261),\n 'tent': (3.3092329281631137, 79.47572994387069),\n 'belt': (2.3132169051670886, 71.13105919924993),\n 'map': (2.6555302484638714, 61.11029370697819),\n 'toe': (0.5471757615653022, 43.27192861865762),\n 'bread': (1.3686935317654665, 47.6839114556787),\n 'kangaroo': (0.6918159454175237, 35.99155678225584),\n 'camera': (1.4527253130110975, 49.336211227235296),\n 'duck': (1.598900790833744, 41.45077993986563),\n 'lipstick': (0.41066758960159977, 41.786372987299615),\n 'snowman': (0.14670998509400804, 31.624590642386174),\n 'pickup truck': (1.6892820330935685, 54.644954488199524),\n 'radio': (1.1157698308056494, 56.49502963911298),\n 'truck': (1.814018865712332, 55.76992610437815),\n 'train': (1.470463028668502, 77.63271694640828),\n 'teapot': (0.9014336302825292, 37.48169241933444),\n 'tree': (0.13337780967798976, 42.97342154517355),\n 'hourglass': (2.39448903480218, 52.622226862393084),\n 
'eyeglasses': (1.379818588483046, 52.57994649640675),\n 'church': (0.9630982672082059, 69.99862099910592),\n 'submarine': (0.9138035290673335, 56.77613283220326),\n 'couch': (2.283752727373058, 68.77383224311272),\n 'umbrella': (0.5170775226020248, 47.83678678400117),\n 'whale': (0.3497782843722267, 52.43513503159438),\n 'cooler': (2.19778540728888, 61.74770130955316),\n 'sword': (0.5910085971920176, 48.46440617862079),\n 'table': (4.542251159698462, 87.48848948789511),\n 'skull': (0.6570416475324352, 36.02607871443743),\n 'house': (2.654399100012597, 66.81448281800678),\n 'blackberry': (0.3455386008050086, 29.1600574174796),\n 'bush': (0.7558370448198207, 41.04289142315455),\n 'giraffe': (1.4011522715876905, 46.32335477059355),\n 'rainbow': (4.702348561309779, 82.07143165031478),\n 'yoga': (1.1423119096294918, 50.79902795898255),\n 'mailbox': (0.3511077577743624, 55.61495444057362),\n 'wristwatch': (1.0924273980760375, 49.96303380973288),\n 'The Eiffel Tower': (1.2008260944995623, 73.04798400687072),\n 'syringe': (1.6277984132013836, 56.22798342770764),\n 'bulldozer': (1.060340370028316, 50.385030079706375),\n 'door': (3.36173249421909, 66.2933191994613),\n 'zebra': (1.132649710524639, 52.459089632246396),\n 'beach': (2.263430578427388, 106.91064036288513),\n 'crown': (0.7879512551564102, 50.206077053610386),\n 'screwdriver': (1.1442268550285573, 46.07164154904856),\n 'bear': (0.9395651847291722, 36.986932048274426),\n 'sink': (0.9422909049727696, 47.54959424164138),\n 'teddy-bear': (0.7913359738933313, 33.36477019938705),\n 'square': (1.3275239412907043, 65.89863242901156),\n 'cruise ship': (2.15931097599974, 78.36615495337965),\n 'waterslide': (3.4486527614397833, 83.83125777723943),\n 'elbow': (4.092940205383508, 61.758770494053785),\n 'stereo': (1.8269654619223092, 58.475208066649714),\n 'sweater': (1.067301637554828, 44.59577281486724),\n 'bandage': (1.4032828202796717, 49.86169830574158),\n 'bat': (0.8121797269039484, 37.69212883824029),\n 'The Mona Lisa': (1.6676774598611082, 58.162407907625514),\n 'sea turtle': (0.7386565039500725, 46.86228560536563),\n 'butterfly': (0.4342721164650034, 37.484845221008726),\n 'mosquito': (0.6493471316616555, 40.10938957349605),\n 'tennis racquet': (0.015468185574502526, 62.24923783294656),\n 'tornado': (0.7822439181013964, 45.2077352961338),\n 'computer': (3.0811423630717107, 60.20403781306317),\n 'bridge': (3.679091358194862, 120.07641800536442),\n 'toothbrush': (1.2915788907996562, 53.22105425492547),\n 'baseball bat': (0.410479892106175, 39.02003924116569),\n 'bench': (4.462592927663926, 85.6302327587279),\n 'finger': (-0.6637118888775841, 49.09874846625699),\n 'canoe': (2.9733556427417493, 68.6835039501244),\n 'baseball': (1.6959011615443051, 45.45130310748645),\n 'circle': (-0.39852915378672893, 31.77419572565473),\n 'banana': (1.3562427804512358, 42.94349204337924),\n 'bathtub': (2.3570421946852544, 65.3192157735626),\n 'axe': (1.0293065652442999, 54.84964062528346),\n 'lantern': (1.1476541043730428, 53.67189040723054),\n 'birthday cake': (0.15146259492252578, 55.89568012892327),\n 'castle': (1.804799071214271, 63.20589225473029),\n 'wine bottle': (1.034291851931799, 44.04598147244387),\n 'ant': (0.9303194592264448, 34.43552547266363),\n 'The Great Wall of China': (4.285709330181438, 131.23951199298298),\n 'bee': (0.43095116254566934, 40.56855963179127),\n 'apple': (-0.44780125973592305, 30.208033668691396),\n 'arm': (1.7757119621507091, 46.692967793920644),\n 'asparagus': (-0.2421902384249924, 48.97218720603324),\n 'angel': 
(0.5489444327750316, 41.66381961171915),\n 'cup': (2.673919370605991, 43.54406248924784),\n 'carrot': (0.6945175048056408, 46.104020850556616),\n 'bucket': (1.7396654767172537, 48.828570427954205),\n 'animal migration': (2.6285542168388782, 61.28180224245095),\n 'cell phone': (1.9267526020713346, 49.38973568488984),\n 'van': (1.9173825658872794, 54.8721828825201),\n 'dolphin': (0.9714616061928398, 42.83044052150523),\n 'bowtie': (1.168151585565935, 37.61503592501492),\n 'campfire': (0.2534712087647997, 42.286814756524535),\n 'ceiling fan': (1.0603067359693852, 40.52738328774831),\n 'boomerang': (0.5759666273292099, 39.78957492087158),\n 'aircraft carrier': (1.5172469688772912, 78.7478229402662),\n 'cactus': (-0.1281463623029328, 42.27573114632624),\n 'cake': (0.31108565857076187, 56.322115673526696),\n 'anvil': (1.471075424663743, 48.99321880248113),\n 'toothpaste': (1.8461911264030182, 51.53740072123023),\n 'swing set': (3.971684529151281, 98.99892200987023),\n 'feather': (-0.42952206263561854, 53.55639949373167),\n 'flashlight': (1.9317251715822668, 62.79624045193533),\n 'garden hose': (1.5452934595615202, 53.713569777275175),\n 'camel': (1.5165348305653266, 35.07846843003865),\n 'calculator': (1.991161645112966, 50.142844727554575),\n 'diving board': (1.7300484119947224, 75.61560569527323),\n 'chandelier': (1.991040877029286, 50.65396442677625),\n 'helmet': (1.9722019205320098, 45.87749730234627),\n 'squirrel': (0.729042851521045, 35.3641639039348),\n 'ambulance': (1.0598312283596059, 55.875842882074),\n 'bottlecap': (1.5970585109209756, 40.01592713375047),\n 'hospital': (1.7313904919786411, 72.37806984815816),\n 'coffee cup': (1.32151623967879, 41.665383540075005),\n 'watermelon': (1.8482342559051477, 59.31958622930048),\n 'dresser': (2.396722754292761, 79.1225545952145),\n 'bed': (2.588378888585306, 78.08870505568636),\n 'bird': (1.5906829218142842, 41.059856184169284),\n 'cookie': (0.7048879723978447, 34.29958258051739),\n 'underwear': (3.027964069514147, 54.355597943207094),\n 'drums': (1.1572575727426198, 54.68602043565278),\n 'cat': (0.9684180598296738, 43.19493215282525),\n 'calendar': (2.495118096854286, 82.1800159400022),\n 'bracelet': (0.4661401948292038, 31.127130949231766),\n 'eraser': (2.3490401085702235, 56.643670114244784),\n 'dog': (0.8907946320439043, 38.505287852990726),\n 'barn': (2.2770830828592583, 77.75086163641558),\n 'spoon': (0.5421543550003102, 37.016180276322515),\n 'sun': (-0.2008690561101928, 57.798300559005334),\n 'toilet': (1.291036016063847, 40.288417166228925),\n 'backpack': (1.3079276772602353, 46.33461078978928),\n 'trumpet': (1.233316766684717, 47.840050217395266),\n 'frying pan': (1.1317137754492954, 42.27197781360748),\n 'blueberry': (0.3428165650102726, 29.923143234478975),\n 'toaster': (1.3159036268033921, 59.46381954781093),\n 'floor lamp': (-1.4045719348973986, 52.73112796615196),\n 'crocodile': (1.2462846638010021, 51.83360295588419),\n 'police car': (0.6314716475098945, 51.402397657264785),\n 'cow': (0.6487350495428166, 44.82200063524666),\n 'basket': (1.781348034990179, 61.40405101602184),\n 'cello': (1.4380096549620986, 59.481368251629206),\n 'golf club': (2.935274820103259, 47.944997493610416),\n 'school bus': (1.3202131289388477, 61.70753264839142),\n 'hockey puck': (0.725588239742589, 48.55963543134594),\n 'fence': (3.8660243770815614, 92.36222788620427),\n 'donut': (0.431402194475543, 32.222374599013726),\n 'goatee': (1.2211961416317247, 39.81077215140121),\n 'traffic light': (1.269260032432163, 44.30942006032888),\n 'hamburger': 
(1.4103828007350085, 49.04022894395681),\n 'ear': (1.9563928536834947, 34.3747704500531),\n 'compass': (0.8636275744036599, 38.906947603746346),\n 'broccoli': (-0.08805269427735608, 30.880695648320078),\n 'skyscraper': (1.3477313197584702, 87.73974365488579),\n 'fan': (0.5595090068208328, 42.26975493031441),\n 'hot air balloon': (1.0010255829235684, 45.01559229242698),\n 'mountain': (5.349497596465423, 69.73739652862577),\n 'fork': (0.21995268515715857, 43.66291957421616),\n 'face': (1.1847102417517064, 41.81747854722619),\n 'crab': (0.5500211063457824, 48.30558365265961),\n 'ice cream': (0.5645385757011395, 41.72357855932428),\n 'foot': (1.6352285029716924, 40.86466847411941),\n 'hat': (2.1269765754849357, 53.181061994837336),\n 'candle': (-0.9566338163648833, 46.30537462785261),\n 'flip flops': (1.1195172002513105, 45.28787295602699),\n 'hammer': (0.40690889202283986, 45.31354440860368),\n 'guitar': (0.9118308447368665, 58.627968076179016),\n 'brain': (0.5667801625156502, 39.94893006675094),\n 'stove': (1.2695451153311437, 56.13115551721316),\n 'headphones': (1.7442579010033754, 38.05663003234409),\n 'flamingo': (1.3329066566304946, 44.20478550977875),\n 'flower': (0.014788800722293086, 28.686447255310085),\n 'bus': (1.5110163683563511, 65.58525727312637),\n 'hot tub': (0.9262199087425361, 63.37602990315963),\n 'elephant': (1.0286360401485168, 42.29328387209706),\n 'fire hydrant': (0.4353600099500871, 48.49174159770318),\n 'laptop': (2.5420362830209355, 63.093568635534155),\n 'leaf': (-0.07888685459428697, 51.531397540382116),\n 'potato': (0.7248796777877287, 36.04373128693473),\n 'hockey stick': (2.560198275283893, 47.75516557446046),\n 'lighter': (-0.10645657100081249, 38.600148168238576),\n 'hexagon': (2.7288170353096675, 50.79748328406929),\n 'garden': (0.881398058547382, 59.301002560708866),\n 'marker': (1.4847281646438588, 50.021490600998504),\n 'keyboard': (2.8496015722739236, 81.38936435354776),\n 'camouflage': (0.8524647599170719, 65.65432278791238),\n 'knee': (5.3541853695693575, 60.225209719801974),\n 'sheep': (1.2468686657122494, 35.19467195151128),\n 'microphone': (0.3006266208385552, 46.783442715555715),\n 'mushroom': (0.28405131561550195, 40.671965829362236),\n 'light bulb': (0.3093205629583717, 35.25819445171456),\n 'hand': (0.7429242999868996, 34.70475212985948),\n 'key': (0.7406380633244096, 34.13758650534517),\n 'house plant': (-0.4396176672108764, 40.515632771810296),\n 'eye': (0.8606006296728399, 44.889207403048424),\n 'matches': (0.3485948924638904, 47.42024782911991),\n 'broom': (2.9233557704577193, 49.52062851559808),\n 'knife': (1.4292202706092547, 51.01808033723662),\n 'crayon': (1.467668727844571, 51.82316360295973),\n 'ocean': (7.872452229036218, 89.99111246191521),\n 'dragon': (0.8266093687680877, 49.41364315921484),\n 'leg': (5.117580228531927, 54.01711580361819),\n 'horse': (0.9246973774561026, 48.65827974249926),\n 'zigzag': (9.770917367360767, 61.744673036996616),\n 'car': (1.1106827823007763, 47.60058589694208),\n 'grapes': (0.6046526027097275, 27.16306317679192),\n 'lightning': (4.090431090680993, 57.03172069825947),\n 'moustache': (1.7875824399413591, 37.731677498630745),\n 'mouth': (2.76090978291076, 57.20373539326289),\n 'vase': (0.5528729482101566, 36.996243257356014),\n 'fish': (0.8878609523273818, 44.34932077221152),\n 'string bean': (1.346485501392611, 54.7312484146683),\n 'lighthouse': (0.4274423658693314, 75.81546755799378),\n 'ladder': (5.90632648589332, 110.16555003310557),\n 'television': (1.3151946885305383, 62.90537952277926),\n 
'helicopter': (0.7111156159770702, 56.6546344981718),\n 'pillow': (2.0992806701392936, 55.274535278488294),\n 'pencil': (2.0345830706124053, 62.90446034037889),\n 'rollerskates': (2.0053135688983006, 39.31457668947572),\n 'jail': (5.661515872939487, 115.47255551897983),\n 'mermaid': (0.3187352763659362, 39.8221589482459),\n 'jacket': (2.0929497013270537, 50.6087533539712),\n 'megaphone': (1.8135468059177202, 54.66219701027781),\n 'nose': (4.435118108240006, 36.01419720778613),\n 'pants': (1.4927018991320877, 55.47801110072461),\n 'octagon': (2.0144474110553916, 49.61164954802588),\n 'pizza': (0.9106006910867426, 49.75334623210151),\n 'passport': (2.09209268126368, 53.80930291521799),\n 'pool': (2.06494328488252, 67.72608882496336),\n 'motorbike': (0.4038001637130562, 46.94203574972685),\n 'snake': (1.5154800788642753, 49.350623204522535),\n 'pond': (0.7752730687547197, 47.62409950756826),\n 'frog': (0.8874821595962438, 39.61840650901404),\n 'pig': (0.47576581658267675, 39.5924494951546),\n 'penguin': (1.0164857569517498, 40.88730060878002),\n 'cannon': (0.8927868329478531, 53.019935221920896),\n 'parrot': (1.6070485082553567, 43.38710309821747),\n 'lobster': (0.5829596663716866, 42.78511651754868),\n 'saw': (1.6178343188617499, 43.19552103419441),\n 'strawberry': (0.6209739512011668, 32.08453043500838),\n 'firetruck': (1.125536302973774, 65.91057171556372),\n 'speedboat': (2.0848389958987257, 76.42986457816829),\n 'popsicle': (0.4813981088599971, 42.49229183387311),\n 'hurricane': (0.7079895622313991, 61.715710494552816),\n 'see saw': (1.8249850934378673, 70.89383197689017),\n 'saxophone': (0.9072383454069756, 36.470803735437904),\n 'mug': (2.5296236017401257, 42.26283334121334),\n 'piano': (2.6469411517060806, 73.27448246359215),\n 'mouse': (0.8020204927469491, 43.836228689128035),\n 'power outlet': (2.071476276483809, 46.822370189887785),\n 'hedgehog': (0.4703510415238984, 45.92192258266138),\n 'oven': (1.8548425634903463, 62.43067850281194),\n 'shoe': (1.297356215372919, 41.93847714957883),\n 'rifle': (2.5223233995449474, 60.73555429659974),\n 'roller coaster': (2.6065332991832584, 86.95567387367467),\n 'peas': (0.7749159834453123, 42.94847025647788),\n 'lion': (0.4463371384240275, 34.510210963204415),\n 'rake': (3.442498762575747, 57.38005406297777),\n 'postcard': (3.700937086574, 69.8261672011201),\n 'sock': (1.9223557134218592, 43.2687682421636),\n 'purse': (1.6872172724499956, 48.85082993380252),\n 'sleeping bag': (1.2484033851490541, 52.138238912603775),\n 'skateboard': (2.4819607493229663, 53.19362309156825),\n 'necklace': (2.392666309866489, 41.3064841582455),\n 'stairs': (5.195938168639603, 47.15470516213574),\n 'lollipop': (0.10920444361594842, 38.89025105370695),\n 'snowflake': (2.3415722082063986, 68.96721342968107),\n 'rabbit': (0.9078200152038035, 34.75862482451542),\n 'owl': (1.2457620241823235, 42.73803624793326),\n 'shovel': (1.970015817486029, 45.419236670608626),\n 'pear': (-0.45220059964010495, 30.843347488001527),\n 'remote control': (1.1358869210694837, 44.83511889242796),\n 'star': (0.3626996748657054, 52.65011227641426),\n 'scorpion': (0.4161827603069684, 38.88321413686467),\n 'washing machine': (1.5518183007862645, 51.91417194144562),\n 'monkey': (0.9748166731813579, 38.60787650590758),\n 'pineapple': (0.562007915664679, 43.7000843939721),\n 'sandwich': (1.6847535599541337, 57.542891294247035),\n 'shark': (1.272828952833183, 49.334895742299615),\n 'sailboat': (1.309450897368411, 66.09322028103158),\n 'steak': (0.8908929135892587, 46.82398060648129),\n 
'stethoscope': (2.300526882061146, 43.63511505624273),\n 'wine glass': (2.1753360642422095, 42.95333738304328),\n 'smiley face': (1.4208837631558537, 43.864342591767816),\n 'streetlight': (-1.4343035375659503, 57.70810758721286),\n 'squiggle': (5.131557337201909, 48.02532522224354),\n 'stop sign': (1.3327274061718097, 42.78360537094287),\n 'line': (40.59167311123959, 112.02341955570965),\n 'pliers': (0.796279030471497, 45.67250007650481),\n 'paint can': (1.3512234721466652, 47.35796924253278),\n 'panda': (0.5475608600999033, 33.69643785103632),\n 'paintbrush': (0.20347385695100456, 47.341806442823824),\n 't-shirt': (0.9831120778329658, 42.21114938247829),\n 'fireplace': (1.3117628588460688, 61.01045131707993),\n 'river': (5.610367142280469, 117.56790294876312),\n 'snorkel': (1.2366543753832537, 43.709326082973476),\n 'rain': (3.6461954118834403, 61.31247784406768),\n 'triangle': (1.1218274781431306, 64.34926695455631),\n 'suitcase': (1.9098774305372213, 57.805580971303506),\n 'stitches': (4.142179481399166, 79.85573072340479),\n 'tooth': (0.7350361072423909, 34.97655580063578),\n 'snail': (0.3764966115255877, 34.91367713422217),\n 'spreadsheet': (4.333452826793876, 134.8852997594341)\n}\n\nclass QuickDrawDataset(torch.utils.data.Dataset):\n def __init__(self, data_dict, normalizers, task_vector=None):\n\n self.data_dict = data_dict\n self.normalizers = normalizers\n self.task_vector = task_vector\n\n self.patterns_per_class = [len(v) for k,v in self.data_dict.items()]\n\n self.min_class_id = min(list(self.data_dict.keys()))\n\n def __getitem__(self, idx):\n # select class based on idx\n class_id = None\n curr_idx = idx\n ppc = [0] + self.patterns_per_class\n\n for i in range(1, len(ppc)):\n if curr_idx < ppc[i]:\n class_id = self.min_class_id + (i - 1)\n break\n elif curr_idx == ppc[i]:\n curr_idx -= ppc[i]\n class_id = self.min_class_id + i\n break\n else:\n curr_idx -= ppc[i]\n\n if class_id is None:\n raise IndexError('Out of range when indexing QuickDraw!')\n\n # normalize\n x_cur = torch.from_numpy(self.data_dict[class_id][curr_idx]).float() #/ self.normalizers[class_id][1]\n y_cur = torch.tensor(class_id).long()\n\n if self.task_vector is not None:\n x_cur = torch.cat((self.task_vector.unsqueeze(0).repeat(x_cur.size(0),1), x_cur), dim=1)\n\n return x_cur, y_cur\n\n def __len__(self):\n return sum(self.patterns_per_class)\n\n\nclass CLQuickDraw():\n def __init__(self, root, train_batch_size, test_batch_size,\n len_task_vector=0, task_vector_at_test=False):\n\n self.root = root\n self.train_batch_size = train_batch_size\n self.test_batch_size = test_batch_size\n\n self.len_task_vector = len_task_vector\n self.task_vector_at_test = task_vector_at_test\n\n self.dataloaders = []\n\n self.current_class_id = 0\n\n def _load_data(self, classes):\n train_dict, test_dict, normalizer = {}, {}, {}\n for classname in classes:\n feature = np.load(os.path.join(self.root, f\"{classname}.npz\"), encoding='latin1', allow_pickle=True)\n train, test = feature['train'], feature['test'] # discard feature['valid'] because we don't need it\n train_dict[self.current_class_id] = train\n test_dict[self.current_class_id] = test\n normalizer[self.current_class_id] = NORMALIZER[classname]\n self.current_class_id += 1\n\n return train_dict, test_dict, normalizer\n\n def get_task_loaders(self, classes=None, task_id=None):\n\n if classes is not None:\n train, test, normalizer = self._load_data(classes)\n\n if self.len_task_vector > 0:\n task_vector = torch.zeros(self.len_task_vector).float()\n 
task_vector[len(self.dataloaders)] = 1.\n else:\n task_vector = None\n\n train_dataset = QuickDrawDataset(train, normalizer,\n task_vector=task_vector)\n test_dataset = QuickDrawDataset(test, normalizer,\n task_vector=task_vector if self.task_vector_at_test else None)\n\n train_batch_size = len(train_dataset) if self.train_batch_size == 0 else self.train_batch_size\n test_batch_size = len(test_dataset) if self.test_batch_size == 0 else self.test_batch_size\n\n train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, drop_last=True,\n collate_fn=collate_sequences)\n test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=False, drop_last=True,\n collate_fn=collate_sequences)\n\n self.dataloaders.append([train_loader, test_loader])\n\n return train_loader, test_loader\n\n elif task_id is not None:\n return self.dataloaders[task_id]"
] | [
[
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.tensor"
]
] |
zawecha1/arXiv2020-RIFE | [
"8eb622a150bd3bf0e773033cbba4728e64340ba1"
] | [
"dataset.py"
] | [
"import cv2\nimport ast\nimport torch\nimport numpy as np\nimport random\nfrom torch.utils.data import DataLoader, Dataset\n\ncv2.setNumThreads(1)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nclass VimeoDataset(Dataset):\n def __init__(self, dataset_name, batch_size=32):\n self.batch_size = batch_size\n self.dataset_name = dataset_name\n self.load_data()\n self.h = 256\n self.w = 448\n self.data_root = 'vimeo_triplet'\n self.image_root = os.path.join(self.data_root, 'sequences')\n train_fn = os.path.join(self.data_root, 'tri_trainlist.txt')\n test_fn = os.path.join(self.data_root, 'tri_testlist.txt')\n with open(train_fn, 'r') as f:\n self.trainlist = f.read().splitlines()\n with open(test_fn, 'r') as f:\n self.testlist = f.read().splitlines() \n\n def __len__(self):\n return len(self.meta_data)\n\n def load_data(self):\n if self.dataset_name == 'train':\n self.meta_data = self.trainlist\n else:\n self.meta_data = self.testlist\n\n def aug(self, img0, gt, img1, h, w):\n ih, iw, _ = img0.shape\n x = np.random.randint(0, ih - h + 1)\n y = np.random.randint(0, iw - w + 1)\n img0 = img0[x:x+h, y:y+w, :]\n img1 = img1[x:x+h, y:y+w, :]\n gt = gt[x:x+h, y:y+w, :]\n return img0, gt, img1\n\n def getimg(self, index):\n imgpath = self.meta_data[index]\n imgpaths = [imgpath + '/im1.png', imgpath + '/im2.png', imgpath + '/im3.png']\n\n # Load images\n img0 = cv2.imread(imgpaths[0])\n gt = cv2.imread(imgpaths[1])\n img1 = cv2.imread(imgpaths[2])\n return img0, gt, img1\n \n def __getitem__(self, index): \n img0, gt, img1 = self.getimg(index)\n if self.dataset_name == 'train':\n img0, gt, img1 = self.aug(img0, gt, img1, 224, 224)\n if random.uniform(0, 1) < 0.5:\n img0 = img0[:, :, ::-1]\n img1 = img1[:, :, ::-1]\n gt = gt[:, :, ::-1]\n if random.uniform(0, 1) < 0.5:\n img0 = img0[::-1]\n img1 = img1[::-1]\n gt = gt[::-1]\n if random.uniform(0, 1) < 0.5:\n img0 = img0[:, ::-1]\n img1 = img1[:, ::-1]\n gt = gt[:, ::-1]\n if random.uniform(0, 1) < 0.5:\n tmp = img1\n img1 = img0\n img0 = tmp\n img0 = torch.from_numpy(img0.copy()).permute(2, 0, 1)\n img1 = torch.from_numpy(img1.copy()).permute(2, 0, 1)\n gt = torch.from_numpy(gt.copy()).permute(2, 0, 1)\n return torch.cat((img0, img1, gt), 0)\n"
] | [
[
"torch.cuda.is_available",
"torch.cat",
"numpy.random.randint"
]
] |
vessemer/LungCancerDetection | [
"b1810c608896406abf8964298c0dd9ccf4816008"
] | [
"Scripts/extract_features.py"
] | [
"import sys\nsys.path.append('../')\nsys.path.append('../support/')\nfrom scipy.ndimage.measurements import label\nfrom scipy.ndimage import interpolation\nfrom time import time\nfrom glob import glob\nimport timeit\nfrom os.path import join, basename, isfile\nfrom tqdm import tqdm\nfrom paths import *\nfrom ct_reader import *\nimport dicom\nfrom scipy.misc import imresize\nfrom multiprocessing import Pool\nimport pickle\nfrom paths import *\nfrom scipy.ndimage import morphology\n# import seaborn as sns\nimport pandas as pd\nfrom numpy import *\n\n\n# In[2]:\n\ndef read_ct(path, ret_xy_spacing=False):\n patient = read_ct_scan(path)\n image = get_pixels_hu(patient)\n image[image == image[0,0,0]] = 0\n \n if ret_xy_spacing:\n try:\n return image, patient[0].PixelSpacing[0]\n except AttributeError:\n return image, scan.GetSpacing()[0]\n \n return image\n\n\n# In[3]:\n\ndef label_nodules(enhanced):\n isolated = enhanced.copy()\n isolated[(isolated == 4)\n |(isolated == 2)\n |(isolated == 6)] = 0\n isolated, _ = label(isolated)\n\n vascular = enhanced.copy()\n vascular[(vascular == 1)\n |(vascular == 2)\n |(vascular == 3)] = 0\n vascular, _ = label(vascular)\n\n plural = enhanced.copy()\n plural[(plural == 1)\n |(plural == 4)\n |(plural == 5)] = 0\n plural, _ = label(plural)\n return isolated, vascular, plural\n\n\n# In[4]:\n\ndef mask_features(mask,sp_mask):\n volumes = bincount(mask.flatten())\n zone_volumes = bincount(sp_mask.flatten())\n ans = dict()\n for i in range(16):\n try:\n ans['volume' + str(i)] = volumes[i]\n except:\n ans['volume' + str(i)] = 0 \n for i in range(7):\n ans['z_volume' + str(i)] = zone_volumes[i]\n ans['l//r'] = volumes[1] / volumes[2] if(volumes[2]) else 0.0\n ans['lungoverlap//l'] = volumes[3] / volumes[1] if(volumes[1]) else 0.0\n ans['br_overlap//l'] = volumes[5] / volumes[1] if(volumes[1]) else 0.0\n ans['br_overlap//r'] = volumes[6] / volumes[2] if(volumes[2]) else 0.0\n ans['tr_overlap//l'] = volumes[9] / volumes[1] if(volumes[1]) else 0.0\n ans['tr_overlap//r'] = volumes[10] / volumes[2] if(volumes[2]) else 0.0\n ans['br_tr_overlap//tr'] = volumes[12] / volumes[8] if(volumes[8]) else 0.0\n ans['z_volume_1//2'] = zone_volumes[1] / zone_volumes[2]\n ans['z_volume_2//3'] = zone_volumes[2] / zone_volumes[3]\n ans['z_volume_4//5'] = zone_volumes[4] / zone_volumes[5]\n ans['z_volume_5//6'] = zone_volumes[5] / zone_volumes[6]\n return ans\n\n\n# In[5]:\n\ndef if_left(mask):\n return in1d(mask,[1,3,5,7,9,11,13,15]).reshape(mask.shape)\n \ndef if_right(mask):\n return in1d(mask,[2,3,6,7,10,11,14,15]).reshape(mask.shape)\n\ndef split_mask(mask):\n mn1 = where(if_left(mask))[0].min()\n mx1 = where(if_left(mask))[0].max()\n mn2 = where(if_right(mask))[0].min()\n mx2 = where(if_right(mask))[0].max()\n height1 = int((mx1-mn1)/3.0)\n height2 = int((mx2-mn2)/3.0)\n mask_zones = zeros(mask.shape)\n mask_zones[mn1:mn1+height1,:,:] = 1 \n mask_zones[mn1+height1:mn1+2*height1,:,:] = 2\n mask_zones[mn1+2*height1:mx1,:,:] = 3\n mask_l = if_left(mask)*mask_zones\n mask_zones = zeros(mask.shape)\n mask_zones[mn2:mn2+height2,:,:] = 4\n mask_zones[mn2+height2:mn2+2*height2,:,:] = 5\n mask_zones[mn2+2*height2:mx2,:,:] = 6\n return (mask_l + if_right(mask) * mask_zones).astype('int8')\n\n\n# In[6]:\n\ndef merge(enhanced, mask):\n return 8 * mask + enhanced\n\n\n# In[7]:\n\ndef collect_stats(enhanced,mask,sp_mask):\n prev_time = time()\n l_enhanced = enhanced * if_left(mask)\n r_enhanced = enhanced * if_right(mask)\n \n \n# print('split_mask ',time()-prev_time)\n# prev_time = time()\n \n 
enh_areas = bincount(enhanced.flatten())[1:]\n    enh_l_areas = bincount(l_enhanced.flatten())[1:]\n    enh_r_areas = bincount(r_enhanced.flatten())[1:]\n    \n    enh_areas_zones = list()\n    for i in range(1,7):\n        enh_areas_zones.append(bincount((enhanced * (sp_mask == i)).flatten())[1:])\n#     enh_l_areas = concatenate((enh_areas_zones[1][enh_areas_zones[1]>0],\n#                                enh_areas_zones[2][enh_areas_zones[2]>0],\n#                                enh_areas_zones[0][enh_areas_zones[0]>0]))\n#     enh_r_areas = concatenate((enh_areas_zones[4][enh_areas_zones[4]>0],\n#                                enh_areas_zones[5][enh_areas_zones[5]>0],\n#                                enh_areas_zones[3][enh_areas_zones[3]>0]))\n#     enh_areas = concatenate((enh_l_areas,enh_r_areas))\n#     print('bincounts ',time()-prev_time)\n#     prev_time = time()\n    \n    if not enh_areas.shape[0]:\n        max_areas = dict()\n        for i in range(5):\n            max_areas['max'+str(i)] = 0\n            max_areas['max_l'+str(i)] = 0\n            max_areas['max_r'+str(i)] = 0\n        zone_feats = dict()\n        for i in range(6):\n            zone_feats['amoun_z' + str(i+1)] = 0\n            zone_feats['sumarea_z' + str(i+1)] = 0\n        enh_comps_after_dil = dict()\n        for i in range(20):\n            enh_comps_after_dil['comps_dil'+str(i)] = 0\n            enh_comps_after_dil['comps_dil_l'+str(i)] = 0\n            enh_comps_after_dil['comps_dil_r'+str(i)] = 0\n        ans = dict((('areas', 0), ('amoun', 0), \n                    ('mean', 0), ('std', 0), ('median', 0), \n                    ('mean_not_min', 0), \n                    ('median_not_min', 0), \n                    ('modes', [0] * 9)))\n        ans.update(max_areas)\n        ans.update(enh_comps_after_dil)\n        ans.update(mask_features(mask,sp_mask))\n        ans.update(zone_feats)\n        return ans\n    \n    enh_amoun = enh_areas[enh_areas > 0].shape[0]\n    enh_amoun_l = enh_l_areas[enh_l_areas > 0].shape[0]\n    enh_amoun_r = enh_r_areas[enh_r_areas > 0].shape[0]\n    enh_amoun_zones = [x[x > 0].shape[0] for x in enh_areas_zones]\n    enh_area_sum_zones = [x[x > 0].sum() for x in enh_areas_zones]\n    \n    zone_feats = dict()\n    for i in range(6):\n        zone_feats['amoun_z' + str(i+1)] = enh_amoun_zones[i]\n        zone_feats['sumarea_z' + str(i+1)] = enh_area_sum_zones[i]\n    \n    enh_mean = mean(enh_areas)\n    enh_std = std(enh_areas)\n    enh_sort_areas = sorted(enh_areas[enh_areas > 0],reverse=True)\n    enh_sort_areas_l = sorted(enh_l_areas[enh_l_areas > 0],reverse=True)\n    enh_sort_areas_r = sorted(enh_r_areas[enh_r_areas > 0],reverse=True)\n    max_areas = dict()\n    for i in range(5):\n        try:\n            max_areas['max'+str(i)] = enh_sort_areas[i]\n        except:\n            max_areas['max'+str(i)] = 0 \n        try:\n            max_areas['max_l'+str(i)] = enh_sort_areas_l[i]\n        except:\n            max_areas['max_l'+str(i)] = 0 \n        try:\n            max_areas['max_r'+str(i)] = enh_sort_areas_r[i]\n        except:\n            max_areas['max_r'+str(i)] = 0\n    \n    enh_median = median(enh_areas)\n    enh_mean_not_min = enh_areas[enh_areas != enh_areas.min()].mean()\n    enh_median_not_min = median(enh_areas[enh_areas != enh_areas.min()])\n    modes = [2, 3, 4, 5, 6, 9, 12, 19, 37, 1e7]\n    enh_modes = [sum((enh_areas >= modes[i - 1]) \n                     & (modes[i] > enh_areas))\n                 for i in range(1, len(modes))]\n    \n#     print('stats ',time()-prev_time)\n#     prev_time = time()\n    \n    img = enhanced.copy()\n    enh_comps_after_dil = dict()\n    iter_num = 1\n    for i in range(iter_num):\n        labeled,label_num = label(img)\n        enh_comps_after_dil['comps_dil'+str(i)] = label_num\n        enh_comps_after_dil['comps_dil_l'+str(i)] = len(unique(labeled*if_left(mask)))\n        enh_comps_after_dil['comps_dil_r'+str(i)] = len(unique(labeled*if_right(mask)))\n        img = morphology.binary_dilation(img,structure=ones((5,5,5)))\n    labeled,label_num = label(img)\n    enh_comps_after_dil['comps_dil'+str(iter_num)] = label_num\n    enh_comps_after_dil['comps_dil_l'+str(iter_num)] = len(unique(labeled*if_left(mask)))\n    
enh_comps_after_dil['comps_dil_r'+str(iter_num)] = len(unique(labeled*if_right(mask)))\n\n# print('dil ',time()-prev_time)\n# prev_time = time()\n \n \n ans = dict((('areas', sum(enh_areas)), ('amoun', enh_amoun), \n ('mean', enh_mean), ('std', enh_std), ('median', enh_median), \n ('mean_not_min', enh_mean_not_min), \n ('median_not_min', enh_median_not_min),\n ('modes', enh_modes)))\n ans.update(max_areas)\n ans.update(enh_comps_after_dil)\n ans.update(mask_features(mask,sp_mask))\n ans.update(zone_feats)\n\n# print('mask_feats ',time()-prev_time)\n# prev_time = time()\n \n return ans\n\n\n# In[9]:\n\ndef operate(path):\n try:\n enhanced = load(join(PATH['STAGE_ENHANCED'], \n path + '.npy'))\n mask = load(join(PATH['STAGE_MASKS'], \n path + '.npy'))\n\n zoomfactor = [w / float(f) for w, f in zip(enhanced.shape, mask.shape)]\n mask = interpolation.zoom(mask, zoom=zoomfactor, order = 0, mode = 'nearest')\n isolated, vascular, plural = label_nodules(enhanced)\n sp_mask = split_mask(mask)\n save(join(PATH['STAGE_MASKS'], path), merge(enhanced,mask))\n return (path, collect_stats(isolated,mask,sp_mask)),\\\n (path, collect_stats(vascular,mask,sp_mask)),\\\n (path, collect_stats(plural,mask,sp_mask))\n except:\n pass\n return ((path, None), (path, None), (path, None))\n\n\n# In[ ]:\n\npatients = set([basename(path)[:32] for path in glob(join(PATH['STAGE_ENHANCED'], '*'))])\npatients = patients.difference(pickle.load(open(join(PATH['STAGE_MASKS'], 'still_erroneus_ncrash'), 'rb')))\nstats = list()\nCPU = 8\n#print('Start. ', len(patients))\nwith Pool(CPU) as pool:\n stats = pool.map(operate, list(patients))\n \n#print('Done.')\npath = join(PATH['STAGE_MASKS'], 'DATAFRAMES')\npickle.dump(stats, open(join(path, 'merged_stats_100'), 'wb'))\n\n"
] | [
[
"scipy.ndimage.interpolation.zoom",
"scipy.ndimage.measurements.label"
]
] |
takkii/Pylean | [
"d51595e2788e946d9a2492bbe7131e4ada19062f"
] | [
"analyze/ruby-dic3_ana.py"
] | [
"from os.path import expanduser\n\nimport dask.dataframe as dd\nimport os\nimport pandas as pd\nfrom pandas import DataFrame\n\n# ------------------------------- KEYWORD -------------------------------------------------------------------------\n\n\nhome = expanduser(\"~\")\n\nd1 = os.path.expanduser(\"~/.cache/dein/repos/github.com/takkii/go_straight/dict/\")\nd2 = os.path.expanduser(\"~/.config/nvim/.cache/dein/repos/github.com/takkii/go_straight/dict/\")\nd3 = os.path.expanduser(\"~/.config/nvim/repos/github.com/takkii/go_straight/dict/\")\n\nif os.path.isdir(d1):\n ruby_method = open(os.path.expanduser(\n \"~/.cache/dein/repos/github.com/takkii/go_straight/dict/ruby_dict\"))\nelif os.path.isdir(d1):\n ruby_method = open(os.path.expanduser(\n \"~/.config/nvim/repos/github.com/takkii/go_straight/dict/ruby_dict\"))\nelif os.path.isdir(d3):\n ruby_method = open(os.path.expanduser(\n \"~/.config/nvim/.cache/dein/repos/github.com/takkii/go_straight/dict/ruby_dict\"))\nelse:\n print(\"Please, Check the path of go_straight.\")\n\nindex_ruby = ruby_method.readlines()\nSeri = pd.Series(index_ruby)\nsort_ruby = Seri.sort_index()\ndata_ruby = DataFrame(sort_ruby)\nddf = dd.from_pandas(data=data_ruby, npartitions=1)\ndata = ddf.compute()\nprint(data)\nruby_method.close()\n\n# ------------------------------- KEYWORD -------------------------------------------------------------------------\n"
] | [
[
"pandas.Series",
"pandas.DataFrame"
]
] |
zilbermanor/functions | [
"a1ef1411089314b8a264a70077a64ea77ccc0558"
] | [
"sklearn_classifier/sklearn_classifier.py"
] | [
"import json\nimport os\nfrom importlib import import_module\nfrom inspect import getfullargspec, FullArgSpec\nfrom cloudpickle import dump, load\nimport itertools\n\nimport sklearn\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.utils.testing import all_estimators\nfrom sklearn.datasets import make_classification\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nfrom typing import Union, List, Any, Optional\nfrom mlrun.execution import MLClientCtx\nfrom mlrun.datastore import DataItem\nfrom mlrun.artifacts import PlotArtifact\n\nskversion = sklearn.__version__\n\nimport warnings\n\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\n\n\ndef _gcf_clear(plt):\n \"\"\"Utility to clear matplotlib figure\n\n Run this inside every plot method before calling any matplotlib\n methods\n\n :param plot: matloblib figure object\n \"\"\"\n plt.cla()\n plt.clf()\n plt.close()\n\n\ndef _create_class(pkg_class: str):\n \"\"\"Create a class from a package.module.class string\n\n :param pkg_class: full class location,\n e.g. \"sklearn.model_selection.GroupKFold\"\n \"\"\"\n splits = pkg_class.split(\".\")\n clfclass = splits[-1]\n pkg_module = splits[:-1]\n class_ = getattr(import_module(\".\".join(pkg_module)), clfclass)\n return class_\n\ndef _create_function(pkg_func: list):\n \"\"\"Create a function from a package.module.function string\n\n :param pkg_func: full function location,\n e.g. \"sklearn.feature_selection.f_classif\"\n \"\"\"\n splits = pkg_func.split(\".\")\n pkg_module = \".\".join(splits[:-1])\n cb_fname = splits[-1]\n pkg_module = __import__(pkg_module, fromlist=[cb_fname])\n function_ = getattr(pkg_module, cb_fname)\n return function_\n\ndef get_model_configs(\n my_models: Union[str, List[str]],\n class_key = \"CLASS\",\n fit_key = \"FIT\",\n meta_key = \"META\",\n) -> Union[dict, List[dict]]:\n \"\"\"build sklearn model configuration parameters\n \n Take (full) class name of an scikit-learn model \n and retrieve its `class` and `fit` parameters and\n their default values.\n \n Also returns some useful metadata values for the class\n \"\"\"\n # get a list of all sklearn estimators\n estimators = all_estimators()\n def _get_estimator(pkg_class):\n \"\"\"find a specific class in a list of sklearn estimators\"\"\"\n my_class = pkg_class.split('.')[-1]\n return list(filter(lambda x: x[0] == my_class, estimators))[0]\n\n # find estimators corresponding to my_models list\n my_estimators = []\n my_models = [my_models] if isinstance(my_models, str) else my_models\n for model in my_models:\n estimator_name, estimator_class = _get_estimator(model)\n my_estimators.append((estimator_name, estimator_class))\n\n # get class and fit specs\n estimator_specs = []\n for an_estimator in my_estimators:\n estimator_specs.append((an_estimator[0], # model only name\n getfullargspec(an_estimator[1]), # class params\n getfullargspec(an_estimator[1].fit), # fit params\n an_estimator[1])) # package.module.model\n\n model_configs = []\n\n for estimator in estimator_specs:\n model_json = {class_key: {}, fit_key: {}}\n fit_params = {}\n\n for i, key in enumerate(model_json.keys()):\n f = estimator[i+1]\n args_paired = []\n defs_paired = []\n\n # reverse the args since there are fewer defaults than args\n args = f.args\n args.reverse()\n n_args = len(args)\n\n defs = f.defaults\n if defs is None:\n defs = 
[defs]\n            defs = list(defs)\n            defs.reverse()\n            n_defs = len(defs)\n\n            n_smallest = min(n_args, n_defs)\n            n_largest = max(n_args, n_defs)\n\n            # build 2 lists that can be concatenated\n            for ix in range(n_smallest):\n                if args[ix] != \"self\":\n                    args_paired.append(args[ix])\n                    defs_paired.append(defs[ix])\n\n            for ix in range(n_smallest, n_largest):\n                if ix != 0 and args[ix] != \"self\":\n                    args_paired.append(args[ix])\n                    defs_paired.append(None)\n            # concatenate lists into appropriate structure\n            model_json[key] = dict(zip(reversed(args_paired), reversed(defs_paired)))\n\n        model_json[meta_key] = {}\n        model_json[meta_key]['sklearn_version'] = skversion\n        model_json[meta_key]['class'] = '.'.join([estimator[3].__module__, estimator[0]])\n        model_configs.append(model_json)\n    if len(model_configs) == 1:\n        # do we want to log this modified model as an artifact?\n        return model_configs[0]\n    else:\n        # do we want to log this modified model as an artifact?\n        return model_configs\n\ndef update_model_config(\n    config: dict,\n    new_class: dict,\n    new_fit: dict,\n    class_key: str = \"CLASS\",\n    fit_key: str = \"FIT\"\n):\n    \"\"\"Update model config json\n    \n    This function is essential since there are modifications in class\n    and fit params that must be made (callbacks are a good example, without\n    which there is no training history available)\n    \n    TODO: currently a model config contains 2 keys, but this will likely\n    expand to include other functions beyond class and fit. So need to expand \n    this to a list of Tuple(str, dict), where `str` corresponds to a key\n    in the model config and `dict` contains the params and their new values.\n    \n    :param config: original model definition containing 2 keys, CLASS and FIT\n    :param new_class: new class key-values\n    :param new_fit: new fit key-values\n    \"\"\"\n    config[class_key].update(new_class)\n    config[fit_key].update(new_fit)\n    \n    return config\n\ndef train_model(\n    context: MLClientCtx,\n    model_pkg_class: str,\n    data_key: Union[DataItem, str],\n    sample: int,\n    label_column: str,\n    model_key: str = \"model\",\n    test_size: float = 0.05,\n    train_val_split: float = 0.75,\n    test_set_key: str = \"test_set\",\n    rng: int = 1,\n    models_dir: str = \"models\",\n    plots_dir: str = \"plots\",\n    score_method: str = \"micro\",\n    class_params_updates: Union[DataItem, dict] = {},\n    fit_params_updates: Union[DataItem, dict] = {},\n) -> None:\n    \"\"\"train a classifier.\n\n    :param context:           the function context\n    :param model_pkg_class:   the model to train, e.g, 'sklearn.neural_networks.MLPClassifier'\n    :param data_key:          (\"raw\") name of raw data file\n    :param sample:            Selects the first n rows, or select a sample\n                              starting from the first. 
If negative <-1, select\n                              a random sample\n    :param label_column:      ground-truth (y) labels\n    :param model_key:         ('model') name of model in artifact store,\n                              points to a directory\n    :param test_size:         (0.05) test set size\n    :param train_val_split:   (0.75) Once the test set has been removed the\n                              training set gets this proportion.\n    :param test_set_key:      store the test data set under this key in the\n                              artifact store\n    :param rng:               (1) sklearn rng seed\n    :param models_dir:        models subfolder on artifact path\n    :param plots_dir:         plot subfolder on artifact path\n    :param score_method:      for multiclass classification\n    :param class_params_updates: update these scikit-learn classifier params,\n                              input as a dict\n    :param fit_params_updates: update scikit-learn fit parameters, input as\n                              a dict.\n    \"\"\"\n    # extract file name from DataItem\n    srcfilepath = str(data_key)\n\n    # artifacts are written relative to the context's artifact path\n    base_path = context.artifact_path\n    \n    # TODO: this should be part of data's metadata dealt with in another step get a data set, sample, etc...\n    # get all data or a sample\n    if (sample == -1) or (sample >= 1):\n        # get all rows, or contiguous sample starting at row 1.\n        raw = pq.read_table(srcfilepath).to_pandas().dropna()\n        labels = raw.pop(label_column)\n        raw = raw.iloc[:sample, :]\n        labels = labels.iloc[:sample]\n    else:\n        # grab a random sample\n        raw = pq.read_table(srcfilepath).to_pandas().dropna().sample(sample * -1)\n        labels = raw.pop(label_column)\n\n    # TODO: this should be part of data's metadata dealt with in another step\n    context.header = raw.columns.values\n    \n    # TODO: all of this should be part of a splitter component that does cv too, dealt with in another step\n    # make a hot encode copy of labels before the split, binarizing against the observed classes\n    yb = label_binarize(labels, classes=sorted(labels.unique()))\n    # double split to generate 3 data sets: train, validation and test\n    # with xtest,ytest set aside; the hot-encoded labels ride along as extra\n    # columns so a single split keeps rows aligned\n    x, xtest, y, ytest = train_test_split(np.concatenate([raw, yb], axis=1), labels, test_size=test_size, random_state=rng)\n    xtrain, xvalid, ytrain, yvalid = train_test_split(x, y, train_size=train_val_split, random_state=rng)\n    # extract the hot_encoded labels\n    ytrainb = xtrain[:, -yb.shape[1]:].copy()\n    xtrain = xtrain[:, :-yb.shape[1]].copy()\n    # extract the hot_encoded labels\n    yvalidb = xvalid[:, -yb.shape[1]:].copy()\n    xvalid = xvalid[:, :-yb.shape[1]].copy()\n    # extract the hot_encoded labels\n    ytestb = xtest[:, -yb.shape[1]:].copy()\n    xtest = xtest[:, :-yb.shape[1]].copy() \n    # set-aside test_set\n    test_set = pd.concat(\n        [pd.DataFrame(data=xtest, columns=context.header),\n         pd.DataFrame(data=ytest, columns=[label_column]),\n         pd.DataFrame(data=ytestb, columns=[label_column])],\n        axis=1,)\n    filepath = os.path.join(base_path, test_set_key + \".pqt\")\n    test_set.to_parquet(filepath, index=False)\n    context.log_artifact(test_set_key, local_path=test_set_key + \".pqt\")\n\n    # load the model config\n    model_config = get_model_configs(model_pkg_class)\n    # get update params if any\n    if isinstance(class_params_updates, DataItem):\n        class_params_updates = json.loads(class_params_updates.get())\n    if isinstance(fit_params_updates, DataItem):\n        fit_params_updates = json.loads(fit_params_updates.get())\n    # update the parameters \n    # add data to fit params\n    fit_params_updates.update({'X': xtrain,'y': ytrain})\n    model_config = update_model_config(model_config, class_params_updates, fit_params_updates)\n\n    # create class and fit using the updated CLASS and FIT parameter sets\n    ClassifierClass = _create_class(model_config[\"META\"][\"class\"])\n    model = ClassifierClass(**model_config[\"CLASS\"])\n    model.fit(**model_config[\"FIT\"])\n\n    # save model\n    filepath = os.path.join(base_path, 
f\"{models_dir}/{model_key}.pkl\")\n dump(model, open(filepath, \"wb\"))\n context.log_artifact(model_key, local_path=models_dir)\n\n # compute validation metrics\n ypred = model.predict(xvalid)\n y_score = model.predict_proba(xvalid)\n\n average_precision = average_precision_score(yvalidb,\n y_score,\n average=score_method)\n\n context.log_result(f\"accuracy\", float(model.score(xvalid, yvalid)))\n context.log_result(f\"rocauc\", roc_auc_score(yvalidb, y_score))\n context.log_result(f\"f1_score\", f1_score(yvalid, ypred,\n average=score_method))\n context.log_result(f\"avg_precscore\", average_precision)\n\n # validation plots\n \n plot_roc(context, yvalidb, y_score)\n plot_confusion_matrix(context, yvalid, ypred, key=\"confusion\", fmt=\"png\")\n\ndef plot_roc(\n context,\n y_labels,\n y_probs,\n key=\"roc\",\n plots_dir: str = \"plots\",\n fmt=\"png\",\n x_label: str = \"false positive rate\",\n y_label: str = \"true positive rate\",\n title: str = \"roc curve\",\n legend_loc: str = \"best\"\n):\n \"\"\"plot roc curves\n \n TODO: add averaging method (as string) that was used to create probs, \n display in legend\n \n :param context: the function context\n :param y_labels: ground truth labels, hot encoded for multiclass \n :param y_probs: model prediction probabilities\n :param key: (\"roc\") key of plot in artifact store\n :param plots_dir: (\"plots\") destination folder relative path to artifact path\n :param fmt: (\"png\") plot format\n :param x_label: (\"false positive rate\") x-axis labels\n :param y_label: (\"true positive rate\") y-axis labels\n :param title: (\"roc curve\") title of plot\n :param legend_loc: (\"best\") location of plot legend\n \"\"\"\n # don't bother if this doesn't work\n assert y_probs.shape == y_labels.shape\n \n # clear matplotlib current figure\n _gcf_clear(plt)\n \n # data accummulators by class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n \n # draw 45 degree line\n plt.plot([0, 1], [0, 1], \"k--\")\n \n # labelling\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.title(title)\n plt.legend(loc=legend_loc)\n \n # single ROC or mutliple\n for i in range(y_labels.shape[1]):\n fpr[i], tpr[i], _ = metrics.roc_curve(y_labels[:, i], y_probs[:, i], pos_label=1)\n roc_auc[i] = metrics.auc(fpr[i], tpr[i])\n plt.plot(fpr[i], tpr[i], label=f\"class {i}\")\n\n fname = f\"{plots_dir}/{key}.{fmt}\"\n plt.savefig(os.path.join(context.artifact_path, fname))\n context.log_artifact(PlotArtifact(key, body=plt.gcf()), local_path=fname)\n \n\ndef plot_confusion_matrix(\n context: MLClientCtx,\n labels,\n predictions,\n key: str = \"confusion_matrix\",\n plots_dir: str = \"plots\",\n colormap: str = \"Blues\",\n fmt: str = \"png\",\n sample_weight=None\n):\n \"\"\"Create a confusion matrix.\n Plot and save a confusion matrix using test data from a\n modelline step.\n \n See https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html\n \n TODO: fix label alignment\n TODO: consider using another packaged version\n TODO: refactor to take params dict for plot options\n\n :param context: function context\n :param labels: validation data ground-truth labels\n :param predictions: validation data predictions\n :param key: str\n :param plots_dir: relative path of plots in artifact store\n :param colormap: colourmap for confusion matrix\n :param fmt: plot format\n :param sample_weight: sample weights\n \"\"\"\n _gcf_clear(plt)\n \n cm = metrics.confusion_matrix(labels, predictions, sample_weight=None)\n sns.heatmap(cm, annot=True, cmap=colormap, 
square=True)\n\n fig = plt.gcf()\n fname = f\"{plots_dir}/{key}.{fmt}\"\n fig.savefig(os.path.join(context.artifact_path, fname))\n context.log_artifact(PlotArtifact(key, body=fig), local_path=fname)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.cla",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.gcf",
"sklearn.metrics.auc",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.title",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"sklearn.model_selection.train_test_split",
"sklearn.utils.testing.all_estimators"
]
] |
ByungKwanLee/AdversarialMemory | [
"d4cccfec4d370f975dffc826346b1a1a28916444"
] | [
"train/train_trade.py"
] | [
"#!/usr/bin/env python\n\n# numpy package\nimport numpy as np\n\n# torch package\nimport torch\nimport torchvision\nfrom torch.nn.functional import cross_entropy, softmax, log_softmax\n\n# basic package\nimport os\nimport sys\nsys.path.append('.')\nimport argparse\nfrom tqdm import tqdm\nfrom datetime import datetime\n\n# custom package\nfrom loader.argument_print import argument_print\nfrom loader.loader import dataset_loader, network_loader, attack_loader\n\n# cudnn enable\ntorch.backends.cudnn.benchmark = True\ntorch.backends.cudnn.enabled = True\n\n# argument parser\nparser = argparse.ArgumentParser(description='Joint Adversarial Defense')\nparser.add_argument('--lr', default=0.01, type=float, help='learning rate')\nparser.add_argument('--steps', default=10, type=int, help='adv. steps')\nparser.add_argument('--eps', required=True, type=float, help='max norm')\nparser.add_argument('--dataset', required=True, type=str, help='dataset name')\nparser.add_argument('--network', required=True, type=str, help='network name')\nparser.add_argument('--data_root', required=True, type=str, help='path to dataset')\nparser.add_argument('--epoch', default=60, type=int, help='epoch number')\nparser.add_argument('--attack', default='pgd', type=str, help='attack type')\nparser.add_argument('--save_dir', default='./experiment', type=str, help='save directory')\nargs = parser.parse_args()\n\n# loading dataset, network, and attack\ntrainloader, testloader = dataset_loader(args)\nnet = network_loader(args, mean=args.mean, std=args.std).cuda()\nattack = attack_loader(args, net)\n\n# Adam Optimizer with KL divergence, and Scheduling Learning rate\noptimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-2)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.2)\n\n# Setting checkpoint date time\ndate_time = datetime.today().strftime(\"%m%d%H%M\")\n\n# checkpoint_name\ncheckpoint_name = 'TRADE_'+args.network+'_'+args.dataset+'_'+date_time+'.pth'\n\n# argument print\nargument_print(args, checkpoint_name)\n\n\ndef train():\n\n # Modeling Adversarial Loss\n for epoch in range(args.epoch):\n\n # train environment\n net.train()\n\n print('\\n\\n[TRADE/Epoch] : {}'.format(epoch+1))\n\n total_cross_loss = 0\n correct = 0\n total = 0\n\n for batch_idx, (inputs, targets) in enumerate(tqdm(trainloader)):\n\n # dataloader parsing and generate adversarial examples\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # learning network parameters\n optimizer.zero_grad()\n adv_x = attack(inputs, targets) if args.eps != 0 else inputs\n cross_entropy_loss = cross_entropy(net(adv_x), targets)-(softmax(net(inputs).detach(),dim=1)*log_softmax(net(adv_x),dim=1)).sum(dim=1).mean()\n cross_entropy_loss.backward()\n optimizer.step()\n\n # validation\n pred = torch.max(net(adv_x).detach(), dim=1)[1]\n correct += torch.sum(pred.eq(targets)).item()\n total += targets.numel()\n\n # logging two types loss and total loss\n total_cross_loss += cross_entropy_loss.item()\n\n if batch_idx % 50 == 0 and batch_idx != 0:\n print('[TRADE/Train] Iter: {}, Acc: {:.3f}, CE: {:.3f}'.format(\n batch_idx, # Iter\n 100.*correct / total, # Acc\n total_cross_loss / (batch_idx+1) # CrossEntropy\n )\n )\n\n # Scheduling learning rate by stepLR\n scheduler.step()\n\n # Adversarial validation\n adversarial_test()\n\n # Save checkpoint file\n torch.save({\n 'epoch': epoch+1,\n 'model_state_dict': net.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'total_cross_entropy_loss' : 
total_cross_loss / (batch_idx+1)\n }, os.path.join(args.save_dir,checkpoint_name))\n\n # argument print\n argument_print(args, checkpoint_name)\n\n\ndef adversarial_test():\n\n correct = 0\n total = 0\n\n print('\\n\\n[TRADE/Test] Under Testing ... Wait PLZ')\n for batch_idx, (inputs, targets) in enumerate(tqdm(testloader)):\n\n # dataloader parsing and generate adversarial examples\n inputs, targets = inputs.cuda(), targets.cuda()\n adv_x = attack(inputs, targets) if args.eps != 0 else inputs\n\n # Evaluation\n outputs = net(adv_x).detach()\n\n # Test\n pred = torch.max(outputs, dim=1)[1]\n correct += torch.sum(pred.eq(targets)).item()\n total += targets.numel()\n\n print('[TRADE/Test] Acc: {:.3f}'.format(100.*correct / total))\n\n\nif __name__ == \"__main__\":\n train()"
] | [
[
"torch.optim.lr_scheduler.StepLR",
"torch.max"
]
] |
ShenDezhou/CAIL | [
"c4cfa98ab4ecedbce34a7a5a186830486047540c",
"c4cfa98ab4ecedbce34a7a5a186830486047540c"
] | [
"CAIL2021/slsb/selftest.py",
"CAIL2020/lawsplit/torch_server.py"
] | [
"import json\n\nimport pandas\nimport urllib3\nfrom classmerge import match\nfrom dataclean import cleanall\n\ndf = pandas.read_csv(\"dataset/valid-phase1.csv\")\nhttp = urllib3.PoolManager()\ncorrect = 0\nfor index, row in df.iterrows():\n label = row[0]\n title = row[1].replace(\".doc\",\"\").replace(\".docx\",\"\")\n content = cleanall(row[2])\n url = \"http://192.168.0.161:58080/z?1={}&2={}\".format(title, content)\n print(url)\n if len(url) > 9999:\n url = url[:9999]\n result = http.request('GET', url)\n result = json.loads(result.data)\n print(label, result['answer'][0])\n df.at[index, 'type1'] = result['answer'][0]\n df.at[index, 'title'] = title\n df.at[index, 'content'] = content\n if match(result['answer'][0], label):\n correct +=1\ndf.to_csv(\"eval/test-bert.csv\", index=False)\nprint('ACCURACY:{}%'.format(correct*100.0/len(df)))",
"import argparse\nimport logging\nimport time\n\nimport falcon\n\nfrom falcon_cors import CORS\nimport json\nimport waitress\n\nimport re\n\nimport pandas\nlogging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')\nlogger = logging.getLogger()\ncors_allow_all = CORS(allow_all_origins=True,\n allow_origins_list=['*'],\n allow_all_headers=True,\n allow_all_methods=True,\n allow_credentials_all_origins=True\n )\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '-p', '--port', default=58004,\n help='falcon server port')\n# parser.add_argument(\n# '-c', '--config_file', default='config/bert_config_l.json',\n# help='model config file')\nargs = parser.parse_args()\n# model_config=args.config_file\n#\n# MODEL_MAP = {\n# 'bert': BertForClassification,\n# 'cnn': CharCNN\n# }\n\n\nclass TorchResource:\n\n def __init__(self):\n logger.info(\"...\")\n\n self.rule = ' +第([^条]{1,7})条 (.*)'\n self.chapter = '第[一二三四五六七八九十]{1,3}分?[章编]'\n self.pattern = re.compile(self.rule)\n self.chapter_pattern = re.compile(self.chapter)\n\n self.FORMAL_DIGIT = \"零一二三四五六七八九\"\n self.math_digit = \"0123456789\"\n logger.info(\"###\")\n\n def format2digit(self, word):\n trans = \"\"\n if word.startswith('十'):\n trans += '1'\n\n for c in word:\n if c in self.FORMAL_DIGIT:\n trans += self.math_digit[self.FORMAL_DIGIT.index(c)]\n if c == '千' and not word.endswith('千'):\n if '百' not in word and '十' not in word:\n trans += \"0\"\n if word.endswith(c):\n if c == \"十\":\n trans += '0'\n if c == \"百\":\n trans += '00'\n if c == \"千\":\n trans += '000'\n return trans\n\n def split(self, content):\n # logger.info('1:{}, 2:{}'.format(title, content))\n\n df = pandas.DataFrame()\n f = content.split('\\n')\n buffer = []\n digit = 0\n for line in f:\n match = re.search(self.pattern, line)\n if match:\n # output\n article_digit = self.format2digit(match.group(1))\n if digit:\n tup = (str(int(article_digit) - 1), r\"\\n\".join(buffer))\n buffer = []\n dic = dict(zip(('id', 'desc'), tup))\n df = df.append(dic, ignore_index=True)\n buffer.append(line.strip())\n digit += 1\n else:\n if self.chapter_pattern.search(line):\n context = line.strip().split(' ')[-1]\n else:\n buffer.append(line.strip())\n # last\n if buffer:\n tup = (article_digit, r\"\\n\".join(buffer))\n dic = dict(zip(('id', 'desc'), tup))\n df = df.append(dic, ignore_index=True)\n filename = \"data/{}.csv\".format(time.time())\n df.to_csv(filename, columns=['id', 'desc'], index=False)\n tuple = {'id':df['id'].to_list(), 'desc':df['desc'].to_list()}\n return tuple\n\n\n def on_get(self, req, resp):\n logger.info(\"...\")\n resp.set_header('Access-Control-Allow-Origin', '*')\n resp.set_header('Access-Control-Allow-Methods', '*')\n resp.set_header('Access-Control-Allow-Headers', '*')\n resp.set_header('Access-Control-Allow-Credentials','true')\n # title = req.get_param('1', True)\n content = req.get_param('1', True)\n # clean_title = shortenlines(title)\n # clean_content = cleanall(content)\n resp.media = self.split(content)\n logger.info(\"###\")\n\n\n def on_post(self, req, resp):\n \"\"\"Handles POST requests\"\"\"\n resp.set_header('Access-Control-Allow-Origin', '*')\n resp.set_header('Access-Control-Allow-Methods', '*')\n resp.set_header('Access-Control-Allow-Headers', '*')\n resp.set_header('Access-Control-Allow-Credentials', 'true')\n resp.set_header(\"Cache-Control\", \"no-cache\")\n data = req.stream.read(req.content_length)\n jsondata = json.loads(data)\n # clean_title = shortenlines(jsondata['title'])\n # clean_content = 
self.split((jsondata['content'])\n resp.media = self.split(jsondata['content'])\n\nif __name__==\"__main__\":\n api = falcon.API(middleware=[cors_allow_all.middleware])\n api.req_options.auto_parse_form_urlencoded = True\n api.add_route('/z', TorchResource())\n waitress.serve(api, port=args.port, threads=48, url_scheme='http')\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.DataFrame"
]
] |
natetsang/open-rl | [
"426723d0d6759672ce77e02afeb55cbeb68fcfb0"
] | [
"openrl/algorithms/imitation/imitation_learning.py"
] | [
"import gym\nimport time\nimport pickle\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom typing import Callable, Union, Tuple, List\nfrom models.models import actor_fc_discrete_network, actor_critic_fc_discrete_network\nfrom algorithms.imitation.utils import plot_training_results\nfrom util.replay_buffer import ReplayBuffer\n\n\n# Set up\nGAMMA = 0.99\nLEARNING_RATE = 0.0001\n\n\nclass ImitationAgent:\n def __init__(self,\n environment: gym.Env,\n model_fn: Callable[..., tf.keras.Model],\n optimizer: tf.keras.optimizers,\n run_dagger: bool,\n expert_policy,\n expert_data_path,\n replay_buffer: ReplayBuffer,\n model_kwargs: dict = None,\n train_kwargs: dict = None,\n save_dir: str = None) -> None:\n # Env vars\n self.env = environment\n self.state_dims = model_kwargs.get('state_dims')\n self.num_actions = model_kwargs.get('num_actions')\n\n num_hidden_layers = model_kwargs.get(\"num_hidden_layers\")\n hidden_size = model_kwargs.get(\"hidden_size\")\n\n # Algorithm\n self.run_dagger = run_dagger\n\n # Expert\n self.expert_policy = expert_policy\n self.expert_data = ImitationAgent.load_expert_data(expert_data_path)\n\n # Actor model\n self.model = model_fn(state_dims=self.state_dims,\n num_actions=self.num_actions,\n num_hidden_layers=num_hidden_layers,\n hidden_size=hidden_size)\n\n self.optimizer = optimizer\n self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) # Discrete action space only\n\n # Replay buffer\n self.replay_buffer = replay_buffer\n\n # Training vars\n self.cur_episode = 0\n self.total_steps = 0\n self.max_ep_len = train_kwargs.get(\"max_ep_len\")\n self.batch_size = train_kwargs.get(\"batch_size\") # Batch size of data collection from buffer\n self.train_batch_size = train_kwargs.get('train_batch_size') # Batch size for training models\n self.eval_batch_size = train_kwargs.get('eval_batch_size') # Batch size for eval\n self.num_agent_train_steps_per_iter = train_kwargs.get('num_agent_train_steps_per_iter') # Grad updates per run\n\n # Save directories\n self.save_dir = save_dir\n\n def save_models(self) -> None:\n self.model.save(self.save_dir)\n\n def load_models(self) -> tf.keras.Model:\n self.model = tf.keras.models.load_model(self.save_dir)\n return self.model\n\n @staticmethod\n def load_expert_data(path):\n with open(path, 'rb') as f:\n expert_data = pickle.load(f)\n return expert_data\n\n def sample_random_trajectory(self) -> Tuple[List[Tuple], Union[int, float]]:\n \"\"\"\n Sample 1 trajectory.\n\n :param max_path_length: the maximum number of steps to take in the trajectory\n :param random: whether or not to sample actions randomly or using MPC\n :return:\n \"\"\"\n state = tf.expand_dims(tf.convert_to_tensor(self.env.reset()), 0)\n num_steps = 0\n total_rewards = 0\n transitions = [] # transition tuples (s,a,r,s',d)\n while True:\n num_steps += 1\n action_prob = self.model(state)\n action = np.random.choice(self.num_actions, p=np.squeeze(action_prob))\n next_state, reward, done, _ = self.env.step(action)\n next_state = tf.reshape(next_state, [1, self.state_dims])\n\n total_rewards += reward\n\n if done or num_steps > self.max_ep_len:\n transitions.append((state, action, reward, next_state, 1))\n break\n\n transitions.append((state, action, reward, next_state, 0))\n state = next_state\n\n return transitions, total_rewards\n\n def sample_n_trajectories(self) -> Tuple[List, List, int]:\n \"\"\"\n Sample `self.batch_size` trajectories. Each trajectory should be no longer than\n `max_path_length` steps/transitions. 
Note that transitions are different than trajectories!\n A transition is a tuple (s,a,r,s',d) and a trajectory is made up of 1 to `max_path_length` transitions.\n\n :param batch_size: The number of transitions to sample.\n :param max_path_length: The maximum steps/transitions per trajectory\n :param random: Boolean to indicate whether or not to sample actions randomly or via MPC\n :return:\n \"\"\"\n num_steps_this_batch = 0\n trajectory_rewards = []\n transitions = []\n while num_steps_this_batch < self.batch_size:\n traj, rews = self.sample_random_trajectory()\n num_steps_this_batch += len(traj)\n trajectory_rewards.append(rews)\n # Note that we're extending, not appending, because we don't care about trajectories, we care about\n # the transitions. If we appended, it would be ([[(tran 1), (tran 2)], ..., [(tran n), (tran n+1)]],\n # where each sublist is a trajectory. But by extending, it's instead ([(tran 1), ..., (tran n)]\n transitions.extend(traj)\n return transitions, trajectory_rewards, num_steps_this_batch\n\n def relabel_actions_with_expert(self, transitions: List[Tuple]) -> List[Tuple]:\n \"\"\"\n Given a batch of transition tuples, query the Expert Policy and update the action based on\n the Expert. This is the key difference between vanilla behavioral cloning and DAgger. This\n step is equivalent to asking a human expert to label our dataset with actions the correct actions.\n \"\"\"\n updated_transitions = []\n for transition in transitions:\n state, action, reward, next_state, done = transition\n action_prob, _ = self.expert_policy(state)\n expert_action = np.argmax(np.squeeze(action_prob))\n updated_transitions.append((state, expert_action, reward, next_state, done))\n return updated_transitions\n\n def train_episode(self) -> List:\n # Step 1: Sample trajectories\n if self.cur_episode == 0:\n # Load expert_data\n transitions = self.expert_data\n else:\n # Or sample trajectories using current policy\n transitions, _, _ = self.sample_n_trajectories()\n\n # Step 2: For DAgger only, ask expert policy to label data with actions\n if self.run_dagger and self.cur_episode > 0:\n transitions = self.relabel_actions_with_expert(transitions)\n\n # Step 3: Store the sampled transitions in the replay buffer\n self.replay_buffer.store_transitions_batch(transitions)\n\n # Step 4: Train model!\n losses = []\n for train_step in range(self.num_agent_train_steps_per_iter):\n # Sample a random batch of data from the replay buffer\n states, actions, _, _, _ = self.replay_buffer.sample(batch_size=self.train_batch_size)\n\n with tf.GradientTape() as tape:\n action_prob = self.model(states)\n loss = self.loss_fn(actions, action_prob)\n grads = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))\n\n losses.append(loss)\n self.cur_episode += 1\n return losses\n\n def run_agent(self, render=False) -> Tuple[float, int]:\n total_reward, total_steps = 0, 0\n state = self.env.reset()\n done = False\n\n while not done:\n if render:\n self.env.render()\n\n # Select action\n action_prob = self.model(tf.expand_dims(state, axis=0))\n action = np.argmax(np.squeeze(action_prob))\n\n # Interact with environment\n state, reward, done, _ = self.env.step(action)\n\n # Bookkeeping\n total_reward += reward\n total_steps += 1\n return total_reward, total_steps\n\n\ndef main() -> None:\n # Check input params\n if args.run_dagger:\n assert args.epochs > 1, \"DAgger needs more than 1 iteration of training, where each iter\" \\\n \"we query the 
expert and train\"\n else:\n assert args.epochs == 1, \"Vanilla behavior cloning collects expert data only once and does traditional\" \\\n \"supervised learning on that dataset.\"\n\n # Create environment\n env = gym.make(args.env)\n\n # Set seeds\n if args.seed:\n np.random.seed(args.seed)\n tf.random.set_seed(args.seed)\n env.seed(args.seed)\n\n # Create helper vars for model creation\n _state_dims = len(env.observation_space.high)\n _action_dims = 1\n _num_actions = env.action_space.n\n\n # Create Replay Buffer\n buffer = ReplayBuffer(state_dims=_state_dims, action_dims=_action_dims)\n\n # Instantiate optimizer\n opt = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)\n\n # Instantiate expert policy from file\n # TODO >> I think it's a bit cleaner to load the entire model instead of just the weights\n # but I'm getting a TF error that I think was fixed in a later version. I should probably\n # try updating the version and seeing if it fixes itself.\n expert = actor_critic_fc_discrete_network(state_dims=_state_dims,\n num_actions=_num_actions,\n num_hidden_layers=2,\n hidden_size=128)\n expert.load_weights(args.expert_policy_file)\n\n # Create agent\n agent = ImitationAgent(environment=env,\n model_fn=actor_fc_discrete_network,\n optimizer=opt,\n replay_buffer=buffer,\n run_dagger=args.run_dagger,\n expert_policy=expert,\n expert_data_path=args.expert_data,\n model_kwargs=dict(state_dims=_state_dims,\n num_actions=_num_actions,\n num_hidden_layers=2,\n hidden_size=256),\n train_kwargs=dict(max_ep_len=args.max_ep_len,\n batch_size=args.batch_size,\n train_batch_size=args.train_batch_size,\n eval_batch_size=args.eval_batch_size,\n num_agent_train_steps_per_iter=args.num_agent_train_steps_per_iter)\n )\n\n # Run training\n ep_mean_rewards_history, ep_max_rewards_history, ep_min_rewards_history = [], [], []\n ep_mean_loss_history, ep_max_loss_history, ep_min_loss_history = [], [], []\n ep_steps_history = []\n ep_wallclock_history = []\n start = time.time()\n for e in range(args.epochs):\n # Run one episode\n ep_loss = agent.train_episode()\n ep_rew, ep_steps = agent.run_agent()\n\n # Prepare for logging\n mean_ep_rew, max_ep_rew, min_ep_rew, std_ep_rew = np.mean(ep_rew), np.max(ep_rew), np.min(ep_rew), np.std(ep_rew)\n mean_ep_loss, max_ep_loss, min_ep_loss = np.mean(ep_loss), np.max(ep_loss), np.min(ep_loss)\n ep_wallclock_history.append(time.time() - start)\n\n ep_mean_rewards_history.append(mean_ep_rew)\n ep_max_rewards_history.append(max_ep_rew)\n ep_min_rewards_history.append(min_ep_rew)\n\n ep_mean_loss_history.append(mean_ep_loss)\n ep_max_loss_history.append(max_ep_loss)\n ep_min_loss_history.append(min_ep_loss)\n\n ep_steps_history.append(ep_steps)\n\n template = \"EPISODE {} | mean ep reward: {:.2f} - max ep reward: {:.2f}\" \\\n \" - min ep reward: {:.2f} - std ep reward: {:.2f} - mean ep loss {:.2f}\"\n print(template.format(e, mean_ep_rew, max_ep_rew, min_ep_rew, std_ep_rew, mean_ep_loss))\n\n # Now that we've completed training, let's plot the results\n print(f\"Training time elapsed (sec): {round(time.time() - start, 2)}\")\n\n # Let's evaluate the performance of the trained agent\n print(\"Beginning evaluation of trained agent!\")\n eval_rew = []\n for i in range(50):\n ep_rew, ep_steps = agent.run_agent()\n eval_rew.append(ep_rew)\n print(f\"Evaluation rewards: mean - {np.mean(eval_rew)} | min - {np.min(eval_rew)} | max - {np.max(eval_rew)}\")\n\n # Plot summary of results\n plot_training_results(mean_rewards_history=ep_mean_rewards_history,\n 
max_rew_history=ep_max_rewards_history,\n                          min_rew_history=ep_min_rewards_history,\n                          mean_loss_history=ep_mean_loss_history,\n                          max_loss_history=ep_max_loss_history,\n                          min_loss_history=ep_min_loss_history,\n                          steps_history=ep_steps_history,\n                          wallclock_history=ep_wallclock_history,\n                          save_dir=\"./results.png\")\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--seed\", type=int, default=1)\n    parser.add_argument(\"--env\", type=str, default=\"CartPole-v0\")\n    parser.add_argument('--expert_policy_file', type=str, default='./checkpoints/expert_model_weights')\n    parser.add_argument('--expert_data', type=str, default='expert_data.pkl')\n    # boolean flag: argparse's type=bool would treat any non-empty string as True\n    parser.add_argument(\"--run_dagger\", action=\"store_true\")\n    parser.add_argument(\"--epochs\", type=int, default=1)\n    parser.add_argument('--max_ep_len', type=int, default=100)  # max trajectory length\n\n    parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=20)  # number of grad updates per iter\n    parser.add_argument('--batch_size', type=int, default=1000)  # num steps/transitions to sample for itr 1+\n    parser.add_argument('--train_batch_size', type=int, default=512)  # training batch size per model\n    parser.add_argument('--eval_batch_size', type=int, default=400)  # steps collected per eval iteration\n    args = parser.parse_args()\n\n    main()\n"
] | [
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.models.load_model",
"tensorflow.reshape",
"numpy.squeeze",
"numpy.random.seed",
"tensorflow.expand_dims",
"tensorflow.GradientTape",
"numpy.max",
"numpy.min",
"numpy.std",
"tensorflow.random.set_seed",
"numpy.mean"
]
] |
solazu/FinRL-Library | [
"6cfe00933c16fc8a74efc9fb3d9cfa1b3bf296ea"
] | [
"finrl/commands/data_commands.py"
] | [
"import logging\nimport sys\nimport yfinance\nimport pandas as pd\nimport yfinance as yf\nimport os\n\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom typing import Any, Dict, List\n\n\nfrom finrl.config import TimeRange, setup_utils_configuration\nfrom finrl.data.converter import convert_ohlcv_format, convert_trades_format\nfrom finrl.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,\n refresh_backtest_trades_data)\nfrom finrl.exceptions import OperationalException\nfrom finrl.exchange import timeframe_to_minutes\nfrom finrl.resolvers import ExchangeResolver\nfrom finrl.state import RunMode\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef start_download_cryptodata(args: Dict[str, Any]) -> None:\n \"\"\"\n Parameters:\n ARGS_DOWNLOAD_DATA = {'config': ['config.json'], 'datadir': None, \n 'user_data_dir': None, 'pairs': None, 'pairs_file': None, \n 'days': 160, 'timerange': None, \n 'download_trades': False, 'exchange': 'binance', \n 'timeframes': ['1d'], 'erase': False, \n 'dataformat_ohlcv': None, 'dataformat_trades': None}\n \n Returns:\n Json files in user_data/data/exchange/*.json\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n if 'days' in config and 'timerange' in config:\n raise OperationalException(\"--days and --timerange are mutually exclusive. \"\n \"You can only specify one or the other.\")\n timerange = TimeRange()\n if 'days' in config:\n time_since = (datetime.now() - timedelta(days=config['days'])).strftime(\"%Y%m%d\")\n timerange = TimeRange.parse_timerange(f'{time_since}-')\n\n if 'timerange' in config:\n timerange = timerange.parse_timerange(config['timerange'])\n\n # Remove stake-currency to skip checks which are not relevant for datadownload\n config['stake_currency'] = ''\n\n if 'pairs' not in config:\n raise OperationalException(\n \"Downloading data requires a list of pairs. 
\"\n \"Please check the documentation on how to configure this.\")\n\n logger.info(f\"About to download pairs: {config['pairs']}, \"\n f\"intervals: {config['timeframes']} to {config['datadir']}\")\n\n pairs_not_available: List[str] = []\n\n # Init exchange\n exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)\n # Manual validations of relevant settings\n exchange.validate_pairs(config['pairs'])\n for timeframe in config['timeframes']:\n exchange.validate_timeframes(timeframe)\n\n try:\n\n if config.get('download_trades'):\n pairs_not_available = refresh_backtest_trades_data(\n exchange, pairs=config['pairs'], datadir=config['datadir'],\n timerange=timerange, erase=bool(config.get('erase')),\n data_format=config['dataformat_trades'])\n\n # Convert downloaded trade data to different timeframes\n convert_trades_to_ohlcv(\n pairs=config['pairs'], timeframes=config['timeframes'],\n datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),\n data_format_ohlcv=config['dataformat_ohlcv'],\n data_format_trades=config['dataformat_trades'],\n )\n else:\n pairs_not_available = refresh_backtest_ohlcv_data(\n exchange, pairs=config['pairs'], timeframes=config['timeframes'],\n datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),\n data_format=config['dataformat_ohlcv'])\n\n except KeyboardInterrupt:\n sys.exit(\"Interrupt received, aborting ...\")\n\n finally:\n if pairs_not_available:\n logger.info(f\"Pairs [{','.join(pairs_not_available)}] not available \"\n f\"on exchange {exchange.name}.\")\n\ndef start_download_stockdata(args: Dict[str, Any]) -> None:\n \"\"\"Fetches data from Yahoo API\n Parameters\n ----------\n ticker_list, timerange, \n Returns\n -------\n Json of data\n \"\"\"\n args[\"exchange\"] = \"yahoo\"\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n \n\n if 'days' in config and 'timerange' in config:\n raise OperationalException(\"--days and --timerange are mutually exclusive. 
\"\n \"You can only specify one or the other.\")\n\n config[\"datadir\"] = \"user_data/data/yahoo\"\n\n timerange = TimeRange()\n if 'days' in config:\n time_since = (datetime.now() - timedelta(days=config['days'])).strftime(\"%Y%m%d\")\n timerange = TimeRange.parse_timerange(f'{time_since}-')\n start = datetime.fromtimestamp(timerange.startts).strftime(\"%Y-%m-%d\")\n end = datetime.now().strftime(\"%Y-%m-%d\")\n\n if 'timerange' in config:\n timerange = timerange.parse_timerange(config['timerange'])\n start = datetime.fromtimestamp(timerange.startts).strftime(\"%Y-%m-%d\")\n end = datetime.fromtimestamp(timerange.stopts).strftime(\"%Y-%m-%d\")\n try:\n data_df = pd.DataFrame()\n for tic in config['ticker_list']:\n temp_df = yf.download(tic, start=start, end=end)\n temp_df.columns = [\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"adjcp\",\n \"volume\",\n ]\n temp_df[\"close\"] = temp_df[\"adjcp\"]\n temp_df = temp_df.drop([\"adjcp\"], axis=1)\n temp_df.to_json(f'{os.getcwd()}/{config[\"datadir\"]}/{tic}.json')\n except KeyboardInterrupt:\n sys.exit(\"Interrupt received, aborting ...\")\n\n\n\n\n\ndef start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:\n \"\"\"\n Convert data from one format to another\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n if ohlcv:\n convert_ohlcv_format(config,\n convert_from=args['format_from'], convert_to=args['format_to'],\n erase=args['erase'])\n else:\n convert_trades_format(config,\n convert_from=args['format_from'], convert_to=args['format_to'],\n erase=args['erase'])\n\n\ndef start_list_data(args: Dict[str, Any]) -> None:\n \"\"\"\n List available backtest data\n \"\"\"\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n from tabulate import tabulate\n\n from freqtrade.data.history.idatahandler import get_datahandler\n dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])\n\n paircombs = dhc.ohlcv_get_available_data(config['datadir'])\n\n if args['pairs']:\n paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]\n\n print(f\"Found {len(paircombs)} pair / timeframe combinations.\")\n groupedpair = defaultdict(list)\n for pair, timeframe in sorted(paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]))):\n groupedpair[pair].append(timeframe)\n\n if groupedpair:\n print(tabulate([(pair, ', '.join(timeframes)) for pair, timeframes in groupedpair.items()],\n headers=(\"Pair\", \"Timeframe\"),\n tablefmt='psql', stralign='right'))\n"
] | [
[
"pandas.DataFrame"
]
] |
megodoonch/birdsong | [
"582e7ddecf6c9c1b75f17418097f7bcbf6784d31"
] | [
"surface/misc.py"
] | [
"import numpy as np\n\n# Generate some n number of colours, hopefully maximally different\n\n\n# source: http://stackoverflow.com/questions/470690/how-to-automatically-generate-n-distinct-colors\nimport colorsys\n\ndef get_colors(num_colors):\n colors=[]\n for i in np.arange(0., 360., 360. / num_colors):\n hue = i/360.\n lightness = (20 + np.random.rand() * 10)/100.\n saturation = (90 + np.random.rand() * 10)/100.\n colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))\n return colors\n\n\n\n\ndef cohens_d(x, y):\n lx = len(x)- 1\n ly = len(y)- 1\n md = abs(np.mean(x) - np.mean(y)) ## mean difference (numerator)\n csd = lx * np.var(x) + ly * np.var(y)\n csd = csd/(lx + ly)\n csd = np.sqrt(csd) ## common sd computation\n cd = md/csd ## cohen's d\n return cd\n\n\n\n\n\n\n\ndef get_freqs(lst):\n # Return a list of pairs that correspond to counts of \n # elements: (a,n) means that a appeared n times in the list.\n # The list is ordered by a, whatever order that variable has.\n counts = {}\n for l in lst:\n counts[l] = counts.get(l,0)+1\n\n # Convert to a list of pairs\n pairs = counts.items()\n\n # Order by the first element of each pair\n # pairs = pairs.sort(cmp=lambda (x,na),(y,nb): cmp(x,y))\n\n return pairs\n"
] | [
[
"numpy.var",
"numpy.arange",
"numpy.random.rand",
"numpy.sqrt",
"numpy.mean"
]
] |
saidineshpola/Knowledge-Distillation-Toolkit | [
"b05ebc28ae1385c9caa1c4c1c93db2d67356e85f"
] | [
"utils/fairseq_mod/fairseq_mod/criterions/cross_entropy.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom dataclasses import dataclass\n\nimport torch.nn.functional as F\nfrom fairseq_mod import metrics, utils\nfrom fairseq_mod.criterions import FairseqCriterion, register_criterion\nfrom fairseq_mod.dataclass import FairseqDataclass\nfrom omegaconf import II\n\n\n@dataclass\nclass CrossEntropyCriterionConfig(FairseqDataclass):\n sentence_avg: bool = II(\"params.optimization.sentence_avg\")\n\n\n@register_criterion(\"cross_entropy\", dataclass=CrossEntropyCriterionConfig)\nclass CrossEntropyCriterion(FairseqCriterion):\n def __init__(self, task, sentence_avg):\n super().__init__(task)\n self.sentence_avg = sentence_avg\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample[\"net_input\"])\n loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)\n sample_size = (\n sample[\"target\"].size(0) if self.sentence_avg else sample[\"ntokens\"]\n )\n logging_output = {\n \"loss\": loss.data,\n \"ntokens\": sample[\"ntokens\"],\n \"nsentences\": sample[\"target\"].size(0),\n \"sample_size\": sample_size,\n }\n return loss, sample_size, logging_output\n\n def compute_loss(self, model, net_output, sample, reduce=True):\n lprobs = model.get_normalized_probs(net_output, log_probs=True)\n lprobs = lprobs.view(-1, lprobs.size(-1))\n target = model.get_targets(sample, net_output).view(-1)\n loss = F.nll_loss(\n lprobs,\n target,\n ignore_index=self.padding_idx,\n reduction=\"sum\" if reduce else \"none\",\n )\n return loss, loss\n\n @staticmethod\n def reduce_metrics(logging_outputs) -> None:\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n\n metrics.log_scalar(\n \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n if sample_size != ntokens:\n metrics.log_scalar(\n \"nll_loss\", loss_sum / ntokens / math.log(2), ntokens, round=3\n )\n metrics.log_derived(\n \"ppl\", lambda meters: utils.get_perplexity(meters[\"nll_loss\"].avg)\n )\n else:\n metrics.log_derived(\n \"ppl\", lambda meters: utils.get_perplexity(meters[\"loss\"].avg)\n )\n\n @staticmethod\n def logging_outputs_can_be_summed() -> bool:\n \"\"\"\n Whether the logging outputs returned by `forward` can be summed\n across workers prior to calling `reduce_metrics`. Setting this\n to True will improves distributed training speed.\n \"\"\"\n return True\n"
] | [
[
"torch.nn.functional.nll_loss"
]
] |
henryzxu/pytorch-hsml-rl | [
"3b36f29cf91f3ca68820ea124a2ee7a75327b94f"
] | [
"maml_rl/sampler.py"
] | [
"import gym\nimport torch\nimport multiprocessing as mp\nimport numpy as np\n\nfrom maml_rl.envs.subproc_vec_env import SubprocVecEnv\nfrom maml_rl.episode import BatchEpisodes\n\ndef make_env(env_name):\n def _make_env():\n return gym.make(env_name)\n return _make_env\n\nclass BatchSampler(object):\n def __init__(self, env_name, batch_size, num_workers=mp.cpu_count() - 1):\n self.env_name = env_name\n self.batch_size = batch_size\n self.num_workers = num_workers\n \n self.queue = mp.Queue()\n self.envs = SubprocVecEnv([make_env(env_name) for _ in range(num_workers)],\n queue=self.queue)\n self._env = gym.make(env_name)\n\n def sample(self, policy, task, tree=None, params=None, gamma=0.95, device='cpu'):\n episodes = BatchEpisodes(batch_size=self.batch_size, gamma=gamma, device=device)\n for i in range(self.batch_size):\n self.queue.put(i)\n for _ in range(self.num_workers):\n self.queue.put(None)\n observations, batch_ids = self.envs.reset()\n dones = [False]\n while (not all(dones)) or (not self.queue.empty()):\n with torch.no_grad():\n input = torch.from_numpy(observations).float().to(device=device)\n\n if self.env_name == 'AntPos-v0':\n _, embedding = tree.forward(torch.from_numpy(task[\"position\"]).float().to(device=device))\n if self.env_name == 'AntVel-v1':\n _, embedding = tree.forward(torch.from_numpy(np.array([task[\"velocity\"]])).float().to(device=device))\n\n # print(input.shape)\n # print(embedding.shape)\n observations_tensor = torch.t(\n torch.stack([torch.cat([torch.from_numpy(np.array(teo)).to(device=device), embedding[0]], 0) for teo in input], 1))\n\n actions_tensor = policy(observations_tensor, task=task, params=params, enhanced=False).sample()\n actions = actions_tensor.cpu().numpy()\n new_observations, rewards, dones, new_batch_ids, _ = self.envs.step(actions)\n episodes.append(observations_tensor.cpu().numpy(), actions, rewards, batch_ids)\n observations, batch_ids = new_observations, new_batch_ids\n return episodes\n\n def reset_task(self, task):\n tasks = [task for _ in range(self.num_workers)]\n reset = self.envs.reset_task(tasks)\n return all(reset)\n\n def sample_tasks(self, num_tasks):\n tasks = self._env.unwrapped.sample_tasks(num_tasks)\n return tasks\n"
] | [
[
"numpy.array",
"torch.no_grad",
"torch.from_numpy"
]
] |
moslemk/Theano | [
"8d3a67b73fda49350d9944c9a24fc9660131861c"
] | [
"theano/sandbox/gpuarray/type.py"
] | [
"import numpy\n\nimport theano\nfrom theano.tensor.var import _tensor_py_operators\nfrom theano import Type, Variable, Constant, tensor, config, scalar\nfrom theano.compile import SharedVariable\n\n# Make sure this is importable even if pygpu is absent\n# (it will not work though)\ntry:\n import pygpu\n from pygpu import gpuarray\n from pygpu.elemwise import compare, elemwise2\nexcept ImportError:\n pass\n\n_context_reg = {}\n\n\ndef reg_context(name, ctx):\n \"\"\"\n Register a context by mapping it to a name.\n\n The context must be of type `GpuContext` and the name can be\n anything hashable (but is usually a string). Only one context can\n be registered per name and the second registration for a given\n name will raise an error.\n\n Parameters\n ----------\n name : hashable object\n Name to associate the context with (usually a string)\n ctx : GpuContext\n Context instance\n\n \"\"\"\n if name in _context_reg:\n raise ValueError(\"context name %s is already defined\" % (name,))\n if not isinstance(ctx, gpuarray.GpuContext):\n raise TypeError(\"context is not GpuContext\")\n _context_reg[name] = ctx\n\n\ndef get_context(name):\n \"\"\"\n Retrive the context associated with a name.\n\n Return the context object mapped to `ref` that was previously\n register through :func:`reg_context`. Trying to get the context\n for an unregistered `ref` will raise a exception.\n\n Parameters\n ----------\n name : hashable object\n Name associated with the context we want (usually a string)\n\n \"\"\"\n if name not in _context_reg:\n raise ValueError(\"context name %s not defined\" % (name,))\n return _context_reg[name]\n\n\ndef list_contexts():\n \"\"\"\n Return an iterable of all the registered context names.\n \"\"\"\n return _context_reg.keys()\n\n\n# Private method\ndef _name_for_ctx(ctx):\n for k, v in _context_reg:\n if v == ctx:\n return k\n raise ValueError('context is not registered')\n\n\n# This is a private method for use by the tests only\ndef _unreg_context(name):\n del _context_reg[name]\n\n\nclass GpuArrayType(Type):\n def __init__(self, dtype, broadcastable, context_name=None, name=None):\n # In case this was not provided and no global value is available\n self.dtype = str(dtype)\n self.broadcastable = tuple(bool(b) for b in broadcastable)\n self.ndim = len(self.broadcastable)\n self.name = name\n self.context_name = context_name\n try:\n self.typecode = gpuarray.dtype_to_typecode(self.dtype)\n except gpuarray.GpuArrayException:\n raise TypeError(\"Unsupported dtype for %s: %s\" %\n (self.__class__.__name__, self.dtype))\n\n def clone(self, dtype=None, broadcastable=None):\n if dtype is None:\n dtype = self.dtype\n if broadcastable is None:\n broadcastable = self.broadcastable\n return self.__class__(dtype=dtype, broadcastable=broadcastable,\n context_name=self.context_name, name=self.name)\n\n # This is a property to keep the type pickleable\n @property\n def context(self):\n return get_context(self.context_name)\n\n def __repr__(self):\n return \"GpuArrayType<%s>(%s, %s)\" % (self.context_name, self.dtype,\n self.broadcastable)\n\n def filter(self, data, strict=False, allow_downcast=None):\n if (isinstance(data, gpuarray.GpuArray) and\n data.typecode == self.typecode):\n # This is just to make this condition not enter the\n # following branches\n pass\n elif strict:\n if not isinstance(data, gpuarray.GpuArray):\n raise TypeError(\"%s expected a GpuArray object.\" % self,\n data, type(data))\n if self.typecode != data.typecode:\n raise TypeError(\"%s expected typecode %d (dtype %s), 
\"\n \"got %d (dtype %s).\" %\n (self, self.typecode, self.dtype,\n data.typecode, str(data.dtype)))\n if self.context != data.context:\n raise TypeError(\"data context does not match type context\")\n # fallthrough to ndim check\n elif (allow_downcast or\n (allow_downcast is None and\n type(data) == float and\n self.dtype == config.floatX)):\n data = gpuarray.array(data, dtype=self.typecode, copy=False,\n ndmin=len(self.broadcastable),\n context=self.context)\n else:\n if not hasattr(data, 'dtype'):\n # This is to convert objects that don't have a dtype\n # (like lists). We anticipate that the type below\n # will match and we pass copy=False so it won't make a\n # second object on the GPU.\n data = gpuarray.array(data, copy=False, context=self.context)\n\n up_dtype = scalar.upcast(self.dtype, data.dtype)\n if up_dtype == self.dtype:\n data = gpuarray.array(data, dtype=self.dtype, copy=False,\n context=self.context)\n else:\n raise TypeError(\"%s cannot store a value of dtype %s \"\n \"without risking loss of precision.\" %\n (self, data.dtype))\n\n if self.ndim != data.ndim:\n raise TypeError(\"Wrong number of dimensions: expected %s, \"\n \"got %s with shape %s.\" % (self.ndim, data.ndim,\n data.shape), data)\n shp = data.shape\n for i, b in enumerate(self.broadcastable):\n if b and shp[i] != 1:\n raise TypeError(\"Non-unit value on shape on a broadcastable\"\n \" dimension.\", shp, self.broadcastable)\n return data\n\n def filter_variable(self, other, allow_convert=True):\n from theano.sandbox.gpuarray import GpuFromHost\n\n if hasattr(other, '_as_GpuArrayVariable'):\n other = other._as_GpuArrayVariable(self.context_name)\n\n if not isinstance(other, Variable):\n other = self.Constant(type=self, data=other)\n\n if other.type == self:\n return other\n\n if not isinstance(other.type, tensor.TensorType):\n raise TypeError('Incompatible type', (self, other.type))\n if (other.type.dtype != self.dtype):\n raise TypeError('Incompatible dtype', (self.dtype,\n other.type.dtype))\n if other.type.ndim != self.ndim:\n raise TypeError('Incompatible number of dimensions.'\n ' Expected %d, got %d.' % (self.ndim, other.ndim))\n if other.type.broadcastable != self.broadcastable:\n if allow_convert:\n type2 = other.type.clone(broadcastable=self.broadcastable)\n other2 = type2.convert_variable(other)\n else:\n other2 = None\n if other2 is None:\n raise TypeError('Incompatible broadcastable dimensions.'\n ' Expected %s, got %s.' 
%\n (str(other.type.broadcastable),\n str(self.broadcastable)))\n other = other2\n\n return GpuFromHost(self.context_name)(other)\n\n @staticmethod\n def values_eq(a, b):\n if a.shape != b.shape:\n return False\n if a.typecode != b.typecode:\n return False\n a_eq_b = numpy.asarray(compare(a, '==', b))\n if a_eq_b.all():\n return True\n\n # maybe the trouble is that there are NaNs\n a = numpy.asarray(a)\n b = numpy.asarray(b)\n\n a_missing = numpy.isnan(a)\n if a_missing.any():\n b_missing = numpy.isnan(b)\n return numpy.all(a_eq_b + (a_missing == b_missing))\n else:\n return False\n\n @staticmethod\n def values_eq_approx(a, b,\n allow_remove_inf=False, allow_remove_nan=False,\n rtol=None, atol=None):\n if a.shape != b.shape or a.dtype != b.dtype:\n return False\n if 'int' in str(a.dtype):\n return GpuArrayType.values_eq(a, b)\n else:\n if allow_remove_inf or allow_remove_nan:\n raise NotImplementedError(\n \"GpuArrayType.values_eq_approx() don't implemented the\"\n \" allow_remove_inf and allow_remove_nan parameter\")\n if a.dtype == 'float16' or b.dtype == 'float16':\n an = numpy.asarray(a)\n bn = numpy.asarray(b)\n return tensor.TensorType.values_eq_approx(\n an, bn, allow_remove_inf=allow_remove_inf,\n allow_remove_nan=allow_remove_nan, rtol=rtol, atol=atol)\n atol_, rtol_ = theano.tensor.basic._get_atol_rtol(a, b)\n if rtol is not None:\n rtol_ = rtol\n if atol is not None:\n atol_ = atol\n res = elemwise2(a, '', b, a, odtype=numpy.dtype('bool'),\n op_tmpl=\"res[i] = (fabs(%%(a)s - %%(b)s) <\"\n \"(%(atol_)s + %(rtol_)s * fabs(%%(b)s)))\" %\n locals())\n ret = numpy.asarray(res).all()\n if ret:\n return True\n # maybe the trouble is that there are NaNs\n an = numpy.asarray(a)\n bn = numpy.asarray(b)\n return tensor.TensorType.values_eq_approx(\n an, bn, allow_remove_inf=allow_remove_inf,\n allow_remove_nan=allow_remove_nan, rtol=rtol, atol=atol)\n\n @staticmethod\n def may_share_memory(a, b):\n if (not isinstance(a, gpuarray.GpuArray) or\n not isinstance(b, gpuarray.GpuArray)):\n return False\n return pygpu.gpuarray.may_share_memory(a, b)\n\n def value_zeros(self, shape):\n return pygpu.gpuarray.zeros(shape, dtype=self.typecode,\n context=self.context)\n\n def make_variable(self, name=None):\n return self.Variable(self, name=name)\n\n def __eq__(self, other):\n return (type(self) == type(other) and\n self.typecode == other.typecode and\n self.broadcastable == other.broadcastable and\n self.context_name == other.context_name)\n\n def convert_variable(self, var):\n vt = var.type\n if (type(self) == type(vt) and\n self.typecode == vt.typecode and\n self.ndim == vt.ndim and\n self.context_name == vt.context_name and\n all(sb == ob or ob for sb, ob in zip(self.broadcastable,\n vt.broadcastable))):\n return theano.tensor.patternbroadcast(var, self.broadcastable)\n\n def __hash__(self):\n return hash((type(self), self.typecode, self.broadcastable,\n self.context_name))\n\n def dtype_specs(self):\n \"\"\"\n Return a tuple (python type, c type, numpy typenum) that corresponds\n to self.dtype.\n\n This function is used internally as part of C code generation.\n\n \"\"\"\n # TODO: add more type correspondances for e.g. 
int32, int64, float32,\n # complex64, etc.\n try:\n return {\n 'float16': (float, 'npy_float16', 'NPY_FLOAT16'),\n 'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\n 'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),\n 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')\n }[self.dtype]\n except KeyError:\n raise TypeError(\"Unsupported dtype for %s: %s\" %\n (self.__class__.__name__, self.dtype))\n\n def get_shape_info(self, obj):\n return obj.shape\n\n def get_size(self, shape_info):\n if shape_info:\n return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize\n else:\n return numpy.dtype(self.dtype).itemsize\n\n def c_declare(self, name, sub, check_input=True):\n return \"\"\"\n PyGpuArrayObject *%(name)s;\n \"\"\" % locals()\n\n def c_init(self, name, sub):\n return \"%s = NULL;\" % (name,)\n\n def c_extract(self, name, sub, check_input=True):\n # TODO I don't check broadcast stuff for now.\n return \"\"\"\n %(name)s = NULL;\n if (py_%(name)s == Py_None) {\n PyErr_SetString(PyExc_ValueError, \"expected a GpuArray, not None\");\n %(fail)s\n }\n /* First check if we are the base type exactly (the most common case),\n then do the full subclass check if needed. */\n if (py_%(name)s->ob_type != &PyGpuArrayType &&\n !PyObject_TypeCheck(py_%(name)s, &PyGpuArrayType)) {\n PyErr_SetString(PyExc_ValueError, \"expected a GpuArray\");\n %(fail)s\n }\n %(name)s = (PyGpuArrayObject *)py_%(name)s;\n Py_INCREF(%(name)s);\n \"\"\" % {'name': name, 'fail': sub['fail']}\n\n def c_cleanup(self, name, sub):\n return \"Py_XDECREF(%(name)s); %(name)s = NULL;\" % {'name': name}\n\n def c_sync(self, name, sub):\n return \"\"\"\n if (!%(name)s) {\n Py_XDECREF(py_%(name)s);\n Py_INCREF(Py_None);\n py_%(name)s = Py_None;\n } else if ((void *)py_%(name)s != (void *)%(name)s) {\n Py_XDECREF(py_%(name)s);\n py_%(name)s = (PyObject *)%(name)s;\n Py_INCREF(py_%(name)s);\n }\n \"\"\" % {'name': name}\n\n def c_init_code(self):\n # We don't actually need the numpy API except in\n # HostFromGpu and GpuFromHost and those case will be covered\n # by the TensorType parameter\n return ['import_pygpu__gpuarray();']\n\n def c_headers(self):\n # We need arrayobject for the PyArrayDescr struct def\n # (even if we just use a pointer to it in a function def)\n return ['<gpuarray/array.h>', '<gpuarray/kernel.h>',\n '<gpuarray/error.h>', '<gpuarray/buffer_blas.h>',\n '<numpy/arrayobject.h>', '<gpuarray_api.h>']\n\n def c_header_dirs(self):\n return [pygpu.get_include(), numpy.get_include()]\n\n def c_libraries(self):\n return ['gpuarray']\n\n def c_code_cache_version(self):\n ver = pygpu.gpuarray.api_version()\n # we only use the major version since the minor revision are\n # API-compatible.\n return (1, ver[0])\n\n\nclass _operators(_tensor_py_operators):\n def _as_TensorVariable(self):\n from .basic_ops import host_from_gpu\n return host_from_gpu(self)\n\n def _as_GpuArrayVariable(self, context_name):\n if self.type.context_name == context_name:\n return self\n else:\n from .basic_ops import GpuToGpu\n return GpuToGpu(context_name)(self)\n\n\nclass GpuArrayVariable(_operators, Variable):\n 
pass\n\n\nGpuArrayType.Variable = GpuArrayVariable\n\n\nclass GpuArraySignature(tensor.TensorConstantSignature):\n # might do something better if we can run the sum on the GPU, but\n # for now this will suffice.\n pass\n\n\nclass GpuArrayConstant(_operators, Constant):\n def signature(self):\n return GpuArraySignature((self.type, numpy.asarray(self.data)))\n\n def __str__(self):\n if self.name is not None:\n return self.name\n try:\n np_data = numpy.asarray(self.data)\n except gpuarray.GpuArrayException:\n np_data = self.data\n return \"GpuArrayConstant{%s}\" % np_data\n\n\nGpuArrayType.Constant = GpuArrayConstant\n\n\nclass GpuArraySharedVariable(_operators, SharedVariable):\n def get_value(self, borrow=False, return_internal_type=False):\n if return_internal_type:\n if borrow:\n return self.container.value\n else:\n return self.container.value.copy()\n else:\n return numpy.asarray(self.container.value)\n\n def set_value(self, value, borrow=False):\n if isinstance(value, pygpu.gpuarray.GpuArray):\n value = pygpu.gpuarray.array(value, copy=(not borrow),\n context=self.type.context)\n self.container.value = value\n\n def __getitem__(self, *args):\n return _operators.__getitem__(self, *args)\n\n\nGpuArrayType.SharedVariable = GpuArraySharedVariable\n\n\ndef gpuarray_shared_constructor(value, name=None, strict=False,\n allow_downcast=None, borrow=False,\n broadcastable=None,\n context_name=None):\n \"\"\"\n SharedVariable constructor for GpuArrayType.\n\n \"\"\"\n if not isinstance(value, (numpy.ndarray, pygpu.gpuarray.GpuArray)):\n raise TypeError('ndarray or GpuArray required')\n\n try:\n get_context(context_name)\n except ValueError:\n # Don't make this a hard error if we attempt to make a shared\n # variable while there is no default context.\n if context_name is None:\n raise TypeError('No default context and no context specified')\n raise\n\n if broadcastable is None:\n broadcastable = (False,) * value.ndim\n type = GpuArrayType(value.dtype, broadcastable, context_name=context_name)\n deviceval = pygpu.gpuarray.array(value, copy=(not borrow),\n context=type.context)\n return GpuArraySharedVariable(type=type, value=deviceval, name=name,\n strict=strict)\n\ntheano.compile.register_view_op_c_code(GpuArrayType, \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = %(iname)s;\n Py_XINCREF(%(oname)s);\n\"\"\", version=(0,))\n\n# Register GpuArrayType C code for Shape Op.\ntheano.compile.register_shape_c_code(\n GpuArrayType,\n \"\"\"\n npy_intp shape[] = {%(iname)s->ga.nd};\n if(%(oname)s == NULL || (PyArray_DIMS(%(oname)s)[0] != shape[0]))\n {\n Py_XDECREF(%(oname)s);\n %(oname)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, NPY_INT64);\n }\n for(int i=0;i<shape[0];i++)\n {\n ((npy_int64*)PyArray_GETPTR1(%(oname)s, i))[0] = %(iname)s->ga.dimensions[i];\n }\n \"\"\",\n version=1)\n\ntheano.compile.register_shape_i_c_code(\n GpuArrayType,\n \"\"\"\n if(!%(oname)s)\n %(oname)s=(PyArrayObject*)PyArray_ZEROS(0, NULL, NPY_INT64, 0);\n ((npy_int64*)PyArray_DATA(%(oname)s))[0] =\n %(iname)s->ga.dimensions[%(i)s];\n \"\"\",\n \"\"\"\n if (%(i)s>=%(iname)s->ga.nd){\n PyErr_SetString(PyExc_TypeError,\n \"Number of dimensions lower than expected\");\n %(fail)s\n }\n \"\"\",\n version=(1,))\n\ntheano.compile.register_deep_copy_op_c_code(GpuArrayType, \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = pygpu_copy(%(iname)s, GA_ANY_ORDER);\n if (!%(oname)s) { %(fail)s }\n\"\"\", version=(5,))\n\ntheano.compile.register_rebroadcast_c_code(\n GpuArrayType,\n \"\"\"\n if(%(iname)s->ga.dimensions[%(axis)s] != 1){\n 
PyErr_Format(PyExc_ValueError,\n \"Dimension %(axis)s in Rebroadcast's input was\"\n \" supposed to be 1 (got %%d instead)\",\n %(iname)s->ga.dimensions[%(axis)s]);\n %(fail)s\n }\n \"\"\",\n version=1)\n\ntheano.compile.register_specify_shape_c_code(\n GpuArrayType,\n \"\"\"\n if (PyGpuArray_NDIM(%(iname)s) != PyArray_DIMS(%(shape)s)[0]) {\n PyErr_Format(PyExc_AssertionError,\n \"SpecifyShape: vector of shape has %%d elements,\"\n \" but the input has %%d dimensions.\",\n PyGpuArray_NDIM(%(iname)s),\n PyArray_DIMS(%(shape)s)[0]);\n %(fail)s;\n }\n for(int i = 0; i < PyGpuArray_NDIM(%(iname)s); i++){\n dtype_%(shape)s shp = ((dtype_%(shape)s*)PyArray_GETPTR1(%(shape)s,\n i))[0];\n if (PyGpuArray_DIMS(%(iname)s)[i] != shp) {\n PyErr_Format(PyExc_AssertionError,\n \"SpecifyShape: dim %%d of input has shape %%d,\"\n \" expected %%d.\",\n i, PyGpuArray_DIMS(%(iname)s)[i],\n shp);\n %(fail)s;\n }\n }\n Py_XDECREF(%(oname)s);\n %(oname)s = %(iname)s;\n Py_XINCREF(%(oname)s);\n \"\"\",\n version=1,\n c_support_code_apply='#include <numpy_compat.h>')\n\n\nclass GpuContextType(Type):\n def filter(self, data, strict=False, allow_downcast=None):\n if not isinstance(data, gpuarray.GpuContext):\n raise TypeError('context is not a GpuContext')\n return data\n\n def __eq__(self, other):\n return type(self) == type(other)\n\n def __hash__(self):\n return hash(type(self))\n\n @staticmethod\n def values_eq(a, b):\n return a == b\n\n def c_declare(self, name, sub, check_input=True):\n return \"PyGpuContextObject *%s;\" % (name,)\n\n def c_init(self, name, sub):\n return \"%s = NULL;\" % (name,)\n\n def c_extract(self, name, sub, check_input=True):\n if check_input:\n res = \"\"\"\nif (!PyObject_TypeCheck(py_%(name)s, &PyGpuContextType)) {\n PyErr_SetString(PyExc_TypeError, \"expected a GpuContext\");\n %(fail)s\n}\n\"\"\" % dict(name=name, fail=sub['fail'])\n else:\n res = \"\"\n return res + \"\"\"\n%(name)s = (PyGpuContextObject *)py_%(name)s;\nPy_INCREF(%(name)s);\n\"\"\" % dict(name=name)\n\n def c_cleanup(self, name, sub):\n return \"Py_XDECREF(%(name)s); %(name)s = NULL;\" % dict(name=name)\n\n # c_sync is intentionally not declared to prevent normal usage\n\n def c_init_code(self):\n return ['import_pygpu__gpuarray();']\n\n def c_headers(self):\n return ['<gpuarray_api.h>']\n\n def c_header_dirs(self):\n return [pygpu.get_include()]\n\n def c_code_cache_version(self):\n ver = pygpu.gpuarray.api_version()\n return (0, ver[0])\n\n # Variable, Contstant, ... not declared\n\ngpu_context_type = GpuContextType()\n"
] | [
[
"numpy.dtype",
"numpy.get_include",
"numpy.asarray",
"numpy.all",
"numpy.prod",
"numpy.isnan"
]
] |
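The Theano `gpuarray` module in the row above is built around a small name-to-context registry (`reg_context` / `get_context` / `list_contexts`). As a minimal, self-contained sketch of that registry pattern, assuming pygpu is absent and using a hypothetical `FakeContext` class in place of `gpuarray.GpuContext`:

```python
# Sketch of the registry pattern used above; FakeContext is a hypothetical
# stand-in for pygpu's GpuContext (pygpu may not be installed).
_context_reg = {}


class FakeContext:
    def __init__(self, devname):
        self.devname = devname


def reg_context(name, ctx):
    # One registration per name, mirroring the checks in the row above.
    if name in _context_reg:
        raise ValueError("context name %s is already defined" % (name,))
    if not isinstance(ctx, FakeContext):
        raise TypeError("context is not FakeContext")
    _context_reg[name] = ctx


def get_context(name):
    if name not in _context_reg:
        raise ValueError("context name %s not defined" % (name,))
    return _context_reg[name]


reg_context("dev0", FakeContext("cuda0"))
assert get_context("dev0").devname == "cuda0"
```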
futurewarning/pyro | [
"005032f10099188fea86f63b6baa46a27867983f",
"005032f10099188fea86f63b6baa46a27867983f"
] | [
"pyro/distributions/transforms/affine_coupling.py",
"pyro/infer/tracetmc_elbo.py"
] | [
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport operator\nfrom functools import partial, reduce\n\nimport torch\nfrom torch.distributions.utils import _sum_rightmost\n\nfrom pyro.nn import ConditionalDenseNN, DenseNN\n\nfrom .. import constraints\nfrom ..conditional import ConditionalTransformModule\nfrom ..torch_transform import TransformModule\nfrom ..transforms.utils import clamp_preserve_gradients\nfrom ..util import copy_docs_from\n\n\n@copy_docs_from(TransformModule)\nclass AffineCoupling(TransformModule):\n r\"\"\"\n An implementation of the affine coupling layer of RealNVP (Dinh et al., 2017)\n that uses the bijective transform,\n\n :math:`\\mathbf{y}_{1:d} = \\mathbf{x}_{1:d}`\n :math:`\\mathbf{y}_{(d+1):D} = \\mu + \\sigma\\odot\\mathbf{x}_{(d+1):D}`\n\n where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs,\n e.g. :math:`\\mathbf{x}_{1:d}` represents the first :math:`d` elements of the\n inputs, and :math:`\\mu,\\sigma` are shift and translation parameters calculated\n as the output of a function inputting only :math:`\\mathbf{x}_{1:d}`.\n\n That is, the first :math:`d` components remain unchanged, and the subsequent\n :math:`D-d` are shifted and translated by a function of the previous components.\n\n Together with :class:`~pyro.distributions.TransformedDistribution` this provides\n a way to create richer variational approximations.\n\n Example usage:\n\n >>> from pyro.nn import DenseNN\n >>> input_dim = 10\n >>> split_dim = 6\n >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))\n >>> param_dims = [input_dim-split_dim, input_dim-split_dim]\n >>> hypernet = DenseNN(split_dim, [10*input_dim], param_dims)\n >>> transform = AffineCoupling(split_dim, hypernet)\n >>> pyro.module(\"my_transform\", transform) # doctest: +SKIP\n >>> flow_dist = dist.TransformedDistribution(base_dist, [transform])\n >>> flow_dist.sample() # doctest: +SKIP\n\n The inverse of the Bijector is required when, e.g., scoring the log density of a\n sample with :class:`~pyro.distributions.TransformedDistribution`. This\n implementation caches the inverse of the Bijector when its forward operation is\n called, e.g., when sampling from\n :class:`~pyro.distributions.TransformedDistribution`. However, if the cached\n value isn't available, either because it was overwritten during sampling a new\n value or an arbitary value is being scored, it will calculate it manually.\n\n This is an operation that scales as O(1), i.e. constant in the input dimension.\n So in general, it is cheap to sample *and* score (an arbitrary value) from\n :class:`~pyro.distributions.transforms.AffineCoupling`.\n\n :param split_dim: Zero-indexed dimension :math:`d` upon which to perform input/\n output split for transformation.\n :type split_dim: int\n :param hypernet: a neural network whose forward call returns a real-valued mean\n and logit-scale as a tuple. The input should have final dimension split_dim\n and the output final dimension input_dim-split_dim for each member of the\n tuple.\n :type hypernet: callable\n :param dim: the tensor dimension on which to split. 
This value must be negative\n and defines the event dim as `abs(dim)`.\n :type dim: int\n :param log_scale_min_clip: The minimum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_min_clip: float\n :param log_scale_max_clip: The maximum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_max_clip: float\n\n References:\n\n [1] Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation\n using Real NVP. ICLR 2017.\n\n \"\"\"\n\n bijective = True\n\n def __init__(self, split_dim, hypernet, *, dim=-1, log_scale_min_clip=-5., log_scale_max_clip=3.):\n super().__init__(cache_size=1)\n if dim >= 0:\n raise ValueError(\"'dim' keyword argument must be negative\")\n\n self.split_dim = split_dim\n self.nn = hypernet\n self.dim = dim\n self._cached_log_scale = None\n self.log_scale_min_clip = log_scale_min_clip\n self.log_scale_max_clip = log_scale_max_clip\n\n @constraints.dependent_property(is_discrete=False)\n def domain(self):\n return constraints.independent(constraints.real, -self.dim)\n\n @constraints.dependent_property(is_discrete=False)\n def codomain(self):\n return constraints.independent(constraints.real, -self.dim)\n\n def _call(self, x):\n \"\"\"\n :param x: the input into the bijection\n :type x: torch.Tensor\n\n Invokes the bijection x=>y; in the prototypical context of a\n :class:`~pyro.distributions.TransformedDistribution` `x` is a sample from\n the base distribution (or the output of a previous transform)\n \"\"\"\n x1, x2 = x.split([self.split_dim, x.size(self.dim) - self.split_dim], dim=self.dim)\n\n # Now that we can split on an arbitrary dimension, we have do a bit of reshaping...\n mean, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))\n mean = mean.reshape(mean.shape[:-1] + x2.shape[self.dim:])\n log_scale = log_scale.reshape(log_scale.shape[:-1] + x2.shape[self.dim:])\n\n log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)\n self._cached_log_scale = log_scale\n\n y1 = x1\n y2 = torch.exp(log_scale) * x2 + mean\n return torch.cat([y1, y2], dim=self.dim)\n\n def _inverse(self, y):\n \"\"\"\n :param y: the output of the bijection\n :type y: torch.Tensor\n\n Inverts y => x. 
Uses a previously cached inverse if available, otherwise\n performs the inversion afresh.\n \"\"\"\n y1, y2 = y.split([self.split_dim, y.size(self.dim) - self.split_dim], dim=self.dim)\n x1 = y1\n\n # Now that we can split on an arbitrary dimension, we have do a bit of reshaping...\n mean, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))\n mean = mean.reshape(mean.shape[:-1] + y2.shape[self.dim:])\n log_scale = log_scale.reshape(log_scale.shape[:-1] + y2.shape[self.dim:])\n\n log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)\n self._cached_log_scale = log_scale\n\n x2 = (y2 - mean) * torch.exp(-log_scale)\n return torch.cat([x1, x2], dim=self.dim)\n\n def log_abs_det_jacobian(self, x, y):\n \"\"\"\n Calculates the elementwise determinant of the log jacobian\n \"\"\"\n x_old, y_old = self._cached_x_y\n if self._cached_log_scale is not None and x is x_old and y is y_old:\n log_scale = self._cached_log_scale\n else:\n x1, x2 = x.split([self.split_dim, x.size(self.dim) - self.split_dim], dim=self.dim)\n _, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))\n log_scale = log_scale.reshape(log_scale.shape[:-1] + x2.shape[self.dim:])\n log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)\n return _sum_rightmost(log_scale, self.event_dim)\n\n\n@copy_docs_from(ConditionalTransformModule)\nclass ConditionalAffineCoupling(ConditionalTransformModule):\n r\"\"\"\n An implementation of the affine coupling layer of RealNVP (Dinh et al., 2017)\n that conditions on an additional context variable and uses the bijective\n transform,\n\n :math:`\\mathbf{y}_{1:d} = \\mathbf{x}_{1:d}`\n :math:`\\mathbf{y}_{(d+1):D} = \\mu + \\sigma\\odot\\mathbf{x}_{(d+1):D}`\n\n where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs,\n e.g. :math:`\\mathbf{x}_{1:d}` represents the first :math:`d` elements of the\n inputs, and :math:`\\mu,\\sigma` are shift and translation parameters calculated\n as the output of a function input :math:`\\mathbf{x}_{1:d}` and a context\n variable :math:`\\mathbf{z}\\in\\mathbb{R}^M`.\n\n That is, the first :math:`d` components remain unchanged, and the subsequent\n :math:`D-d` are shifted and translated by a function of the previous components.\n\n Together with :class:`~pyro.distributions.ConditionalTransformedDistribution`\n this provides a way to create richer variational approximations.\n\n Example usage:\n\n >>> from pyro.nn import ConditionalDenseNN\n >>> input_dim = 10\n >>> split_dim = 6\n >>> context_dim = 4\n >>> batch_size = 3\n >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))\n >>> param_dims = [input_dim-split_dim, input_dim-split_dim]\n >>> hypernet = ConditionalDenseNN(split_dim, context_dim, [10*input_dim],\n ... param_dims)\n >>> transform = ConditionalAffineCoupling(split_dim, hypernet)\n >>> pyro.module(\"my_transform\", transform) # doctest: +SKIP\n >>> z = torch.rand(batch_size, context_dim)\n >>> flow_dist = dist.ConditionalTransformedDistribution(base_dist,\n ... 
[transform]).condition(z)\n >>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP\n\n The inverse of the Bijector is required when, e.g., scoring the log density of a\n sample with :class:`~pyro.distributions.ConditionalTransformedDistribution`.\n This implementation caches the inverse of the Bijector when its forward\n operation is called, e.g., when sampling from\n :class:`~pyro.distributions.ConditionalTransformedDistribution`. However, if the\n cached value isn't available, either because it was overwritten during sampling\n a new value or an arbitary value is being scored, it will calculate it manually.\n\n This is an operation that scales as O(1), i.e. constant in the input dimension.\n So in general, it is cheap to sample *and* score (an arbitrary value) from\n :class:`~pyro.distributions.transforms.ConditionalAffineCoupling`.\n\n :param split_dim: Zero-indexed dimension :math:`d` upon which to perform input/\n output split for transformation.\n :type split_dim: int\n :param hypernet: A neural network whose forward call returns a real-valued mean\n and logit-scale as a tuple. The input should have final dimension split_dim\n and the output final dimension input_dim-split_dim for each member of the\n tuple. The network also inputs a context variable as a keyword argument in\n order to condition the output upon it.\n :type hypernet: callable\n :param log_scale_min_clip: The minimum value for clipping the log(scale) from\n the NN\n :type log_scale_min_clip: float\n :param log_scale_max_clip: The maximum value for clipping the log(scale) from\n the NN\n :type log_scale_max_clip: float\n\n References:\n\n Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using\n Real NVP. ICLR 2017.\n\n \"\"\"\n\n domain = constraints.real_vector\n codomain = constraints.real_vector\n bijective = True\n\n def __init__(self, split_dim, hypernet, **kwargs):\n super().__init__()\n self.split_dim = split_dim\n self.nn = hypernet\n self.kwargs = kwargs\n\n def condition(self, context):\n cond_nn = partial(self.nn, context=context)\n return AffineCoupling(self.split_dim, cond_nn, **self.kwargs)\n\n\ndef affine_coupling(input_dim, hidden_dims=None, split_dim=None, dim=-1, **kwargs):\n \"\"\"\n A helper function to create an\n :class:`~pyro.distributions.transforms.AffineCoupling` object that takes care of\n constructing a dense network with the correct input/output dimensions.\n\n :param input_dim: Dimension(s) of input variable to permute. Note that when\n `dim < -1` this must be a tuple corresponding to the event shape.\n :type input_dim: int\n :param hidden_dims: The desired hidden dimensions of the dense network. Defaults\n to using [10*input_dim]\n :type hidden_dims: list[int]\n :param split_dim: The dimension to split the input on for the coupling\n transform. Defaults to using input_dim // 2\n :type split_dim: int\n :param dim: the tensor dimension on which to split. 
This value must be negative\n and defines the event dim as `abs(dim)`.\n :type dim: int\n :param log_scale_min_clip: The minimum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_min_clip: float\n :param log_scale_max_clip: The maximum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_max_clip: float\n\n \"\"\"\n if not isinstance(input_dim, int):\n if len(input_dim) != -dim:\n raise ValueError('event shape {} must have same length as event_dim {}'.format(input_dim, -dim))\n event_shape = input_dim\n extra_dims = reduce(operator.mul, event_shape[(dim + 1):], 1)\n else:\n event_shape = [input_dim]\n extra_dims = 1\n event_shape = list(event_shape)\n\n if split_dim is None:\n split_dim = event_shape[dim] // 2\n if hidden_dims is None:\n hidden_dims = [10 * event_shape[dim] * extra_dims]\n\n hypernet = DenseNN(split_dim * extra_dims,\n hidden_dims,\n [(event_shape[dim] - split_dim) * extra_dims,\n (event_shape[dim] - split_dim) * extra_dims])\n return AffineCoupling(split_dim, hypernet, dim=dim, **kwargs)\n\n\ndef conditional_affine_coupling(input_dim, context_dim, hidden_dims=None, split_dim=None, dim=-1, **kwargs):\n \"\"\"\n A helper function to create an\n :class:`~pyro.distributions.transforms.ConditionalAffineCoupling` object that\n takes care of constructing a dense network with the correct input/output\n dimensions.\n\n :param input_dim: Dimension of input variable\n :type input_dim: int\n :param context_dim: Dimension of context variable\n :type context_dim: int\n :param hidden_dims: The desired hidden dimensions of the dense network. Defaults\n to using [10*input_dim]\n :type hidden_dims: list[int]\n :param split_dim: The dimension to split the input on for the coupling\n transform. Defaults to using input_dim // 2\n :type split_dim: int\n :param dim: the tensor dimension on which to split. This value must be negative\n and defines the event dim as `abs(dim)`.\n :type dim: int\n :param log_scale_min_clip: The minimum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_min_clip: float\n :param log_scale_max_clip: The maximum value for clipping the log(scale) from\n the autoregressive NN\n :type log_scale_max_clip: float\n\n \"\"\"\n if not isinstance(input_dim, int):\n if len(input_dim) != -dim:\n raise ValueError('event shape {} must have same length as event_dim {}'.format(input_dim, -dim))\n event_shape = input_dim\n extra_dims = reduce(operator.mul, event_shape[(dim + 1):], 1)\n else:\n event_shape = [input_dim]\n extra_dims = 1\n event_shape = list(event_shape)\n\n if split_dim is None:\n split_dim = event_shape[dim] // 2\n if hidden_dims is None:\n hidden_dims = [10 * event_shape[dim] * extra_dims]\n\n nn = ConditionalDenseNN(split_dim * extra_dims, context_dim, hidden_dims,\n [(event_shape[dim] - split_dim) * extra_dims, (event_shape[dim] - split_dim) * extra_dims])\n return ConditionalAffineCoupling(split_dim, nn, dim=dim, **kwargs)\n",
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport queue\nimport warnings\n\nimport torch\n\nimport pyro.poutine as poutine\nfrom pyro.distributions.util import is_identically_zero\nfrom pyro.infer.elbo import ELBO\nfrom pyro.infer.enum import (\n get_importance_trace,\n iter_discrete_escape,\n iter_discrete_extend,\n)\nfrom pyro.infer.util import compute_site_dice_factor, is_validation_enabled, torch_item\nfrom pyro.ops import packed\nfrom pyro.ops.contract import einsum\nfrom pyro.poutine.enum_messenger import EnumMessenger\nfrom pyro.util import check_traceenum_requirements, warn_if_nan\n\n\ndef _compute_dice_factors(model_trace, guide_trace):\n \"\"\"\n compute per-site DiCE log-factors for non-reparameterized proposal sites\n this logic is adapted from pyro.infer.util.Dice.__init__\n \"\"\"\n log_probs = []\n for role, trace in zip((\"model\", \"guide\"), (model_trace, guide_trace)):\n for name, site in trace.nodes.items():\n if site[\"type\"] != \"sample\" or site[\"is_observed\"]:\n continue\n if role == \"model\" and name in guide_trace:\n continue\n\n log_prob, log_denom = compute_site_dice_factor(site)\n if not is_identically_zero(log_denom):\n dims = log_prob._pyro_dims\n log_prob = log_prob - log_denom\n log_prob._pyro_dims = dims\n if not is_identically_zero(log_prob):\n log_probs.append(log_prob)\n\n return log_probs\n\n\ndef _compute_tmc_factors(model_trace, guide_trace):\n \"\"\"\n compute per-site log-factors for all observed and unobserved variables\n log-factors are log(p / q) for unobserved sites and log(p) for observed sites\n \"\"\"\n log_factors = []\n for name, site in guide_trace.nodes.items():\n if site[\"type\"] != \"sample\" or site[\"is_observed\"]:\n continue\n log_proposal = site[\"packed\"][\"log_prob\"]\n log_factors.append(packed.neg(log_proposal))\n for name, site in model_trace.nodes.items():\n if site[\"type\"] != \"sample\":\n continue\n if site[\"name\"] not in guide_trace and \\\n not site[\"is_observed\"] and \\\n site[\"infer\"].get(\"enumerate\", None) == \"parallel\" and \\\n site[\"infer\"].get(\"num_samples\", -1) > 0:\n # site was sampled from the prior\n log_proposal = packed.neg(site[\"packed\"][\"log_prob\"])\n log_factors.append(log_proposal)\n log_factors.append(site[\"packed\"][\"log_prob\"])\n return log_factors\n\n\ndef _compute_tmc_estimate(model_trace, guide_trace):\n \"\"\"\n Use :func:`~pyro.ops.contract.einsum` to compute the Tensor Monte Carlo\n estimate of the marginal likelihood given parallel-sampled traces.\n \"\"\"\n # factors\n log_factors = _compute_tmc_factors(model_trace, guide_trace)\n log_factors += _compute_dice_factors(model_trace, guide_trace)\n\n if not log_factors:\n return 0.\n\n # loss\n eqn = \",\".join([f._pyro_dims for f in log_factors]) + \"->\"\n plates = \"\".join(frozenset().union(list(model_trace.plate_to_symbol.values()),\n list(guide_trace.plate_to_symbol.values())))\n tmc, = einsum(eqn, *log_factors, plates=plates,\n backend=\"pyro.ops.einsum.torch_log\",\n modulo_total=False)\n return tmc\n\n\nclass TraceTMC_ELBO(ELBO):\n \"\"\"\n A trace-based implementation of Tensor Monte Carlo [1]\n by way of Tensor Variable Elimination [2] that supports:\n - local parallel sampling over any sample site in the model or guide\n - exhaustive enumeration over any sample site in the model or guide\n\n To take multiple samples, mark the site with\n ``infer={'enumerate': 'parallel', 'num_samples': N}``.\n To configure all sites in a model or guide at once,\n use 
:func:`~pyro.infer.enum.config_enumerate` .\n To enumerate or sample a sample site in the ``model``,\n mark the site and ensure the site does not appear in the ``guide``.\n\n This assumes restricted dependency structure on the model and guide:\n variables outside of an :class:`~pyro.plate` can never depend on\n variables inside that :class:`~pyro.plate` .\n\n References\n\n [1] `Tensor Monte Carlo: Particle Methods for the GPU Era`,\n Laurence Aitchison (2018)\n\n [2] `Tensor Variable Elimination for Plated Factor Graphs`,\n Fritz Obermeyer, Eli Bingham, Martin Jankowiak, Justin Chiu, Neeraj Pradhan,\n Alexander Rush, Noah Goodman (2019)\n \"\"\"\n\n def _get_trace(self, model, guide, args, kwargs):\n \"\"\"\n Returns a single trace from the guide, and the model that is run\n against it.\n \"\"\"\n model_trace, guide_trace = get_importance_trace(\n \"flat\", self.max_plate_nesting, model, guide, args, kwargs)\n\n if is_validation_enabled():\n check_traceenum_requirements(model_trace, guide_trace)\n\n has_enumerated_sites = any(site[\"infer\"].get(\"enumerate\")\n for trace in (guide_trace, model_trace)\n for name, site in trace.nodes.items()\n if site[\"type\"] == \"sample\")\n\n if self.strict_enumeration_warning and not has_enumerated_sites:\n warnings.warn('Found no sample sites configured for enumeration. '\n 'If you want to enumerate sites, you need to @config_enumerate or set '\n 'infer={\"enumerate\": \"sequential\"} or infer={\"enumerate\": \"parallel\"}? '\n 'If you do not want to enumerate, consider using Trace_ELBO instead.')\n\n model_trace.compute_score_parts()\n guide_trace.pack_tensors()\n model_trace.pack_tensors(guide_trace.plate_to_symbol)\n return model_trace, guide_trace\n\n def _get_traces(self, model, guide, args, kwargs):\n \"\"\"\n Runs the guide and runs the model against the guide with\n the result packaged as a trace generator.\n \"\"\"\n if self.max_plate_nesting == float('inf'):\n self._guess_max_plate_nesting(model, guide, args, kwargs)\n if self.vectorize_particles:\n guide = self._vectorized_num_particles(guide)\n model = self._vectorized_num_particles(model)\n\n # Enable parallel enumeration over the vectorized guide and model.\n # The model allocates enumeration dimensions after (to the left of) the guide,\n # accomplished by preserving the _ENUM_ALLOCATOR state after the guide call.\n guide_enum = EnumMessenger(first_available_dim=-1 - self.max_plate_nesting)\n model_enum = EnumMessenger() # preserve _ENUM_ALLOCATOR state\n guide = guide_enum(guide)\n model = model_enum(model)\n\n q = queue.LifoQueue()\n guide = poutine.queue(guide, q,\n escape_fn=iter_discrete_escape,\n extend_fn=iter_discrete_extend)\n for i in range(1 if self.vectorize_particles else self.num_particles):\n q.put(poutine.Trace())\n while not q.empty():\n yield self._get_trace(model, guide, args, kwargs)\n\n def differentiable_loss(self, model, guide, *args, **kwargs):\n \"\"\"\n :returns: a differentiable estimate of the marginal log-likelihood\n :rtype: torch.Tensor\n :raises ValueError: if the ELBO is not differentiable (e.g. is\n identically zero)\n\n Computes a differentiable TMC estimate using ``num_particles`` many samples\n (particles). 
The result should be infinitely differentiable (as long\n as underlying derivatives have been implemented).\n \"\"\"\n elbo = 0.0\n for model_trace, guide_trace in self._get_traces(model, guide, args, kwargs):\n elbo_particle = _compute_tmc_estimate(model_trace, guide_trace)\n if is_identically_zero(elbo_particle):\n continue\n\n elbo = elbo + elbo_particle\n elbo = elbo / self.num_particles\n\n loss = -elbo\n warn_if_nan(loss, \"loss\")\n return loss\n\n def loss(self, model, guide, *args, **kwargs):\n with torch.no_grad():\n loss = self.differentiable_loss(model, guide, *args, **kwargs)\n if is_identically_zero(loss) or not loss.requires_grad:\n return torch_item(loss)\n return loss.item()\n\n def loss_and_grads(self, model, guide, *args, **kwargs):\n loss = self.differentiable_loss(model, guide, *args, **kwargs)\n if is_identically_zero(loss) or not loss.requires_grad:\n return torch_item(loss)\n loss.backward()\n return loss.item()\n"
] | [
[
"torch.distributions.utils._sum_rightmost",
"torch.cat",
"torch.exp"
],
[
"torch.no_grad"
]
] |
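The `AffineCoupling` docstring above defines the RealNVP transform as `y[1:d] = x[1:d]` and `y[d+1:D] = mu + sigma * x[d+1:D]`, with `mu` and `log(sigma)` computed from the first `d` components only; that is what makes the inverse cheap. A minimal plain-PyTorch sketch, assuming a toy linear map in place of Pyro's `DenseNN` hypernetwork, checks that the inverse recovers the input:

```python
import torch

# Toy coupling on a 6-dim vector, split at d=3; w_mean/w_logs form a
# hypothetical linear "hypernet" mapping x1 to (mean, log_scale).
torch.manual_seed(0)
d, D = 3, 6
w_mean = torch.randn(D - d, d)
w_logs = torch.randn(D - d, d) * 0.1


def coupling_forward(x):
    x1, x2 = x[:d], x[d:]
    mean, log_scale = w_mean @ x1, w_logs @ x1
    return torch.cat([x1, torch.exp(log_scale) * x2 + mean])


def coupling_inverse(y):
    y1, y2 = y[:d], y[d:]
    # y1 == x1, so (mean, log_scale) can be recomputed exactly.
    mean, log_scale = w_mean @ y1, w_logs @ y1
    return torch.cat([y1, (y2 - mean) * torch.exp(-log_scale)])


x = torch.randn(D)
assert torch.allclose(coupling_inverse(coupling_forward(x)), x, atol=1e-6)
```

The log-abs-det Jacobian of this forward map is simply `log_scale.sum()`, which matches the `_sum_rightmost(log_scale, ...)` computation in the row above.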
BXuan694/SOLO-pytorch | [
"aef0ac47ce6989f6633fe4f71070bd6944c39abb"
] | [
"train.py"
] | [
"from data.config import cfg, process_funcs_dict\nfrom data.coco import CocoDataset\nfrom data.loader import build_dataloader\n#from modules.solov1 import SOLOV1 as solo\n# from modules.solov2 import SOLOV2 as solo\nfrom modules.solov1d import SOLOV1 as solo\nimport time\nimport torch\nimport numpy as np\n\n# 梯度均衡\ndef clip_grads(params_):\n params_ = list(filter(lambda p: p.requires_grad and p.grad is not None, params_))\n if len(params_) > 0:\n return torch.nn.utils.clip_grad.clip_grad_norm_(params_, max_norm=35, norm_type=2)\n\n# 设置新学习率\ndef set_lr(optimizer_, newLr_):\n for paramGroup_ in optimizer_.param_groups:\n paramGroup_['lr'] = newLr_\n\n# 设置requires_grad为False\ndef gradinator(x_):\n x_.requires_grad = False\n return x_\n\n# 设置pipline\ndef build_process_pipeline(pipelineConfgs_):\n assert isinstance(pipelineConfgs_, list)\n process_pipelines = []\n for pConfig_ in pipelineConfgs_:\n assert isinstance(pConfig_, dict) and 'type' in pConfig_\n args = pConfig_.copy()\n obj_type = args.pop('type')\n if isinstance(obj_type, str):\n process_pipelines.append(process_funcs_dict[obj_type](**args))\n return process_pipelines\n\n# 计算warmup学习率\ndef get_warmup_lr(curIter_, totalIters_, baseLr_, warmupRatio_, warmUpOption='linear'):\n if warmUpOption == 'constant':\n warmupLr = baseLr_ * warmupRatio_ \n elif warmUpOption == 'linear':\n k = (1 - curIter_ / totalIters_) * (1 - warmupRatio_)\n warmupLr = baseLr_ * (1 - k)\n elif warmUpOption == 'exp':\n k = warmupRatio_**(1 - curIter_ / totalIters_)\n warmupLr = baseLr_ * k\n return warmupLr\n\n\ndef train(globalStartEpoch, totalEpoches):\n\n # train process pipelines func\n trainTransformsPiplines = build_process_pipeline(cfg.train_pipeline)\n print(trainTransformsPiplines)\n # build datashet\n casiadata = CocoDataset(ann_file=cfg.dataset.train_info,\n pipeline = trainTransformsPiplines,\n img_prefix = cfg.dataset.trainimg_prefix,\n data_root=cfg.dataset.train_prefix)\n torchdataLoader = build_dataloader(casiadata, cfg.imgs_per_gpu, cfg.workers_per_gpu, num_gpus=cfg.num_gpus, shuffle=True)\n\n if cfg.resume_from is None:\n model = solo(cfg, pretrained=None, mode='train')\n print('cfg.resume_from is None')\n else:\n model = solo(cfg, pretrained=cfg.resume_from, mode='train')\n model = model.cuda()\n model = model.train()\n\n lrOri = cfg.optimizer['lr']\n lrStages = cfg.lr_config[\"step\"]\n lrList = np.full(totalEpoches, lrOri)\n for ii in range(len(lrStages)):\n lrList[lrStages[ii]:]*=0.1\n print(\"starting epoch: \", globalStartEpoch)\n print(\"lr adapting stages: \", end=' ')\n for ii in range(len(lrStages)):\n print(cfg.lr_config[\"step\"][ii], end=\" \")\n print(\"\\ntotal training epoches: \", totalEpoches)\n\n optimizer_config = cfg.optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=optimizer_config['lr'], momentum=optimizer_config['momentum'], weight_decay=optimizer_config['weight_decay'])\n\n batchSize = cfg.imgs_per_gpu * cfg.num_gpus\n epochSize = len(casiadata) // batchSize \n # nums of trained epoches, idx of epoch to start\n pastEpoches = globalStartEpoch\n # nums of trained iters, idx of iter to start\n pastIters = (globalStartEpoch-1) * epochSize\n # nums of left epoches\n leftEpoches = totalEpoches - pastEpoches + 1\n # nums of left iters\n leftIters = leftEpoches * epochSize\n\n print('##### begin train ######')\n currentIter = 0 \n \n for epoch in range(leftEpoches):\n\n currentEpoch = epoch + pastEpoches\n # 终止训练\n if currentEpoch >= totalEpoches:\n print(\"Current epoch is larger than setting epoch nums, training 
stop.\")\n return\n\n # 仅用于打印\n loss_sum = 0.0 \n loss_ins = 0.0 \n loss_cate = 0.0\n \n for j, data in enumerate(torchdataLoader):\n iterStartTime = time.time()\n\n if cfg.lr_config['warmup'] is not None and pastIters < cfg.lr_config['warmup_iters']:\n cur_lr = get_warmup_lr(pastIters, cfg.lr_config['warmup_iters'],\n optimizer_config['lr'], cfg.lr_config['warmup_ratio'],\n cfg.lr_config['warmup'])\n else:\n cur_lr = lrList[currentEpoch]\n set_lr(optimizer, cur_lr)\n\n imgs = gradinator(data['img'].data[0].cuda())\n img_meta = data['img_metas'].data[0] #图片的一些原始信息\n gt_bboxes = []\n for bbox in data['gt_bboxes'].data[0]:\n bbox = gradinator(bbox.cuda())\n gt_bboxes.append(bbox)\n \n gt_masks = data['gt_masks'].data[0] #cpu numpy data\n \n gt_labels = []\n for label in data['gt_labels'].data[0]:\n label = gradinator(label.cuda())\n gt_labels.append(label)\n\n\n loss = model.forward(img=imgs,\n img_meta=img_meta,\n gt_bboxes=gt_bboxes,\n gt_labels=gt_labels,\n gt_masks=gt_masks)\n\n\n losses = loss['loss_ins'] + loss['loss_cate']\n loss_sum += losses.cpu().item()\n loss_ins += loss['loss_ins'].cpu().item()\n loss_cate += loss['loss_cate'].cpu().item()\n\n optimizer.zero_grad()\n losses.backward()\n\n if torch.isfinite(losses).item():\n grad_norm = clip_grads(model.parameters()) #梯度平衡\n optimizer.step()\n else:\n NotImplementedError(\"loss type error!can't backward!\")\n\n leftIters -= 1\n pastIters += 1\n currentIter += 1\n\n showIters = 10\n if j%int(showIters) == 0 and j != 0:\n iterLastTime = time.time() - iterStartTime\n left_seconds = iterLastTime * leftIters\n left_minutes = left_seconds / 60.0\n left_hours = left_minutes / 60.0\n left_days = left_hours // 24\n left_hours = left_hours % 24\n\n out_srt = 'epoch:['+str(currentEpoch)+']/['+str(totalEpoches)+'],' # end of epoch of idx currentEpoch\n out_srt = out_srt + '['+str(j)+']/'+str(epochSize)+'], left_time: ' + str(left_days)+'days '+format(left_hours,'.2f')+'hours,'\n print(out_srt, \"loss:\", format(loss_sum/showIters,'.4f'), 'loss_ins:', format(loss_ins/showIters,'.4f'), \"loss_cate:\", format(loss_cate/showIters,'.4f'), \"lr:\", format(cur_lr,'.8f'))\n loss_sum = 0.0 \n loss_ins = 0.0 \n loss_cate = 0.0\n\n leftEpoches -= 1\n\n save_name = \"./weights/solo1/\" + cfg.name + \"_epoch_\" + str(currentEpoch) + \".pth\"\n model.save_weights(save_name) \n\nif __name__ == '__main__':\n train(globalStartEpoch=cfg.epoch_iters_start, totalEpoches=cfg.total_epoch) #设置本次训练的起始epoch\n"
] | [
[
"torch.nn.utils.clip_grad.clip_grad_norm_",
"numpy.full",
"torch.isfinite"
]
] |
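The `get_warmup_lr` helper in the training script above ramps the learning rate from `baseLr_ * warmupRatio_` at iteration 0 up to `baseLr_` at the end of warmup. Below is a standalone numeric check of the linear branch; the 0.01 / 0.1 / 500 values are illustrative, not taken from the repo's config:

```python
def get_warmup_lr(cur_iter, total_iters, base_lr, warmup_ratio):
    # Linear branch of the warmup schedule from the training script above.
    k = (1 - cur_iter / total_iters) * (1 - warmup_ratio)
    return base_lr * (1 - k)


base_lr, ratio, total = 0.01, 0.1, 500
assert abs(get_warmup_lr(0, total, base_lr, ratio) - base_lr * ratio) < 1e-12
assert abs(get_warmup_lr(total, total, base_lr, ratio) - base_lr) < 1e-12
print([round(get_warmup_lr(i, total, base_lr, ratio), 5) for i in (0, 250, 500)])
# -> [0.001, 0.0055, 0.01]
```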
Halo9Pan/dive-keras | [
"f1e9c76675981ee6683f54a3ce569212d551d12d",
"f1e9c76675981ee6683f54a3ce569212d551d12d"
] | [
"keras/optimizer_v2/adamax.py",
"keras/distribute/custom_training_loop_models_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Adamax optimizer implementation.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom keras import backend_config\nfrom keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.optimizers.Adamax')\nclass Adamax(optimizer_v2.OptimizerV2):\n \"\"\"Optimizer that implements the Adamax algorithm.\n\n It is a variant of Adam based on the infinity norm.\n Default parameters follow those provided in the paper.\n Adamax is sometimes superior to adam, specially in models with embeddings.\n\n Initialization:\n\n ```python\n m = 0 # Initialize initial 1st moment vector\n v = 0 # Initialize the exponentially weighted infinity norm\n t = 0 # Initialize timestep\n ```\n\n The update rule for parameter `w` with gradient `g` is\n described at the end of section 7.1 of the paper:\n\n ```python\n t += 1\n m = beta1 * m + (1 - beta) * g\n v = max(beta2 * v, abs(g))\n current_lr = learning_rate / (1 - beta1 ** t)\n w = w - current_lr * m / (v + epsilon)\n ```\n\n Similarly to `Adam`, the epsilon is added for numerical stability\n (especially to get rid of division by zero when `v_t == 0`).\n\n In contrast to `Adam`, the sparse implementation of this algorithm\n (used when the gradient is an IndexedSlices object, typically because of\n `tf.gather` or an embedding lookup in the forward pass) only updates\n variable slices and corresponding `m_t`, `v_t` terms when that part of\n the variable was used in the forward pass. This means that the sparse\n behavior is contrast to the dense behavior (similar to some momentum\n implementations which ignore momentum unless a variable slice was actually\n used).\n\n Args:\n learning_rate: A `Tensor`, floating point value, or a schedule that is a\n `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate.\n beta_1: A float value or a constant float tensor. The exponential decay\n rate for the 1st moment estimates.\n beta_2: A float value or a constant float tensor. The exponential decay\n rate for the exponentially weighted infinity norm.\n epsilon: A small constant for numerical stability.\n name: Optional name for the operations created when applying gradients.\n Defaults to `\"Adamax\"`.\n **kwargs: Keyword arguments. 
Allowed to be one of\n `\"clipnorm\"` or `\"clipvalue\"`.\n `\"clipnorm\"` (float) clips gradients by norm; `\"clipvalue\"` (float) clips\n gradients by value.\n\n Reference:\n - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)\n \"\"\"\n\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self,\n learning_rate=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-7,\n name='Adamax',\n **kwargs):\n super(Adamax, self).__init__(name, **kwargs)\n self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))\n self._set_hyper('decay', self._initial_decay)\n self._set_hyper('beta_1', beta_1)\n self._set_hyper('beta_2', beta_2)\n self.epsilon = epsilon or backend_config.epsilon()\n\n def _create_slots(self, var_list):\n # Separate for-loops to respect the ordering of slot variables from v1.\n for var in var_list:\n self.add_slot(var, 'm') # Create slots for the first moments.\n for var in var_list:\n self.add_slot(var, 'v') # Create slots for the second moments.\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(Adamax, self)._prepare_local(var_device, var_dtype, apply_state)\n\n local_step = tf.cast(self.iterations + 1, var_dtype)\n beta_1_t = tf.identity(self._get_hyper('beta_1', var_dtype))\n beta_2_t = tf.identity(self._get_hyper('beta_2', var_dtype))\n beta_1_power = tf.pow(beta_1_t, local_step)\n lr_t = apply_state[(var_device, var_dtype)]['lr_t']\n\n apply_state[(var_device, var_dtype)].update(\n dict(\n neg_scaled_lr=-lr_t / (1 - beta_1_power),\n epsilon=tf.convert_to_tensor(\n self.epsilon, var_dtype),\n beta_1_t=beta_1_t,\n beta_1_power=beta_1_power,\n one_minus_beta_1_t=1 - beta_1_t,\n beta_2_t=beta_2_t,\n zero=tf.zeros((), dtype=tf.int64)))\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n m = self.get_slot(var, 'm')\n v = self.get_slot(var, 'v')\n return tf.raw_ops.ResourceApplyAdaMax(\n var=var.handle,\n m=m.handle,\n v=v.handle,\n beta1_power=coefficients['beta_1_power'],\n lr=coefficients['lr_t'],\n beta1=coefficients['beta_1_t'],\n beta2=coefficients['beta_2_t'],\n epsilon=coefficients['epsilon'],\n grad=grad,\n use_locking=self._use_locking)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n # m_t = beta1 * m + (1 - beta1) * g_t\n m = self.get_slot(var, 'm')\n m_slice = tf.gather(m, indices, axis=coefficients['zero'])\n m_t_slice = (m_slice * coefficients['beta_1_t'] +\n grad * coefficients['one_minus_beta_1_t'])\n with tf.control_dependencies([m_t_slice]):\n m_t = self._resource_scatter_update(m, indices, m_t_slice)\n\n # u_t = max(beta2 * u, abs(g_t))\n v = self.get_slot(var, 'v')\n v_slice = tf.gather(v, indices, axis=coefficients['zero'])\n v_t_slice = tf.maximum(v_slice * coefficients['beta_2_t'],\n tf.abs(grad))\n with tf.control_dependencies([v_t_slice]):\n v_t = self._resource_scatter_update(v, indices, v_t_slice)\n # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t\n var_slice = coefficients['neg_scaled_lr'] * (\n m_t_slice / (v_t_slice + coefficients['epsilon']))\n with tf.control_dependencies([var_slice]):\n var_update = self._resource_scatter_add(var, indices, var_slice)\n return tf.group(*[var_update, m_t, v_t])\n\n def 
get_config(self):\n config = super(Adamax, self).get_config()\n config.update({\n 'learning_rate': self._serialize_hyperparameter('learning_rate'),\n 'decay': self._initial_decay,\n 'beta_1': self._serialize_hyperparameter('beta_1'),\n 'beta_2': self._serialize_hyperparameter('beta_2'),\n 'epsilon': self.epsilon,\n })\n return config\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for custom training loops.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport keras\nfrom keras.distribute import strategy_combinations\nfrom keras.layers import core\nfrom keras.optimizer_v2 import gradient_descent\n\n\nclass CustomModel(tf.Module):\n\n def __init__(self, name=None):\n super(CustomModel, self).__init__(name=name)\n with self.name_scope:\n self._layers = [\n keras.layers.Dense(4, name=\"dense\"),\n ]\n\n @tf.Module.with_name_scope\n def __call__(self, x):\n for layer in self._layers:\n x = layer(x)\n return x\n\n\n@tf.__internal__.distribute.combinations.generate(\n tf.__internal__.test.combinations.combine(\n distribution=(strategy_combinations.all_strategies +\n strategy_combinations.multiworker_strategies),\n mode=[\"eager\"]\n )\n )\nclass KerasModelsTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_single_keras_layer_run(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = keras.layers.Dense(4, name=\"dense\")\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n return grads\n\n outputs = distribution.run(\n step_fn, args=(next(iterator),))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n def test_keras_model_optimizer_run(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = _get_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @tf.function\n def train_step(replicated_inputs):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.run(step_fn, args=(replicated_inputs,))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n for x in input_iterator:\n train_step(x)\n\n def test_keras_subclass_model_optimizer_run(self, distribution):\n def get_subclass_model():\n\n class KerasSubclassModel(keras.Model):\n\n def __init__(self):\n super(KerasSubclassModel, self).__init__()\n self.l = keras.layers.Dense(4, name=\"dense\")\n\n def call(self, x):\n return self.l(x)\n\n return KerasSubclassModel()\n dataset = _get_dataset()\n input_iterator = 
iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = get_subclass_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.run(step_fn, args=(next(iterator),))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n def test_keras_model_optimizer_run_loop(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = _get_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n for _ in tf.range(4):\n distribution.run(step_fn, args=(next(iterator),))\n\n train_step(input_iterator)\n\n def test_batch_norm_with_dynamic_batch(self, distribution):\n inputs = np.zeros((10, 3, 3, 3), dtype=np.float32)\n targets = np.zeros((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat()\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n x = keras.layers.Input(shape=(3, 3, 3), name=\"input\")\n y = keras.layers.BatchNormalization(fused=True, name=\"bn\")(x)\n y = keras.layers.Flatten()(y)\n y = keras.layers.Dense(4, name=\"dense\")(y)\n model = keras.Model(x, y)\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images, training=True)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n distribution.run(step_fn, args=(next(iterator),))\n\n train_step(input_iterator)\n\n def test_lstm(self, distribution):\n\n batch_size = 32\n\n def create_lstm_model():\n model = keras.models.Sequential()\n # We only have LSTM variables so we can detect no gradient issues more\n # easily.\n model.add(\n keras.layers.LSTM(1, return_sequences=False, input_shape=(10, 1)))\n return model\n\n def create_lstm_data():\n seq_length = 10\n\n x_train = np.random.rand(batch_size, seq_length, 1).astype(\"float32\")\n y_train = np.random.rand(batch_size, 1).astype(\"float32\")\n return x_train, y_train\n\n x, y = create_lstm_data()\n dataset = tf.data.Dataset.from_tensor_slices((x, y))\n dataset = dataset.batch(batch_size)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = create_lstm_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD()\n\n @tf.function\n def train_step(input_iterator):\n\n def step_fn(inputs):\n inps, targ = inputs\n with tf.GradientTape() as tape:\n output = model(inps)\n loss = tf.reduce_mean(\n 
keras.losses.binary_crossentropy(\n y_true=targ, y_pred=output, from_logits=False))\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.run(\n step_fn, args=(next(input_iterator),))\n return distribution.experimental_local_results(outputs)\n\n train_step(input_iterator)\n\n def test_nested_tf_functions(self, distribution):\n # The test builds two computations with keras layers, one with nested\n # tf.function, and the other without nested tf.function. We run these\n # computations independently on the model with same weights, and make sure\n # the variables are still the same after one training step.\n\n inputs = np.random.random((10, 3)).astype(np.float32)\n targets = np.ones((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n def get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n with distribution.scope():\n model = get_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)\n weights_file = os.path.join(self.get_temp_dir(), \".h5\")\n model.save_weights(weights_file)\n model2 = get_model()\n model2.load_weights(weights_file)\n\n # Make sure model and model2 variables are in sync when initialized.\n for model_v, model2_v in zip(model.variables, model2.variables):\n self.assertAllClose(model_v.numpy(), model2_v.numpy())\n\n def compute_loss(images, targets):\n outputs = model(images)\n return keras.losses.mean_squared_error(targets, outputs)\n\n @tf.function\n def train_step_without_nested_tf_function(inputs):\n\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n loss = compute_loss(images, targets)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n distribution.run(step_fn, args=(inputs,))\n\n @tf.function\n def compute_loss2(images, targets):\n outputs = model2(images)\n return keras.losses.mean_squared_error(targets, outputs)\n\n @tf.function\n def train_step_with_nested_tf_function(inputs):\n\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n loss = compute_loss2(images, targets)\n grads = tape.gradient(loss, model2.variables)\n optimizer.apply_gradients(zip(grads, model2.variables))\n\n distribution.run(step_fn, args=(inputs,))\n\n inputs = next(input_iterator)\n\n train_step_without_nested_tf_function(inputs)\n train_step_with_nested_tf_function(inputs)\n\n # Make sure model and model2 variables are still in sync.\n for model_v, model2_v in zip(model.variables, model2.variables):\n self.assertAllClose(model_v.numpy(), model2_v.numpy())\n\n def test_nested_tf_functions_with_control_flow(self, distribution):\n inputs = np.random.random((10, 3)).astype(np.float32)\n targets = np.ones((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n def get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n with distribution.scope():\n model = get_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, 
momentum=0.01)\n\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n distribution.run(step_fn, args=(next(iterator),))\n\n @tf.function\n def train_steps(iterator):\n for _ in tf.range(10):\n train_step(iterator)\n\n train_steps(input_iterator)\n\n def test_nested_tf_functions_with_tf_function_passing_to_strategy_run(\n self, distribution):\n self.skipTest(\"b/190608193\")\n\n inputs = np.random.random((10, 3)).astype(np.float32)\n targets = np.ones((10, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)).repeat()\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n def get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n with distribution.scope():\n model = get_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)\n\n @tf.function\n def compute_loss(images, targets):\n outputs = model(images)\n return keras.losses.mean_squared_error(targets, outputs)\n\n @tf.function\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n loss = compute_loss(images, targets)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n inputs = next(input_iterator)\n distribution.run(step_fn, args=(inputs,))\n\n def test_customized_tf_module_run(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = CustomModel()\n\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n return grads\n\n outputs = distribution.run(\n step_fn, args=(next(iterator),))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n def test_reduce_loss(self, distribution):\n inputs = np.zeros((10, 4), dtype=np.float32)\n targets = np.zeros((10, 1), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.batch(10)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n x = keras.layers.Input(shape=(4), name=\"input\")\n y = keras.layers.Dense(3, name=\"dense\")(x)\n model = keras.Model(x, y)\n\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n images, targets = inputs\n outputs = model(images)\n loss = keras.losses.sparse_categorical_crossentropy(targets, outputs)\n return loss\n\n return distribution.run(step_fn, args=(next(iterator),))\n\n loss = train_step(input_iterator)\n loss = distribution.reduce(tf.distribute.ReduceOp.MEAN, loss, axis=0)\n\n def test_variable_run_argument(self, distribution):\n # Test that variables passed to run() remain variables. 
Previous behavior\n # in TPUStrategy was to cast to Tensor.\n\n with distribution.scope():\n optimizer = gradient_descent.SGD(0.1)\n net = core.Dense(1, trainable=True)\n dataset = tf.data.Dataset.from_tensors([[1.]])\n dataset = dataset.repeat()\n dataset = dataset.batch(2, drop_remainder=True)\n\n def replica_step(trainable_variables, features):\n\n with tf.GradientTape() as tape:\n net_out = net(features[0], training=True)\n loss = (net_out - 1.0) * (net_out - 1.0)\n gradients = tape.gradient(loss, trainable_variables)\n optimizer.apply_gradients(zip(gradients, trainable_variables))\n return loss\n\n @tf.function\n def step(features):\n per_replica_losses = distribution.run(\n replica_step,\n (net.trainable_variables, features),\n )\n loss = distribution.reduce(\n tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\n return loss\n\n step(next(iter(dataset)))\n\n\nclass KerasModelsXLATest(tf.test.TestCase, parameterized.TestCase):\n\n @tf.__internal__.distribute.combinations.generate(\n tf.__internal__.test.combinations.combine(\n distribution=strategy_combinations.tpu_strategies, mode=[\"eager\"]))\n def test_tf_function_jit_compile(self, distribution):\n dataset = _get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n class CustomDense(keras.layers.Layer):\n\n def __init__(self, num_outputs):\n super(CustomDense, self).__init__()\n self.num_outputs = num_outputs\n\n def build(self, input_shape):\n self.kernel = self.add_variable(\n \"kernel\", shape=[int(input_shape[-1]), self.num_outputs])\n\n @tf.function(jit_compile=True)\n def call(self, inputs):\n return tf.matmul(inputs, self.kernel)\n\n with distribution.scope():\n x = keras.layers.Input(shape=(3,))\n y = CustomDense(4)(x)\n model = keras.Model(x, y)\n\n @tf.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with tf.GradientTape() as tape:\n outputs = model(images)\n loss = keras.losses.mean_squared_error(targets, outputs)\n grads = tape.gradient(loss, model.variables)\n return grads\n\n outputs = distribution.run(\n step_fn, args=(next(iterator),))\n return tf.nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n\ndef _get_dataset():\n inputs = np.zeros((31, 3), dtype=np.float32)\n targets = np.zeros((31, 4), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.batch(10)\n return dataset\n\n\ndef _get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n\nif __name__ == \"__main__\":\n tf.__internal__.distribute.multi_process_runner.test_main()\n"
] | [
[
"tensorflow.compat.v2.raw_ops.ResourceApplyAdaMax",
"tensorflow.compat.v2.pow",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.gather",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.group",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.abs"
],
[
"numpy.ones",
"tensorflow.compat.v2.function",
"numpy.zeros",
"tensorflow.compat.v2.data.Dataset.from_tensor_slices",
"tensorflow.compat.v2.data.Dataset.from_tensors",
"tensorflow.compat.v2.GradientTape",
"numpy.random.random",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.__internal__.distribute.multi_process_runner.test_main",
"numpy.random.rand",
"tensorflow.compat.v2.__internal__.test.combinations.combine",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.nest.map_structure"
]
] |
kartik4949/keras-cv | [
"4c300f564d8ec99cd1351c445e1803ee6664915a"
] | [
"keras_cv/layers/preprocessing/random_sharpness_test.py"
] | [
"# Copyright 2022 The KerasCV Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport tensorflow as tf\n\nfrom keras_cv.layers import preprocessing\n\n\nclass RandomSharpnessTest(tf.test.TestCase):\n def test_random_sharpness_preserves_output_shape(self):\n img_shape = (50, 50, 3)\n xs = tf.stack(\n [2 * tf.ones(img_shape), tf.ones(img_shape)],\n axis=0,\n )\n\n layer = preprocessing.RandomSharpness(0.0, value_range=(0, 255))\n ys = layer(xs)\n\n self.assertEqual(xs.shape, ys.shape)\n self.assertAllClose(xs, ys)\n\n def test_random_sharpness_blur_effect_single_channel(self):\n xs = tf.expand_dims(\n tf.constant(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n ]\n ),\n axis=-1,\n )\n xs = tf.expand_dims(xs, axis=0)\n\n layer = preprocessing.RandomSharpness((1.0, 1.0), value_range=(0, 255))\n ys = layer(xs)\n\n self.assertEqual(xs.shape, ys.shape)\n\n result = tf.expand_dims(\n tf.constant(\n [\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1 / 13, 1 / 13, 1 / 13, 0, 0],\n [0, 0, 1 / 13, 5 / 13, 1 / 13, 0, 0],\n [0, 0, 1 / 13, 1 / 13, 1 / 13, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n ]\n ),\n axis=-1,\n )\n result = tf.expand_dims(result, axis=0)\n\n self.assertAllClose(ys, result)\n"
] | [
[
"tensorflow.constant",
"tensorflow.ones",
"tensorflow.expand_dims"
]
] |
ezvk7740/robotics-rl-srl | [
"aad209d6edd1bf28d886132fecd0e503d2a7af93"
] | [
"replay/compare_plots.py"
] | [
"import argparse\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib.ticker import FuncFormatter\n\nfrom replay.aggregate_plots import lightcolors, darkcolors, Y_LIM_SHAPED_REWARD, Y_LIM_SPARSE_REWARD, millions\nfrom srl_zoo.utils import printGreen, printRed\n\n# Init seaborn\nsns.set()\n# Style for the title\nfontstyle = {'fontname': 'DejaVu Sans', 'fontsize': 16}\n\n\ndef comparePlots(path, plots, y_limits, title=\"Learning Curve\",\n timesteps=False, truncate_x=-1, no_display=False):\n \"\"\"\n :param path: (str) path to the folder where the plots are stored\n :param plots: ([str]) List of saved plots as npz file\n :param y_limits: ([float]) y-limits for the plot\n :param title: (str) plot title\n :param timesteps: (bool) Plot timesteps instead of episodes\n :param truncate_x: (int) Truncate the experiments after n ticks on the x-axis\n :param no_display: (bool) Set to true, the plot won't be displayed (useful when only saving plot)\n \"\"\"\n y_list = []\n x_list = []\n for plot in plots:\n saved_plot = np.load('{}/{}'.format(path, plot))\n x_list.append(saved_plot['x'])\n y_list.append(saved_plot['y'])\n\n lengths = list(map(len, x_list))\n min_x, max_x = np.min(lengths), np.max(lengths)\n\n print(\"Min x: {}\".format(min_x))\n print(\"Max x: {}\".format(max_x))\n\n if truncate_x > 0:\n min_x = min(truncate_x, min_x)\n print(\"Truncating the x-axis at {}\".format(min_x))\n\n x = np.array(x_list[0][:min_x])\n\n printGreen(\"{} Experiments\".format(len(y_list)))\n # print(\"Min, Max rewards:\", np.min(y), np.max(y))\n\n fig = plt.figure(title)\n for i in range(len(y_list)):\n label = plots[i].split('.npz')[0]\n y = y_list[i][:, :min_x]\n print('{}: {} experiments'.format(label, len(y)))\n # Compute mean for different seeds\n m = np.mean(y, axis=0)\n # Compute standard error\n s = np.squeeze(np.asarray(np.std(y, axis=0)))\n n = y.shape[0]\n plt.fill_between(x, m - s / np.sqrt(n), m + s / np.sqrt(n), color=lightcolors[i % len(lightcolors)], alpha=0.5)\n plt.plot(x, m, color=darkcolors[i % len(darkcolors)], label=label, linewidth=2)\n\n if timesteps:\n formatter = FuncFormatter(millions)\n plt.xlabel('Number of Timesteps')\n fig.axes[0].xaxis.set_major_formatter(formatter)\n else:\n plt.xlabel('Number of Episodes')\n plt.ylabel('Rewards')\n\n plt.title(title, **fontstyle)\n plt.ylim(y_limits)\n\n plt.legend(framealpha=0.8, frameon=True, labelspacing=0.01, loc='lower right', fontsize=16)\n\n if not no_display:\n plt.show()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Plot trained agent\")\n parser.add_argument('-i', '--input-dir', help='folder with the plots as npz files', type=str, required=True)\n parser.add_argument('-t', '--title', help='Plot title', type=str, default='Learning Curve')\n parser.add_argument('--episode_window', type=int, default=40,\n help='Episode window for moving average plot (default: 40)')\n parser.add_argument('--shape-reward', action='store_true', default=False,\n help='Change the y_limit to correspond shaped reward bounds')\n parser.add_argument('--y-lim', nargs=2, type=float, default=[-1, -1], help=\"limits for the y axis\")\n parser.add_argument('--truncate-x', type=int, default=-1,\n help=\"Truncate the experiments after n ticks on the x-axis (default: -1, no truncation)\")\n parser.add_argument('--timesteps', action='store_true', default=False,\n help='Plot timesteps instead of episodes')\n parser.add_argument('--no-display', action='store_true', default=False, help='Do 
not display plot')\n args = parser.parse_args()\n\n y_limits = args.y_lim\n if y_limits[0] == y_limits[1]:\n if args.shape_reward:\n y_limits = Y_LIM_SHAPED_REWARD\n else:\n y_limits = Y_LIM_SPARSE_REWARD\n print(\"Using default limits:\", y_limits)\n\n plots = [f for f in os.listdir(args.input_dir) if f.endswith('.npz')]\n plots.sort()\n\n if len(plots) == 0:\n printRed(\"No npz files found in {}\".format(args.input_dir))\n exit(-1)\n\n comparePlots(args.input_dir, plots, title=args.title, y_limits=y_limits, no_display=args.no_display,\n timesteps=args.timesteps, truncate_x=args.truncate_x)\n"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.title",
"numpy.max",
"matplotlib.pyplot.ylabel",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.std",
"matplotlib.pyplot.xlabel",
"numpy.mean"
]
] |
maobubu/stock-prediction | [
"b2442ccb027c25809a33a610f010cdec077bf61a"
] | [
"stuff/preprocess_bilstm.py"
] | [
"import json\nimport pandas as pd\nimport re, os, glob\nimport numpy as np\nfrom collections import defaultdict\nimport nltk\nimport string\nfrom gensim.models import Phrases\nfrom gensim.utils import SaveLoad\nfrom gensim.models.phrases import Phraser\nfrom nltk.corpus import stopwords # Import the stop word list\n#from sklearn.model_selection import train_test_split\n#from sklearn.feature_extraction.text import CountVectorizer\nfrom datetime import datetime\nfrom datetime import timedelta\nimport timeit\nimport sys\nfrom tqdm import tqdm\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import wordnet as wn\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer\n#from nltk import pos_tag, word_tokenize\nfrom nltk.tag import PerceptronTagger\n# Pywsd's Lemmatizer.\nporter = PorterStemmer()\nwnl = WordNetLemmatizer()\ntagger = PerceptronTagger()\npos_tag = tagger.tag\ntokenizer = RegexpTokenizer(r'\\w+')\n\n\ndef lemmatize(ambiguous_word, pos=None, neverstem=True, \n lemmatizer=wnl, stemmer=porter):\n \"\"\"\n Tries to convert a surface word into lemma, and if lemmatize word is not in\n wordnet then try and convert surface word into its stem.\n This is to handle the case where users input a surface word as an ambiguous \n word and the surface word is a not a lemma.\n \"\"\"\n if pos:\n lemma = lemmatizer.lemmatize(ambiguous_word, pos=pos)\n else:\n lemma = lemmatizer.lemmatize(ambiguous_word)\n stem = stemmer.stem(ambiguous_word)\n # Ensure that ambiguous word is a lemma.\n if not wn.synsets(lemma):\n if neverstem:\n return ambiguous_word\n if not wn.synsets(stem):\n return ambiguous_word\n else:\n return stem\n else:\n return lemma\n\ndef penn2morphy(penntag, returnNone=False):\n morphy_tag = {'NN':wn.NOUN, 'JJ':wn.ADJ,\n 'VB':wn.VERB, 'RB':wn.ADV}\n try:\n return morphy_tag[penntag[:2]]\n except:\n return None if returnNone else ''\n\ndef word_tokenize(text,tokenize=tokenizer):\n return tokenize.tokenize(text.lower())#doesn't remove stopwords\n #return [w for w in tokenize.tokenize(text.lower()) if not w in stopwords.words(\"english\")]\n\ndef lemmatize_sentence(sentence, neverstem=False, keepWordPOS=False, \n tokenizer=word_tokenize, postagger=pos_tag, \n lemmatizer=wnl, stemmer=porter):\n words, lemmas, poss = [], [], []\n for word, pos in postagger(sentence):#change tokenizer(sentence) to sentence\n pos = penn2morphy(pos)\n lemmas.append(lemmatize(word.lower(), pos, neverstem,\n lemmatizer, stemmer))\n poss.append(pos)\n words.append(word)\n if keepWordPOS:\n return words, lemmas, [None if i == '' else i for i in poss]\n return lemmas\n\ndef Ding_abstract(label,bigram,trigram,types='title'):\n start = timeit.default_timer()\n print('start processing Ding_abstract data')\n article = defaultdict(list)\n with open('/home/jialong/Documents/phrase_embedding/j_news.json', \"r\") as data: # title+ abstract + article\n title = pd.DataFrame(json.loads(line) for line in data).set_index('date')\n title=title.replace(['UPDATE\\s\\d-', \"'s\"], '', regex=True)\n title=title.replace(['\\d+\\S\\d+','\\d+'], 'xxx', regex=True)\n title[types] = title[types].str.replace('[{}]'.format(string.punctuation), ' ')\n title = title.drop_duplicates(subset=[types], keep='first')\n for j in label.index:\n try:\n day = (datetime.strptime(j, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')\n article[j].extend(set(title.loc[day, types].values))\n except (AttributeError, KeyError, TypeError) as e:\n continue\n train_ding = split(article, label, bigram, trigram)\n length = train_ding.shape[0]\n train 
= train_ding.iloc[0:int(length * 0.8), :]\n validate = train_ding.iloc[int(length * 0.8):int(length * 0.9), :]\n test = train_ding.iloc[int(length * 0.9):-1, :]\n stop = timeit.default_timer()\n print(\"run time for ding:\", stop - start)\n #os.chdir('/Users/maobu/Dropbox/stock/data/ding/')\n os.chdir('/home/jialong/Documents/phrase_embedding/data/yunke_'+types+'/')\n train.to_csv(\"one_train.csv\", index=False, encoding='utf-8')\n df_train = np.split(train, [1], axis=1)\n df_train[1].to_csv('one_train_text.txt', header=None, index=None, encoding='utf-8')\n df_train[0].to_csv('one_train_label.txt', header=None, index=None, encoding='utf-8')\n validate.to_csv(\"one_validate.csv\", index=False, encoding='utf-8')\n df_validate = np.split(validate, [1], axis=1)\n df_validate[1].to_csv('one_validate_text.txt', header=None, index=None, encoding='utf-8')\n df_validate[0].to_csv('one_validate_label.txt', header=None, index=None, encoding='utf-8')\n test.to_csv(\"one_test.csv\", index=False, encoding='utf-8')\n df_test = np.split(test, [1], axis=1)\n df_test[1].to_csv('one_test_text.txt', header=None, index=None, encoding='utf-8')\n df_test[0].to_csv('one_test_label.txt', header=None, index=None, encoding='utf-8')\n\ndef Ding(label, bigram, trigram,types='title'):\n start = timeit.default_timer()\n print('start processing Ding data')\n article = defaultdict(list)\n title = pd.read_table('reuters_news_title.txt', names=[\"Date\", 'title']).set_index('Date')\n #tt=pd.read_table('bloomberg_news_title.txt',names = [\"Date\", 'title']).set_index('Date')\n #title=title.append(tt)\n title=title.replace(['UPDATE\\s\\d-', \"'s\"], '', regex=True)\n title=title.replace(['\\d+\\S\\d+','\\d+'], 'xxx', regex=True)\n #title['title'] = title['title'].str.replace('[{}]'.format(string.digits), '_NUM_ ')\n title[types] = title[types].str.replace('[{}]'.format(string.punctuation), ' ')\n title = title.drop_duplicates(subset=[types], keep='first')\n for j in label.index:\n try:\n day = (datetime.strptime(j, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')\n article[j].extend(set(title.loc[day, types].values))\n except (AttributeError, KeyError, TypeError) as e:\n continue\n train_ding = split(article, label, bigram, trigram)\n length = train_ding.shape[0]\n train = train_ding.iloc[0:int(length * 0.8), :]\n validate = train_ding.iloc[int(length * 0.8):int(length * 0.9), :]\n test = train_ding.iloc[int(length * 0.9):-1, :]\n stop = timeit.default_timer()\n print(\"run time for ding:\", stop - start)\n os.chdir('/Users/maobu/Dropbox/stock/data/ding/')\n train.to_csv(\"one_train.csv\", index=False, encoding='utf-8')\n df_train = np.split(train, [1], axis=1)\n df_train[1].to_csv('one_train_text.txt', header=None, index=None, encoding='utf-8')\n df_train[0].to_csv('one_train_label.txt', header=None, index=None, encoding='utf-8')\n validate.to_csv(\"one_validate.csv\", index=False, encoding='utf-8')\n df_validate = np.split(validate, [1], axis=1)\n df_validate[1].to_csv('one_validate_text.txt', header=None, index=None, encoding='utf-8')\n df_validate[0].to_csv('one_validate_label.txt', header=None, index=None, encoding='utf-8')\n test.to_csv(\"one_test.csv\", index=False, encoding='utf-8')\n df_test = np.split(test, [1], axis=1)\n df_test[1].to_csv('one_test_text.txt', header=None, index=None, encoding='utf-8')\n df_test[0].to_csv('one_test_label.txt', header=None, index=None, encoding='utf-8')\n\n\ndef convert(reviews, bigram, trigram, remove_stopwords=True):\n #letters_only = re.sub(\"[^a-zA-Z0-9]\", \" \", 
str(reviews))\n #words = list(filter(None, letters_only.lower().split()))\n words= word_tokenize(reviews)#tokenize and remove punctuation\n if remove_stopwords:#remove stopwords\n words = [w for w in words if not w in stopwords.words(\"english\")]\n words = trigram[bigram[words]]#to phrase\n words= lemmatize_sentence(words)#lemma\n return \" \".join(words) + ' .'\n\n\ndef split(data, label, bigram, trigram):\n data_clean = []\n lab = []\n date = []\n for key, value in data.items():\n for j in set(value):\n try:\n lab.append(label[key])\n data_clean.append(convert(j, bigram, trigram, True))\n except (KeyError, TypeError) as e:\n continue\n print(len(data_clean))\n ll = pd.DataFrame({'label': lab}, dtype='int32')\n d = pd.DataFrame({'title': data_clean}) # put the convert words into a new Dataframe\n final = pd.merge(ll, d, left_index=True, right_index=True) # merge two list\n return final\n\n\ndef maobu(label, d, article, abstract, days, add=False):\n for j in label.index:\n # for i in range(1, days + 1):\n try:\n day = (datetime.strptime(j, '%Y-%m-%d') - timedelta(days=days)).strftime('%Y-%m-%d')\n article[j].extend(set(d.loc[day, \"title\"].values))\n if add:\n abstract[j].extend(set(d.loc[day, \"abstract\"].values))\n except (AttributeError, KeyError, TypeError) as e:\n continue\n\n\ndef main():\n arg1 = sys.argv[1]\n one_train, abstract_train, seven_train, month_train = defaultdict(list), defaultdict(list), defaultdict(\n list), defaultdict(list)\n one_test, seven_test, month_test = defaultdict(list), defaultdict(list), defaultdict(list)\n # nltk.download('stopwords')\n print(\"start pre-processing the data\")\n bigram = SaveLoad.load(\"data/phrase_xxx/big_phrase.pickle\")\n trigram = SaveLoad.load(\"data/phrase_xxx/trig_phrase.pickle\")\n label_one = pd.read_pickle(\"data/label_one_new.pickle\")\n label_seven = pd.read_pickle(\"data/label_seven.pickle\")\n label_month = pd.read_pickle(\"data/label_month.pickle\")\n print(\"starting the training selecting phase\")\n Ding(label_one, bigram, trigram,types=arg1)\n #Ding_abstract(label_one, bigram, trigram,types=str(arg1))\n '''os.chdir('/home/huicheng/PycharmProjects/stock/pickle')\n subfolder_list = glob.glob('*.pickle')\n pbar = tqdm(total=len(subfolder_list))\n for i, file in enumerate(glob.glob(\"*.pickle\")):\n D = pd.read_pickle(file)\n pbar.set_description('processing number:{} name:{}'.format(i, file))\n pbar.update(1)\n maobu(label_one, D, one_train, abstract_train, 1, add=False) # add abstract or not\n # maobu(label_seven, D, seven_train, 7)\n # maobu(label_month, D, month_train, 30)\n pbar.close()\n start = timeit.default_timer()\n train_one = split(one_train, label_one, bigram, trigram)\n length = train_one.shape[0]\n train = train_one.iloc[0:int(length * 0.8), :]\n validate = train_one.iloc[int(length * 0.8):int(length * 0.9), :]\n test = train_one.iloc[int(length * 0.9):-1, :]\n # train_seven = split(seven_train, label_seven)\n # train_month = split(month_train, label_month)\n stop = timeit.default_timer()\n print(\"run time for training:\", stop - start)\n os.chdir('/home/huicheng/PycharmProjects/stock/data/our')\n train.to_csv(\"one_train.csv\", index=False, encoding='utf-8')\n df_train = np.split(train, [1], axis=1)\n df_train[1].to_csv('one_train_text.txt', header=None, index=None, encoding='utf-8')\n df_train[0].to_csv('one_train_label.txt', header=None, index=None, encoding='utf-8')\n validate.to_csv(\"one_validate.csv\", index=False, encoding='utf-8')\n df_validate = np.split(validate, [1], axis=1)\n 
df_validate[1].to_csv('one_validate_text.txt', header=None, index=None, encoding='utf-8')\n df_validate[0].to_csv('one_validate_label.txt', header=None, index=None, encoding='utf-8')\n test.to_csv(\"one_test.csv\", index=False, encoding='utf-8')\n df_test = np.split(test, [1], axis=1)\n df_test[1].to_csv('one_test_text.txt', header=None, index=None, encoding='utf-8')\n df_test[0].to_csv('one_test_label.txt', header=None, index=None, encoding='utf-8')\n\n # TODO split the abstract\n train_abstract = split(abstract_train, label_one, bigram, trigram)\n length = train_abstract.shape[0]\n train = train_abstract.iloc[0:int(length * 0.8), :]\n validate = train_abstract.iloc[int(length * 0.8):int(length * 0.9), :]\n test = train_abstract.iloc[int(length * 0.9):-1, :]\n # train_seven = split(seven_train, label_seven)\n # train_month = split(month_train, label_month)\n stop = timeit.default_timer()\n print(\"run time for training2:\", stop - start)\n os.chdir('/home/huicheng/PycharmProjects/stock/data/our_abstract')\n train.to_csv(\"one_train.csv\", index=False, encoding='utf-8')\n df_train = np.split(train, [1], axis=1)\n df_train[1].to_csv('one_train_text.txt', header=None, index=None, encoding='utf-8')\n df_train[0].to_csv('one_train_label.txt', header=None, index=None, encoding='utf-8')\n validate.to_csv(\"one_validate.csv\", index=False, encoding='utf-8')\n df_validate = np.split(validate, [1], axis=1)\n df_validate[1].to_csv('one_validate_text.txt', header=None, index=None, encoding='utf-8')\n df_validate[0].to_csv('one_validate_label.txt', header=None, index=None, encoding='utf-8')\n test.to_csv(\"one_test.csv\", index=False, encoding='utf-8')\n df_test = np.split(test, [1], axis=1)\n df_test[1].to_csv('one_test_text.txt', header=None, index=None, encoding='utf-8')\n df_test[0].to_csv('one_test_label.txt', header=None, index=None, encoding='utf-8')\n '''\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_table",
"pandas.read_pickle",
"pandas.DataFrame",
"pandas.merge",
"numpy.split"
]
] |
zye1996/3DSSD | [
"036983e282cd13e6a5bf0b51ff6ad31639a75b07"
] | [
"lib/builder/postprocessor.py"
] | [
"import numpy as np\nimport tensorflow as tf\n\nfrom lib.core.config import cfg\nfrom lib.utils.anchors_util import project_to_bev\nfrom lib.utils.box_3d_utils import box_3d_to_anchor\n\nimport lib.dataset.maps_dict as maps_dict\n\nclass PostProcessor:\n def __init__(self, stage, cls_num):\n if stage == 0:\n self.postprocessor_cfg = cfg.MODEL.FIRST_STAGE\n elif stage == 1:\n self.postprocessor_cfg = cfg.MODEL.SECOND_STAGE\n else: raise Exception('Not Implementation Error')\n\n self.max_output_size = self.postprocessor_cfg.MAX_OUTPUT_NUM\n self.nms_threshold = self.postprocessor_cfg.NMS_THRESH\n\n self.cls_num = cls_num\n \n \n def class_unaware_format(self, pred_anchors_3d, pred_score):\n \"\"\" (for rpn propose)\n Change prediction format from class-aware-format to class-ignorance-format\n pred_anchors_3d: [bs, points_num, 1/cls_num, 7]\n pred_score: [bs, points_num, cls_num]\n\n return: pred_anchors_3d: [bs, points_num, 1, 7]\n pred_score: [bs, points_num, 1]\n \"\"\" \n unaware_pred_score = tf.reduce_max(pred_score, axis=-1, keepdims=True)\n cls_num = pred_anchors_3d.get_shape().as_list()[2]\n if cls_num == 1:\n return pred_anchors_3d, unaware_pred_score\n\n # class-aware in boundingbox prediction\n pred_cls = tf.argmax(pred_score, axis=-1)\n pred_cls_onehot = tf.cast(tf.one_hot(pred_cls, depth=cls_num, on_value=1, off_value=0, axis=-1), tf.float32)\n # bs, pts_num, cls_num, 7\n unaware_pred_anchors_3d = pred_anchors_3d * tf.expand_dims(pred_cls_onehot, axis=-1)\n unaware_pred_anchors_3d = tf.reduce_sum(unaware_pred_anchors_3d, axis=2, keepdims=True)\n return unaware_pred_anchors_3d, unaware_pred_score\n\n \n\n\n def forward(self, pred_anchors_3d, pred_score, output_dict, pred_attribute=None, pred_velocity=None):\n \"\"\"\n pred_anchors_3d: [bs, points_num, 1/cls_num, 7]\n pred_score: [bs, points_num, cls_num]\n pred_attribute: [bs, points_num, 1/cls_num, 8]\n pred_velocity: [bs, points_num, 1/cls_num, 2]\n \"\"\"\n cls_num = pred_score.get_shape().as_list()[-1] \n if cls_num != self.cls_num: # format predictions to class-unaware predictions\n assert pred_attribute == None and pred_velocity == None, 'Not support the predictions of attribute and velocity in RPN phase'\n pred_anchors_3d, pred_score = self.class_unaware_format(pred_anchors_3d, pred_score)\n\n pred_anchors_3d_list = tf.unstack(pred_anchors_3d, axis=0)\n pred_scores_list = tf.unstack(pred_score, axis=0)\n\n pred_3d_bbox_list = []\n pred_3d_cls_score_list = []\n pred_3d_cls_cat_list = []\n pred_attribute_list = []\n pred_velocity_list = []\n for batch_idx, pred_anchors_3d, pred_scores in zip(range(len(pred_anchors_3d_list)), pred_anchors_3d_list, pred_scores_list):\n cur_pred_3d_bbox_list = []\n cur_pred_3d_cls_score_list = []\n cur_pred_3d_cls_cat_list = []\n cur_pred_attribute_list = []\n cur_pred_velocity_list = []\n\n for i in range(self.cls_num):\n reg_i = min(i, pred_anchors_3d.get_shape().as_list()[1] - 1)\n cur_pred_anchors_3d = pred_anchors_3d[:, reg_i, :] \n\n cur_pred_anchors = box_3d_to_anchor(cur_pred_anchors_3d) \n cur_pred_anchors_bev = project_to_bev(cur_pred_anchors) # [-1, 4]\n\n cur_cls_score = pred_scores[:, i]\n nms_index = tf.image.non_max_suppression(cur_pred_anchors_bev, cur_cls_score, max_output_size=self.max_output_size, iou_threshold=self.nms_threshold)\n \n cur_pred_3d_bbox_list.append(tf.gather(cur_pred_anchors_3d, nms_index)) \n cur_pred_3d_cls_score_list.append(tf.gather(cur_cls_score, nms_index))\n cur_pred_3d_cls_cat_list.append(tf.cast(tf.ones_like(nms_index), tf.int32) * i)\n\n if 
pred_attribute is not None:\n cur_pred_attribute_list.append(tf.gather(pred_attribute[batch_idx, :, reg_i, :], nms_index))\n if pred_velocity is not None:\n cur_pred_velocity_list.append(tf.gather(pred_velocity[batch_idx, :, reg_i, :], nms_index))\n\n cur_pred_3d_bbox_list = tf.concat(cur_pred_3d_bbox_list, axis=0)\n cur_pred_3d_cls_score_list = tf.concat(cur_pred_3d_cls_score_list, axis=0)\n cur_pred_3d_cls_cat_list = tf.concat(cur_pred_3d_cls_cat_list, axis=0)\n\n pred_3d_bbox_list.append(cur_pred_3d_bbox_list)\n pred_3d_cls_score_list.append(cur_pred_3d_cls_score_list)\n pred_3d_cls_cat_list.append(cur_pred_3d_cls_cat_list)\n\n if pred_attribute is not None:\n cur_pred_attribute_list = tf.concat(cur_pred_attribute_list, axis=0)\n pred_attribute_list.append(cur_pred_attribute_list)\n\n if pred_velocity is not None:\n cur_pred_velocity_list = tf.concat(cur_pred_velocity_list, axis=0)\n pred_velocity_list.append(cur_pred_velocity_list)\n\n pred_3d_bbox_list = tf.stack(pred_3d_bbox_list, axis=0)\n pred_3d_cls_score_list = tf.stack(pred_3d_cls_score_list, axis=0)\n pred_3d_cls_cat_list = tf.stack(pred_3d_cls_cat_list, axis=0)\n \n output_dict[maps_dict.PRED_3D_BBOX].append(pred_3d_bbox_list)\n output_dict[maps_dict.PRED_3D_SCORE].append(pred_3d_cls_score_list)\n output_dict[maps_dict.PRED_3D_CLS_CATEGORY].append(pred_3d_cls_cat_list)\n if pred_attribute is not None:\n output_dict[maps_dict.PRED_3D_ATTRIBUTE].append(tf.stack(pred_attribute_list, axis=0))\n if pred_velocity is not None:\n output_dict[maps_dict.PRED_3D_VELOCITY].append(tf.stack(pred_velocity_list, axis=0))\n\n return output_dict\n"
] | [
[
"tensorflow.stack",
"tensorflow.reduce_max",
"tensorflow.unstack",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.image.non_max_suppression",
"tensorflow.one_hot",
"tensorflow.argmax",
"tensorflow.concat",
"tensorflow.gather",
"tensorflow.reduce_sum"
]
] |
wix-playground/incubator-tvm | [
"99734d29b77ef88fe81b0fd0cb2b71db8dc2608e"
] | [
"tests/python/relay/test_op_level5.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" Support level5 operator test cases.\n\"\"\"\nimport math\nimport numpy as np\nimport tvm\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.testing import ctx_list\nimport topi.testing\n\ndef run_infer_type(expr):\n mod = relay.Module.from_expr(expr)\n mod = transform.InferType()(mod)\n entry = mod[\"main\"]\n return entry if isinstance(expr, relay.Function) else entry.body\n\ndef test_resize_infer_type():\n n, c, h, w = tvm.var(\"n\"), tvm.var(\"c\"), tvm.var(\"h\"), tvm.var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"int8\"))\n th, tw = tvm.var(\"th\"), tvm.var(\"tw\")\n z = relay.image.resize(x, (th, tw))\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, c, th, tw), \"int8\")\n\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), \"int8\"))\n z= relay.image.resize(x, (100, 200), \"NCHW\", \"bilinear\", True)\n assert \"size=\" in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, c, 100, 200), \"int8\")\n\ndef test_resize():\n def verify_resize(dshape, scale, method, layout):\n if layout == \"NHWC\":\n size = (dshape[1] * scale, dshape[2] * scale)\n else:\n size = (dshape[2] * scale, dshape[3] * scale)\n\n x_data = np.random.uniform(size=dshape).astype(\"float32\")\n if method == \"bilinear\":\n ref_res = topi.testing.bilinear_resize_python(x_data, size, layout)\n else:\n ref_res = topi.testing.upsampling_python(x_data, (scale, scale), layout)\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n z = relay.image.resize(x, size, layout, method, True)\n assert \"size=\" in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType(ref_res.shape, \"float32\")\n func = relay.Function([x], z)\n\n for target, ctx in ctx_list():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4)\n for method in [\"bilinear\", \"nearest_neighbor\"]:\n for layout in [\"NHWC\", \"NCHW\"]:\n verify_resize((1, 4, 4, 4), 2, method, layout)\n\n\ndef test_multibox_prior():\n def get_ref_result(dshape, sizes=(1.0,),\n ratios=(1.0,), steps=(-1.0, -1.0),\n offsets=(0.5, 0.5), clip=True):\n in_height = dshape[2]\n in_width = dshape[3]\n num_sizes = len(sizes)\n num_ratios = len(ratios)\n size_ratio_concat = sizes + ratios\n steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height\n steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width\n offset_h = offsets[0]\n offset_w = offsets[1]\n\n oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)\n dtype = \"float32\"\n np_out = np.zeros(oshape).astype(dtype)\n\n for i in range(in_height):\n 
center_h = (i + offset_h) * steps_h\n for j in range(in_width):\n center_w = (j + offset_w) * steps_w\n for k in range(num_sizes + num_ratios - 1):\n w = size_ratio_concat[k] * in_height / in_width / 2.0 if k < num_sizes else \\\n size_ratio_concat[0] * in_height / in_width * math.sqrt(size_ratio_concat[k + 1]) / 2.0\n h = size_ratio_concat[k] / 2.0 if k < num_sizes else \\\n size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0\n count = i * in_width * (num_sizes + num_ratios - 1) + j * (num_sizes + num_ratios - 1) + k\n np_out[0][count][0] = center_w - w\n np_out[0][count][1] = center_h - h\n np_out[0][count][2] = center_w + w\n np_out[0][count][3] = center_h + h\n if clip:\n np_out = np.clip(np_out, 0, 1)\n\n return np_out\n\n def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,),\n ratios=(1.0,), steps=(-1.0, -1.0),\n offsets=(0.5, 0.5), clip=True, check_size=False,\n check_type_only=False):\n\n z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip)\n zz = run_infer_type(z)\n if check_size:\n assert \"sizes=\" in z.astext()\n assert zz.checked_type == relay.TensorType(\n (1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4),\n \"float32\")\n\n if check_type_only:\n return\n\n data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n func = relay.Function([x], z)\n func = run_infer_type(func)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\n sizes = (0.3, 1.5, 0.7)\n ratios = (1.3, 2.4)\n steps = (2.0, 1.5)\n offsets = (0.2, 0.3)\n dshape = (1, 3, 56, 56)\n ref_res = get_ref_result(dshape, sizes, ratios, steps, offsets)\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets,\n check_size=True)\n y = relay.var(\"y\", relay.TensorType((tvm.var(\"n\"), 3, 56, 56), \"float32\"))\n verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets,\n check_size=True, check_type_only=True)\n\n dshape = (1, 24, 32, 32)\n ref_res = get_ref_result(dshape, clip=False)\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n verify_multibox_prior(x, dshape, ref_res, clip=False)\n y = relay.var(\"y\", relay.TensorType((tvm.var(\"n\"), 24, 32, 32), \"float32\"))\n verify_multibox_prior(x, dshape, ref_res, clip=False, check_type_only=True)\n\n\ndef test_get_valid_counts():\n def verify_get_valid_counts(dshape, score_threshold, id_index, score_index):\n dtype = \"float32\"\n batch_size, num_anchor, elem_length = dshape\n np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)\n np_out1 = np.zeros(shape=(batch_size,))\n np_out2 = np.zeros(shape=dshape).astype(dtype)\n for i in range(batch_size):\n np_out1[i] = 0\n inter_idx = 0\n for j in range(num_anchor):\n score = np_data[i, j, score_index]\n if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):\n for k in range(elem_length):\n np_out2[i, inter_idx, k] = np_data[i, j, k]\n np_out1[i] += 1\n inter_idx += 1\n if j >= np_out1[i]:\n for k in range(elem_length):\n np_out2[i, j, k] = -1.0\n\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n z = relay.vision.get_valid_counts(x, score_threshold, id_index, score_index)\n 
assert \"score_threshold\" in z.astext()\n func = relay.Function([x], z.astuple())\n func = run_infer_type(func)\n for target, ctx in ctx_list():\n if target == 'cuda':\n return\n intrp = relay.create_executor(\"debug\", ctx=ctx, target=target)\n out = intrp.evaluate(func)(np_data)\n tvm.testing.assert_allclose(out[0].asnumpy(), np_out1, rtol=1e-3, atol=1e-04)\n tvm.testing.assert_allclose(out[1].asnumpy(), np_out2, rtol=1e-3, atol=1e-04)\n\n verify_get_valid_counts((1, 2500, 6), 0, 0, 1)\n verify_get_valid_counts((1, 2500, 5), -1, -1, 0)\n verify_get_valid_counts((3, 1000, 6), 0.55, 1, 0)\n verify_get_valid_counts((16, 500, 5), 0.95, -1, 0)\n\n\ndef test_non_max_suppression():\n def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res,\n iou_threshold=0.5, force_suppress=False, top_k=-1,\n check_type_only=False):\n x0 = relay.var(\"x0\", relay.ty.TensorType(dshape, \"float32\"))\n x1 = relay.var(\"x1\", relay.ty.TensorType((dshape[0],), \"int32\"))\n z = relay.vision.non_max_suppression(x0, x1, max_output_size = -1, \\\n iou_threshold = iou_threshold, force_suppress = force_suppress, \\\n top_k = top_k, return_indices=False)\n z_indices = relay.vision.non_max_suppression(x0, x1, max_output_size = -1, \\\n iou_threshold = iou_threshold, force_suppress = force_suppress, \\\n top_k = top_k)\n assert \"iou_threshold\" in z.astext()\n assert \"iou_threshold\" in z_indices.astext()\n zz = run_infer_type(z)\n zz_indices = run_infer_type(z_indices)\n assert zz.checked_type == relay.ty.TensorType(dshape, \"float32\")\n assert zz_indices.checked_type == relay.ty.TensorType((dshape[0], dshape[1]), \"int32\")\n\n if check_type_only:\n return\n\n func = relay.Function([x0, x1], z)\n func = run_infer_type(func)\n func_indices = relay.Function([x0, x1], z_indices)\n func_indices = run_infer_type(func_indices)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x0_data, x1_data)\n op_indices_res1 = intrp1.evaluate(func_indices)(x0_data, x1_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n tvm.testing.assert_allclose(op_indices_res1.asnumpy(), ref_indices_res, rtol=1e-5)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(x0_data, x1_data)\n op_indices_res2 = intrp2.evaluate(func_indices)(x0_data, x1_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n tvm.testing.assert_allclose(op_indices_res2.asnumpy(), ref_indices_res, rtol=1e-5)\n\n np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80],\n [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79],\n [1, 0.5, 100, 60, 70, 110]]]).astype(\"float32\")\n np_valid_count = np.array([4]).astype(\"int32\")\n np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],\n [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1]]])\n np_indices_result = np.array([[3, 0, -1, -1, -1]])\n num_anchors = 5\n\n dshape = (tvm.var(\"n\"), num_anchors, 6)\n verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result,\n force_suppress=True, top_k=2, check_type_only=True)\n dshape = (1, num_anchors, 6)\n verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result,\n force_suppress=True, top_k=2, check_type_only=False)\n\n np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],\n [1, 0.7, 30, 60, 50, 80], [-1, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1]]])\n np_indices_result = np.array([[3, 0, 
1, -1, -1]])\n dshape = (tvm.var(\"n\"), num_anchors, 6)\n verify_nms(np_data, np_valid_count, dshape, np_result,\n np_indices_result, check_type_only=True)\n dshape = (1, num_anchors, 6)\n verify_nms(np_data, np_valid_count, dshape, np_result,\n np_indices_result, top_k=3)\n\n\ndef test_multibox_transform_loc():\n def test_default_value():\n num_anchors = 3\n num_classes = 3\n\n np_cls_prob = np.array(\n [[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45],\n [0.7, 0.1, 0.2]]]).astype(\"float32\")\n np_loc_preds = np.array(\n [[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4,\n -0.8]]).astype(\"float32\")\n np_anchors = np.array(\n [[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2],\n [1.2, 1.2, 1.5, 1.5]]]).astype(\"float32\")\n\n expected_np_out = np.array([[[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],\n [0, 0.44999999, 1, 1, 1, 1],\n [0, 0.30000001, 0, 0, 0.22903419, 0.20435292]]])\n\n\n cls_prob = relay.var(\n \"cls_prob\",\n relay.ty.TensorType((1, num_anchors, num_classes), \"float32\"))\n loc_pred = relay.var(\n \"loc_pred\", relay.ty.TensorType((1, num_anchors * 4), \"float32\"))\n anchors = relay.var(\n \"anchors\", relay.ty.TensorType((1, num_anchors, 4), \"float32\"))\n\n mtl = relay.vision.multibox_transform_loc(\n cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors)\n ret = run_infer_type(mtl.astuple())\n ref_type = relay.ty.TupleType(\n tvm.convert([\n relay.ty.TensorType((1, num_anchors, 6), \"float32\"),\n relay.ty.TensorType((1, ), \"int\")\n ]))\n\n assert ret.checked_type == ref_type\n\n nms = relay.vision.non_max_suppression(mtl[0], mtl[1], return_indices=False)\n func = relay.Function([cls_prob, loc_pred, anchors], nms)\n func = run_infer_type(func)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(np_cls_prob, np_loc_preds,\n np_anchors)\n tvm.testing.assert_allclose(op_res1.asnumpy(), expected_np_out, rtol=1e-5)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(np_cls_prob, np_loc_preds,\n np_anchors)\n tvm.testing.assert_allclose(op_res2.asnumpy(), expected_np_out, rtol=1e-5)\n\n def test_threshold():\n num_anchors = 5\n num_classes = 5\n n = tvm.var(\"n\")\n cls_prob = relay.var(\n \"cls_prob\",\n relay.ty.TensorType((n, num_anchors, num_classes), \"float32\"))\n loc_pred = relay.var(\n \"loc_pred\", relay.ty.TensorType((n, num_anchors * 4), \"float32\"))\n anchors = relay.var(\n \"anchors\", relay.ty.TensorType((1, num_anchors, 4), \"float32\"))\n threshold = 0.02\n variances = (0.2, 0.2, 0.3, 0.3)\n\n ret = relay.vision.multibox_transform_loc(\n cls_prob=cls_prob,\n loc_pred=loc_pred,\n anchor=anchors,\n threshold=threshold,\n variances=variances)\n ret = run_infer_type(ret.astuple())\n ref_type = relay.ty.TupleType(\n tvm.convert([\n relay.ty.TensorType((n, num_anchors, 6), \"float32\"),\n relay.ty.TensorType((n, ), \"int\")\n ]))\n assert ret.checked_type == ref_type\n\n test_default_value()\n test_threshold()\n\n\ndef test_roi_align():\n def verify_roi_align(data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio):\n data = relay.var(\"data\", relay.ty.TensorType(data_shape, \"float32\"))\n rois = relay.var(\"rois\", relay.ty.TensorType(rois_shape, \"float32\"))\n z = relay.vision.roi_align(data, rois, pooled_size=(pooled_size, pooled_size),\n spatial_scale=spatial_scale, sample_ratio=sample_ratio,\n layout=\"NCHW\")\n zz = run_infer_type(z)\n batch, channel, in_size, _ = data_shape\n num_roi = rois_shape[0]\n assert 
zz.checked_type == relay.ty.TensorType(\n (num_roi, channel, pooled_size, pooled_size), \"float32\")\n\n func = relay.Function([data, rois], z)\n func = run_infer_type(func)\n np_data = np.random.uniform(size=data_shape).astype(\"float32\")\n np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size\n np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi)\n ref_res = topi.testing.roi_align_nchw_python(np_data, np_rois, pooled_size=pooled_size,\n spatial_scale=spatial_scale,\n sample_ratio=sample_ratio)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(np_data, np_rois)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(np_data, np_rois)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4)\n\n verify_roi_align((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1)\n verify_roi_align((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2)\n\n\ndef test_roi_pool():\n def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale):\n data = relay.var(\"data\", relay.ty.TensorType(data_shape, \"float32\"))\n rois = relay.var(\"rois\", relay.ty.TensorType(rois_shape, \"float32\"))\n z = relay.vision.roi_pool(data, rois, pooled_size=(pooled_size, pooled_size),\n spatial_scale=spatial_scale, layout=\"NCHW\")\n zz = run_infer_type(z)\n batch, channel, in_size, _ = data_shape\n num_roi = rois_shape[0]\n assert zz.checked_type == relay.ty.TensorType(\n (num_roi, channel, pooled_size, pooled_size), \"float32\")\n\n func = relay.Function([data, rois], z)\n func = run_infer_type(func)\n np_data = np.random.uniform(size=data_shape).astype(\"float32\")\n np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size\n np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32')\n ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size,\n spatial_scale=spatial_scale)\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(np_data, np_rois)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(np_data, np_rois)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4)\n\n verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0)\n verify_roi_pool((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5)\n\n\ndef test_proposal():\n def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs):\n cls_prob = relay.var(\"cls_prob\", relay.ty.TensorType(np_cls_prob.shape, \"float32\"))\n bbox_pred = relay.var(\"bbox_pred\", relay.ty.TensorType(np_bbox_pred.shape, \"float32\"))\n im_info = relay.var(\"im_info\", relay.ty.TensorType(np_im_info.shape, \"float32\"))\n z = relay.vision.proposal(cls_prob, bbox_pred, im_info, **attrs)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.ty.TensorType(np_out.shape, \"float32\")\n\n func = relay.Function([cls_prob, bbox_pred, im_info], z)\n func = run_infer_type(func)\n for target in ['llvm', 'cuda']:\n if not tvm.module.enabled(target):\n print(\"Skip test because %s is not enabled.\" % target)\n continue\n ctx = tvm.context(target, 0)\n intrp1 = 
relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(np_cls_prob, np_bbox_pred, np_im_info)\n tvm.testing.assert_allclose(op_res1.asnumpy(), np_out, rtol=1e-4)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res2 = intrp2.evaluate(func)(np_cls_prob, np_bbox_pred, np_im_info)\n tvm.testing.assert_allclose(op_res2.asnumpy(), np_out, rtol=1e-4)\n\n attrs = {\n 'scales': (0.5,),\n 'ratios': (0.5,),\n 'feature_stride': 16,\n 'iou_loss': False,\n 'rpn_min_size': 16,\n 'threshold': 0.7,\n 'rpn_pre_nms_top_n': 200,\n 'rpn_post_nms_top_n': 4,\n }\n\n np_cls_prob = np.array([[\n [[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]],\n [[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]]\n ]], dtype='float32')\n np_bbox_pred = np.array([[\n [[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]],\n [[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]],\n [[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]],\n [[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]],\n ]], dtype='float32')\n np_im_info = np.array([[48., 48., 1.]], dtype='float32')\n np_out = np.array([\n [0., 0., 2.8451548,28.38012, 18.154846],\n [0., 0., 15.354933, 41.96971, 41.245064],\n [0., 18.019852, 1.0538368, 51.98015, 25.946163],\n [0., 27.320923, -1.266357, 55., 24.666357]\n ], dtype='float32')\n\n\n verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)\n\n np_out = np.array([\n [ 0., -5.25, -2.5, 21.75, 19.],\n [ 0., 11.25, -2., 37.25, 18.5],\n [ 0., 26.849998, -2.3000002, 53.45, 18.6],\n [ 0., -4.95, 13.799999, 22.25, 35.5]\n ], dtype='float32')\n attrs['iou_loss'] = True\n verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)\n\n\ndef test_yolo_reorg_infer_shape():\n def verify_yolo_reorg(shape, stride, out_shape):\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n z = relay.vision.yolo_reorg(x, stride=stride)\n zz = run_infer_type(z)\n assert \"stride=\" in z.astext()\n assert zz.checked_type == relay.ty.TensorType(out_shape, \"float32\")\n\n n, c, h, w = tvm.var(\"n\"), tvm.var(\"c\"), tvm.var(\"h\"), tvm.var(\"w\")\n idxd = tvm.indexdiv\n verify_yolo_reorg((n, c, 20, 20), 10, (n, c*10*10, 2, 2))\n verify_yolo_reorg((n, c, h, w), 2, (n, c*2*2, idxd(h, 2), idxd(w, 2)))\n\ndef test_yolo_reorg():\n def verify_yolo_reorg(shape, stride):\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n ref_res = topi.testing.reorg_python(x_data, stride)\n\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n z = relay.vision.yolo_reorg(x, stride=stride)\n zz = run_infer_type(z)\n assert \"stride=\" in z.astext()\n assert zz.checked_type == relay.ty.TensorType(ref_res.shape, \"float32\")\n\n func = relay.Function([x], z)\n\n for target, ctx in ctx_list():\n for kind in [\"graph\", \"debug\"]:\n intrp = relay.create_executor(kind, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n verify_yolo_reorg((1, 100, 20, 20), 10)\n verify_yolo_reorg((1, 4, 6, 6), 2)\n\n\ndef test_deformable_conv2d():\n def test_infer_type(batch, in_channel, size, out_channel, deformable_groups, groups):\n data_shape = (batch, in_channel, size, size)\n data = relay.var(\"data\", shape=data_shape)\n offset = relay.var(\"offset\")\n kernel = relay.var(\"kernel\")\n kernel_size = (3, 3)\n y = relay.nn.deformable_conv2d(data, offset, kernel,\n strides=(1, 1),\n padding=(1, 1),\n dilation=(1, 1),\n kernel_size=kernel_size,\n deformable_groups=deformable_groups,\n 
groups=groups,\n channels=out_channel)\n weight_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])\n out_shape = (batch, out_channel, size, size)\n offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, out_shape[2], out_shape[3])\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(out_shape)\n assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type\n assert yy.args[2].checked_type == relay.TensorType(weight_shape)\n\n test_infer_type(1, 4, 16, 4, 4, 1)\n test_infer_type(2, 4, 16, 4, 1, 2)\n\n\n def test_run(batch, in_channel, size, out_channel, deformable_groups, groups):\n kernel_size = (3, 3)\n data_shape = (batch, in_channel, size, size)\n offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, size, size)\n kernel_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])\n dtype = 'float32'\n data = relay.var(\"data\", shape=data_shape, dtype=dtype)\n offset = relay.var(\"offset\")\n kernel = relay.var(\"kernel\")\n y = relay.nn.deformable_conv2d(data, offset, kernel,\n strides=(1, 1),\n padding=(1, 1),\n dilation=(1, 1),\n kernel_size=kernel_size,\n deformable_groups=deformable_groups,\n groups=groups,\n channels=out_channel)\n func = relay.Function([data, offset, kernel], y)\n data = np.random.uniform(size=data_shape).astype(dtype)\n offset = np.random.uniform(size=offset_shape).astype(dtype)\n kernel = np.random.uniform(size=kernel_shape).astype(dtype)\n ref_res = topi.testing.deformable_conv2d_nchw_python(data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), deformable_groups=deformable_groups, groups=groups)\n\n for target, ctx in ctx_list():\n for kind in [\"graph\", \"debug\"]:\n intrp1 = relay.create_executor(kind, ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(data, offset, kernel)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)\n test_run(1, 4, 16, 4, 1, 1)\n test_run(2, 4, 16, 4, 4, 1)\n\n\nif __name__ == \"__main__\":\n test_resize_infer_type()\n test_resize()\n test_multibox_prior()\n test_multibox_transform_loc()\n test_get_valid_counts()\n test_roi_align()\n test_roi_pool()\n test_proposal()\n test_yolo_reorg_infer_shape()\n test_yolo_reorg()\n test_non_max_suppression()\n test_deformable_conv2d()\n"
] | [
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.clip",
"numpy.array",
"numpy.random.randint"
]
] |
gauthamkrishna-g/Gest-Face | [
"f20def897d8ce2b10c6312b02cb57cb7241a9d93"
] | [
"other/9_morphological.py"
] | [
"import cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n \r\n lower_red = np.array([30, 150, 50])\r\n upper_red = np.array([255, 255, 180])\r\n \r\n mask = cv2.inRange(hsv, lower_red, upper_red)\r\n res = cv2.bitwise_and(frame, frame, mask= mask)\r\n\r\n kernel = np.ones((5, 5), np.uint8)\r\n erosion = cv2.erode(mask, kernel, iterations = 1)\r\n dilation = cv2.dilate(mask, kernel, iterations = 1)\r\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\r\n closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\r\n\r\n\r\n cv2.imshow('frame', frame)\r\n cv2.imshow('mask', mask)\r\n #cv2.imshow('erosion',erosion)\r\n #cv2.imshow('dilation',dilation)\r\n cv2.imshow('opening', opening)\r\n cv2.imshow('closing', closing)\r\n \r\n if cv2.waitKey(5) & 0xFF == 27:\r\n break\r\n \r\ncv2.destroyAllWindows()\r\ncap.release() \r\n"
] | [
[
"numpy.array",
"numpy.ones"
]
] |
Nobuo-Namura/EPBII | [
"ad50b7c4e291ea53a9b3924f24cb84aed4d347b2"
] | [
"indicator.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nindicator.py\nCopyright (c) 2020 Nobuo Namura\nThis code is released under the MIT License.\n\"\"\"\n\nimport numpy as np\nfrom scipy.spatial import distance\n\n#======================================================================\ndef rmse_history(x_rmse, problem, func, nfg=0):\n rmse = 0.0\n for x in x_rmse:\n rmse += (problem(x)[nfg] - func(x))**2.0\n rmse = np.sqrt(rmse/float(len(x_rmse[:,0])))\n return rmse\n\n#======================================================================\ndef igd_history(f, igd_ref):\n dist = distance.cdist(f, igd_ref)\n igd = np.mean(np.min(dist,axis=0))\n\n return igd"
] | [
[
"scipy.spatial.distance.cdist",
"numpy.min"
]
] |
wchen459/hgan_jmd_2019 | [
"ca8e58b4525eb59f51ec699f1e874eca455a6bac"
] | [
"SC/feasibility.py"
] | [
"import numpy as np\nfrom SC.build_data import check_feasibility\n\n\nif __name__ == '__main__':\n \n X = np.load('../results/SC/SC.npy')\n X0 = X[:,:64]\n X1 = X[:,64:]\n for i in range(X.shape[0]):\n is_feasibe = check_feasibility(X0[i], X1[i])\n print('{}: {}'.format(i, is_feasibe))\n if not is_feasibe:\n break"
] | [
[
"numpy.load"
]
] |
lengyuner/hyperpose4fly | [
"c9866bce1a0109e1b9c727ca550b5a380eb3ee17"
] | [
"hyperpose/Model/openpose/model/mbv2_th_openpose.py"
] | [
"import tensorflow as tf\nimport tensorlayer as tl\nfrom tensorlayer import layers\nfrom tensorlayer.models import Model\nfrom tensorlayer.layers import BatchNorm2d, Conv2d, DepthwiseConv2d, LayerList, MaxPool2d\nfrom ..utils import tf_repeat\nfrom ..define import CocoPart,CocoLimb\n\ninitial_w=tl.initializers.random_normal(stddev=0.01)\ninitial_b=tl.initializers.constant(value=0.0)\n\nclass MobilenetThinOpenpose(Model):\n def __init__(self,parts=CocoPart,limbs=CocoLimb,colors=None,n_pos=19,n_limbs=19,num_channels=128,\\\n hin=368,win=368,hout=46,wout=46,backbone=None,pretraining=False,data_format=\"channels_first\"):\n super().__init__()\n self.num_channels=num_channels\n self.parts=parts\n self.limbs=limbs\n self.n_pos=n_pos\n self.colors=colors\n self.n_limbs=n_limbs\n self.n_confmaps=n_pos\n self.n_pafmaps=2*n_limbs\n self.hin=hin\n self.win=win\n self.hout=hout\n self.wout=wout\n self.data_format=data_format\n if(self.data_format==\"channels_first\"):\n self.concat_dim=1\n else:\n self.concat_dim=-1\n if(backbone==None):\n self.backbone=self.Mobilenetv2_variant(data_format=self.data_format)\n else:\n self.backbone=backbone(scale_size=8,pretraining=pretraining,data_format=self.data_format)\n self.init_stage=self.Init_stage(n_confmaps=self.n_confmaps,in_channels=self.backbone.out_channels,data_format=self.data_format)\n self.refinement_stage_1=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n self.refinement_stage_2=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n self.refinement_stage_3=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n self.refinement_stage_4=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n self.refinement_stage_5=self.Refinement_stage(n_confmaps=self.n_confmaps,n_pafmaps=self.n_pafmaps,in_channels=self.backbone.out_channels+self.n_confmaps+self.n_pafmaps,data_format=self.data_format)\n \n @tf.function\n def forward(self,x,is_train=False,stage_num=5,domainadapt=False):\n conf_list=[]\n paf_list=[] \n backbone_features=self.backbone.forward(x)\n conf_map,paf_map=self.init_stage.forward(backbone_features)\n conf_list.append(conf_map)\n paf_list.append(paf_map)\n for refinement_stage_idx in range(1,stage_num+1):\n x=tf.concat([backbone_features,conf_list[-1],paf_list[-1]],self.concat_dim)\n conf_map,paf_map=eval(f\"self.refinement_stage_{refinement_stage_idx}.forward(x)\")\n conf_list.append(conf_map)\n paf_list.append(paf_map)\n if(domainadapt):\n return conf_list[-1],paf_list[-1],conf_list,paf_list,backbone_features\n if(is_train):\n return conf_list[-1],paf_list[-1],conf_list,paf_list\n else:\n return conf_list[-1],paf_list[-1]\n \n @tf.function(experimental_relax_shapes=True)\n def infer(self,x):\n conf_map,paf_map=self.forward(x,is_train=False)\n return conf_map,paf_map\n \n def cal_loss(self,gt_conf,gt_paf,mask,stage_confs,stage_pafs):\n stage_losses=[]\n batch_size=gt_conf.shape[0]\n mask_conf=tf_repeat(mask, [1,self.n_confmaps ,1,1])\n mask_paf=tf_repeat(mask,[1,self.n_pafmaps ,1,1])\n loss_confs,loss_pafs=[],[]\n for stage_conf,stage_paf in 
zip(stage_confs,stage_pafs):\n loss_conf=tf.nn.l2_loss((gt_conf-stage_conf)*mask_conf)\n loss_paf=tf.nn.l2_loss((gt_paf-stage_paf)*mask_paf)\n stage_losses.append(loss_conf)\n stage_losses.append(loss_paf)\n loss_confs.append(loss_conf)\n loss_pafs.append(loss_paf)\n pd_loss=tf.reduce_mean(stage_losses)/batch_size\n return pd_loss,loss_confs,loss_pafs\n\n class Mobilenetv2_variant(Model):\n def __init__(self,data_format=\"channels_first\"):\n super().__init__()\n self.data_format=data_format\n if(self.data_format==\"channels_first\"):\n self.concat_dim=1\n else:\n self.concat_dim=-1\n self.out_channels=1152\n self.scale_size=8\n self.convblock_0=conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_1=separable_block(n_filter=64,in_channels=32,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_2=separable_block(n_filter=128,in_channels=64,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_3=separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_4=separable_block(n_filter=256,in_channels=128,filter_size=(3,3),strides=(2,2),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_5=separable_block(n_filter=256,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_6=separable_block(n_filter=512,in_channels=256,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_7=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_8=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_9=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_10=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.convblock_11=separable_block(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format)\n self.maxpool=MaxPool2d(filter_size=(2,2),strides=(2,2),padding=\"SAME\",data_format=self.data_format)\n \n \n def forward(self,x):\n concat_list=[]\n x=self.convblock_0.forward(x)\n x=self.convblock_1.forward(x)\n x=self.convblock_2.forward(x)\n x=self.convblock_3.forward(x)\n concat_list.append(self.maxpool.forward(x))\n x=self.convblock_4.forward(x)\n x=self.convblock_5.forward(x)\n x=self.convblock_6.forward(x)\n x=self.convblock_7.forward(x)\n concat_list.append(x)\n x=self.convblock_8.forward(x)\n x=self.convblock_9.forward(x)\n x=self.convblock_10.forward(x)\n x=self.convblock_11.forward(x)\n concat_list.append(x)\n x=tf.concat(concat_list,self.concat_dim)\n return x\n \n class Init_stage(Model):\n def __init__(self,n_confmaps=19,n_pafmaps=38,in_channels=1152,data_format=\"channels_first\"):\n super().__init__()\n self.n_confmaps=n_confmaps\n self.n_pafmaps=n_pafmaps\n self.in_channels=in_channels\n self.data_format=data_format\n #conf block\n self.conf_block=LayerList([\n separable_block(n_filter=128,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n 
separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=512,in_channels=128,filter_size=(1,1),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=self.n_confmaps,in_channels=512,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format)\n ])\n #paf block\n self.paf_block=LayerList([\n separable_block(n_filter=128,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=512,in_channels=128,filter_size=(1,1),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=self.n_pafmaps,in_channels=512,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format)\n ])\n \n def forward(self,x):\n conf_map=self.conf_block.forward(x)\n paf_map=self.paf_block.forward(x)\n return conf_map,paf_map\n \n class Refinement_stage(Model):\n def __init__(self,n_confmaps=19,n_pafmaps=38,in_channels=19+38+1152,data_format=\"channels_first\"):\n super().__init__()\n self.n_confmaps=n_confmaps\n self.n_pafmaps=n_pafmaps\n self.in_channels=in_channels\n self.data_format=data_format\n #conf_block\n self.conf_block=LayerList([\n separable_block(n_filter=128,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(1,1),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=self.n_confmaps,in_channels=128,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format),\n ])\n #paf_block\n self.paf_block=LayerList([\n separable_block(n_filter=128,in_channels=self.in_channels,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=128,in_channels=128,filter_size=(1,1),strides=(1,1),act=tf.nn.relu,data_format=self.data_format),\n separable_block(n_filter=self.n_pafmaps,in_channels=128,filter_size=(1,1),strides=(1,1),act=None,data_format=self.data_format),\n ])\n \n def forward(self,x):\n conf_map=self.conf_block.forward(x)\n paf_map=self.paf_block.forward(x)\n return conf_map,paf_map\n\ndef conv_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),act=tf.nn.relu,padding=\"SAME\",data_format=\"channels_first\"):\n layer_list=[]\n layer_list.append(Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,act=act,\\\n W_init=initial_w,b_init=initial_b,data_format=data_format,padding=padding))\n 
layer_list.append(BatchNorm2d(num_features=n_filter,decay=0.999,is_train=True,act=act,data_format=data_format))\n return LayerList(layer_list)\n\ndef separable_block(n_filter=32,in_channels=3,filter_size=(3,3),strides=(1,1),dilation_rate=(1,1),act=tf.nn.relu,data_format=\"channels_first\"):\n layer_list=[]\n layer_list.append(DepthwiseConv2d(filter_size=filter_size,strides=strides,in_channels=in_channels,\n dilation_rate=dilation_rate,W_init=initial_w,b_init=None,data_format=data_format))\n layer_list.append(BatchNorm2d(decay=0.99,act=act,num_features=in_channels,data_format=data_format,is_train=True))\n layer_list.append(Conv2d(n_filter=n_filter,filter_size=(1,1),strides=(1,1),in_channels=in_channels,W_init=initial_w,b_init=None,data_format=data_format))\n layer_list.append(BatchNorm2d(decay=0.99,act=act,num_features=n_filter,data_format=data_format,is_train=True))\n return layers.LayerList(layer_list)"
] | [
[
"tensorflow.function",
"tensorflow.reduce_mean",
"tensorflow.concat",
"tensorflow.nn.l2_loss"
]
] |
Basketkase/openpilot | [
"769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e"
] | [
"selfdrive/car/volkswagen/carstate.py"
] | [
"import numpy as np\nfrom cereal import car\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.car.interfaces import CarStateBase\nfrom opendbc.can.parser import CANParser\nfrom opendbc.can.can_define import CANDefine\nfrom selfdrive.car.volkswagen.values import DBC_FILES, CANBUS, NetworkLocation, TransmissionType, GearShifter, BUTTON_STATES, CarControllerParams\n\nclass CarState(CarStateBase):\n def __init__(self, CP):\n super().__init__(CP)\n can_define = CANDefine(DBC_FILES.mqb)\n if CP.transmissionType == TransmissionType.automatic:\n self.shifter_values = can_define.dv[\"Getriebe_11\"][\"GE_Fahrstufe\"]\n elif CP.transmissionType == TransmissionType.direct:\n self.shifter_values = can_define.dv[\"EV_Gearshift\"][\"GearPosition\"]\n self.hca_status_values = can_define.dv[\"LH_EPS_03\"][\"EPS_HCA_Status\"]\n self.buttonStates = BUTTON_STATES.copy()\n\n def update(self, pt_cp, cam_cp, ext_cp, trans_type):\n ret = car.CarState.new_message()\n # Update vehicle speed and acceleration from ABS wheel speeds.\n ret.wheelSpeeds.fl = pt_cp.vl[\"ESP_19\"][\"ESP_VL_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.fr = pt_cp.vl[\"ESP_19\"][\"ESP_VR_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.rl = pt_cp.vl[\"ESP_19\"][\"ESP_HL_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.rr = pt_cp.vl[\"ESP_19\"][\"ESP_HR_Radgeschw_02\"] * CV.KPH_TO_MS\n\n ret.vEgoRaw = float(np.mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr]))\n ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)\n\n ret.standstill = ret.vEgoRaw < 0.1\n\n # Update steering angle, rate, yaw rate, and driver input torque. VW send\n # the sign/direction in a separate signal so they must be recombined.\n ret.steeringAngleDeg = pt_cp.vl[\"LH_EPS_03\"][\"EPS_Berechneter_LW\"] * (1, -1)[int(pt_cp.vl[\"LH_EPS_03\"][\"EPS_VZ_BLW\"])]\n ret.steeringRateDeg = pt_cp.vl[\"LWI_01\"][\"LWI_Lenkradw_Geschw\"] * (1, -1)[int(pt_cp.vl[\"LWI_01\"][\"LWI_VZ_Lenkradw_Geschw\"])]\n ret.steeringTorque = pt_cp.vl[\"LH_EPS_03\"][\"EPS_Lenkmoment\"] * (1, -1)[int(pt_cp.vl[\"LH_EPS_03\"][\"EPS_VZ_Lenkmoment\"])]\n ret.steeringPressed = abs(ret.steeringTorque) > CarControllerParams.STEER_DRIVER_ALLOWANCE\n ret.yawRate = pt_cp.vl[\"ESP_02\"][\"ESP_Gierrate\"] * (1, -1)[int(pt_cp.vl[\"ESP_02\"][\"ESP_VZ_Gierrate\"])] * CV.DEG_TO_RAD\n\n # Verify EPS readiness to accept steering commands\n hca_status = self.hca_status_values.get(pt_cp.vl[\"LH_EPS_03\"][\"EPS_HCA_Status\"])\n ret.steerError = hca_status in [\"DISABLED\", \"FAULT\"]\n ret.steerWarning = hca_status in [\"INITIALIZING\", \"REJECTED\"]\n\n # Update gas, brakes, and gearshift.\n ret.gas = pt_cp.vl[\"Motor_20\"][\"MO_Fahrpedalrohwert_01\"] / 100.0\n ret.gasPressed = ret.gas > 0\n ret.brake = pt_cp.vl[\"ESP_05\"][\"ESP_Bremsdruck\"] / 250.0 # FIXME: this is pressure in Bar, not sure what OP expects\n ret.brakePressed = bool(pt_cp.vl[\"ESP_05\"][\"ESP_Fahrer_bremst\"])\n\n # Update gear and/or clutch position data.\n if trans_type == TransmissionType.automatic:\n ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl[\"Getriebe_11\"][\"GE_Fahrstufe\"], None))\n elif trans_type == TransmissionType.direct:\n ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl[\"EV_Gearshift\"][\"GearPosition\"], None))\n elif trans_type == TransmissionType.manual:\n ret.clutchPressed = not pt_cp.vl[\"Motor_14\"][\"MO_Kuppl_schalter\"]\n if bool(pt_cp.vl[\"Gateway_72\"][\"BCM1_Rueckfahrlicht_Schalter\"]):\n ret.gearShifter = 
GearShifter.reverse\n else:\n ret.gearShifter = GearShifter.drive\n\n # Update door and trunk/hatch lid open status.\n ret.doorOpen = any([pt_cp.vl[\"Gateway_72\"][\"ZV_FT_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_BT_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HFS_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HBFS_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HD_offen\"]])\n\n # Update seatbelt fastened status.\n ret.seatbeltUnlatched = pt_cp.vl[\"Airbag_02\"][\"AB_Gurtschloss_FA\"] != 3\n\n # Update driver preference for metric. VW stores many different unit\n # preferences, including separate units for for distance vs. speed.\n # We use the speed preference for OP.\n self.displayMetricUnits = not pt_cp.vl[\"Einheiten_01\"][\"KBI_MFA_v_Einheit_02\"]\n\n # Consume blind-spot monitoring info/warning LED states, if available.\n # Infostufe: BSM LED on, Warnung: BSM LED flashing\n if self.CP.enableBsm:\n ret.leftBlindspot = bool(ext_cp.vl[\"SWA_01\"][\"SWA_Infostufe_SWA_li\"]) or bool(ext_cp.vl[\"SWA_01\"][\"SWA_Warnung_SWA_li\"])\n ret.rightBlindspot = bool(ext_cp.vl[\"SWA_01\"][\"SWA_Infostufe_SWA_re\"]) or bool(ext_cp.vl[\"SWA_01\"][\"SWA_Warnung_SWA_re\"])\n\n # Consume factory LDW data relevant for factory SWA (Lane Change Assist)\n # and capture it for forwarding to the blind spot radar controller\n self.ldw_lane_warning_left = bool(cam_cp.vl[\"LDW_02\"][\"LDW_SW_Warnung_links\"])\n self.ldw_lane_warning_right = bool(cam_cp.vl[\"LDW_02\"][\"LDW_SW_Warnung_rechts\"])\n self.ldw_side_dlc_tlc = bool(cam_cp.vl[\"LDW_02\"][\"LDW_Seite_DLCTLC\"])\n self.ldw_dlc = cam_cp.vl[\"LDW_02\"][\"LDW_DLC\"]\n self.ldw_tlc = cam_cp.vl[\"LDW_02\"][\"LDW_TLC\"]\n\n # Stock FCW is considered active if the release bit for brake-jerk warning\n # is set. Stock AEB considered active if the partial braking or target\n # braking release bits are set.\n # Refer to VW Self Study Program 890253: Volkswagen Driver Assistance\n # Systems, chapter on Front Assist with Braking: Golf Family for all MQB\n ret.stockFcw = bool(ext_cp.vl[\"ACC_10\"][\"AWV2_Freigabe\"])\n ret.stockAeb = bool(ext_cp.vl[\"ACC_10\"][\"ANB_Teilbremsung_Freigabe\"]) or bool(ext_cp.vl[\"ACC_10\"][\"ANB_Zielbremsung_Freigabe\"])\n\n # Update ACC radar status.\n accStatus = pt_cp.vl[\"TSK_06\"][\"TSK_Status\"]\n if accStatus == 2:\n # ACC okay and enabled, but not currently engaged\n ret.cruiseState.available = True\n ret.cruiseState.enabled = False\n elif accStatus in [3, 4, 5]:\n # ACC okay and enabled, currently engaged and regulating speed (3) or engaged with driver accelerating (4) or overrun (5)\n ret.cruiseState.available = True\n ret.cruiseState.enabled = True\n else:\n # ACC okay but disabled (1), or a radar visibility or other fault/disruption (6 or 7)\n ret.cruiseState.available = False\n ret.cruiseState.enabled = False\n\n # Update ACC setpoint. 
When the setpoint is zero or there's an error, the\n # radar sends a set-speed of ~90.69 m/s / 203mph.\n ret.cruiseState.speed = ext_cp.vl[\"ACC_02\"][\"ACC_Wunschgeschw\"] * CV.KPH_TO_MS\n if ret.cruiseState.speed > 90:\n ret.cruiseState.speed = 0\n\n # Update control button states for turn signals and ACC controls.\n self.buttonStates[\"accelCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Hoch\"])\n self.buttonStates[\"decelCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Runter\"])\n self.buttonStates[\"cancel\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Abbrechen\"])\n self.buttonStates[\"setCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Setzen\"])\n self.buttonStates[\"resumeCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Wiederaufnahme\"])\n self.buttonStates[\"gapAdjustCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Verstellung_Zeitluecke\"])\n ret.leftBlinker = bool(pt_cp.vl[\"Blinkmodi_02\"][\"Comfort_Signal_Left\"])\n ret.rightBlinker = bool(pt_cp.vl[\"Blinkmodi_02\"][\"Comfort_Signal_Right\"])\n\n # Read ACC hardware button type configuration info that has to pass thru\n # to the radar. Ends up being different for steering wheel buttons vs\n # third stalk type controls.\n self.graHauptschalter = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Hauptschalter\"]\n self.graTypHauptschalter = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Typ_Hauptschalter\"]\n self.graButtonTypeInfo = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_ButtonTypeInfo\"]\n self.graTipStufe2 = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Stufe_2\"]\n # Pick up the GRA_ACC_01 CAN message counter so we can sync to it for\n # later cruise-control button spamming.\n self.graMsgBusCounter = pt_cp.vl[\"GRA_ACC_01\"][\"COUNTER\"]\n\n # Additional safety checks performed in CarInterface.\n self.parkingBrakeSet = bool(pt_cp.vl[\"Kombi_01\"][\"KBI_Handbremse\"]) # FIXME: need to include an EPB check as well\n ret.espDisabled = pt_cp.vl[\"ESP_21\"][\"ESP_Tastung_passiv\"] != 0\n\n return ret\n\n @staticmethod\n def get_can_parser(CP):\n # this function generates lists for signal, messages and initial values\n signals = [\n # sig_name, sig_address, default\n (\"EPS_Berechneter_LW\", \"LH_EPS_03\", 0), # Absolute steering angle\n (\"EPS_VZ_BLW\", \"LH_EPS_03\", 0), # Steering angle sign\n (\"LWI_Lenkradw_Geschw\", \"LWI_01\", 0), # Absolute steering rate\n (\"LWI_VZ_Lenkradw_Geschw\", \"LWI_01\", 0), # Steering rate sign\n (\"ESP_VL_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, front left\n (\"ESP_VR_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, front right\n (\"ESP_HL_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, rear left\n (\"ESP_HR_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, rear right\n (\"ESP_Gierrate\", \"ESP_02\", 0), # Absolute yaw rate\n (\"ESP_VZ_Gierrate\", \"ESP_02\", 0), # Yaw rate sign\n (\"ZV_FT_offen\", \"Gateway_72\", 0), # Door open, driver\n (\"ZV_BT_offen\", \"Gateway_72\", 0), # Door open, passenger\n (\"ZV_HFS_offen\", \"Gateway_72\", 0), # Door open, rear left\n (\"ZV_HBFS_offen\", \"Gateway_72\", 0), # Door open, rear right\n (\"ZV_HD_offen\", \"Gateway_72\", 0), # Trunk or hatch open\n (\"Comfort_Signal_Left\", \"Blinkmodi_02\", 0), # Left turn signal including comfort blink interval\n (\"Comfort_Signal_Right\", \"Blinkmodi_02\", 0), # Right turn signal including comfort blink interval\n (\"AB_Gurtschloss_FA\", \"Airbag_02\", 0), # Seatbelt status, driver\n (\"AB_Gurtschloss_BF\", \"Airbag_02\", 0), # Seatbelt status, passenger\n (\"ESP_Fahrer_bremst\", \"ESP_05\", 0), # Brake pedal pressed\n (\"ESP_Bremsdruck\", 
\"ESP_05\", 0), # Brake pressure applied\n (\"MO_Fahrpedalrohwert_01\", \"Motor_20\", 0), # Accelerator pedal value\n (\"EPS_Lenkmoment\", \"LH_EPS_03\", 0), # Absolute driver torque input\n (\"EPS_VZ_Lenkmoment\", \"LH_EPS_03\", 0), # Driver torque input sign\n (\"EPS_HCA_Status\", \"LH_EPS_03\", 3), # EPS HCA control status\n (\"ESP_Tastung_passiv\", \"ESP_21\", 0), # Stability control disabled\n (\"KBI_MFA_v_Einheit_02\", \"Einheiten_01\", 0), # MPH vs KMH speed display\n (\"KBI_Handbremse\", \"Kombi_01\", 0), # Manual handbrake applied\n (\"TSK_Status\", \"TSK_06\", 0), # ACC engagement status from drivetrain coordinator\n (\"GRA_Hauptschalter\", \"GRA_ACC_01\", 0), # ACC button, on/off\n (\"GRA_Abbrechen\", \"GRA_ACC_01\", 0), # ACC button, cancel\n (\"GRA_Tip_Setzen\", \"GRA_ACC_01\", 0), # ACC button, set\n (\"GRA_Tip_Hoch\", \"GRA_ACC_01\", 0), # ACC button, increase or accel\n (\"GRA_Tip_Runter\", \"GRA_ACC_01\", 0), # ACC button, decrease or decel\n (\"GRA_Tip_Wiederaufnahme\", \"GRA_ACC_01\", 0), # ACC button, resume\n (\"GRA_Verstellung_Zeitluecke\", \"GRA_ACC_01\", 0), # ACC button, time gap adj\n (\"GRA_Typ_Hauptschalter\", \"GRA_ACC_01\", 0), # ACC main button type\n (\"GRA_Tip_Stufe_2\", \"GRA_ACC_01\", 0), # unknown related to stalk type\n (\"GRA_ButtonTypeInfo\", \"GRA_ACC_01\", 0), # unknown related to stalk type\n (\"COUNTER\", \"GRA_ACC_01\", 0), # GRA_ACC_01 CAN message counter\n ]\n\n checks = [\n # sig_address, frequency\n (\"LWI_01\", 100), # From J500 Steering Assist with integrated sensors\n (\"LH_EPS_03\", 100), # From J500 Steering Assist with integrated sensors\n (\"ESP_19\", 100), # From J104 ABS/ESP controller\n (\"ESP_05\", 50), # From J104 ABS/ESP controller\n (\"ESP_21\", 50), # From J104 ABS/ESP controller\n (\"Motor_20\", 50), # From J623 Engine control module\n (\"TSK_06\", 50), # From J623 Engine control module\n (\"ESP_02\", 50), # From J104 ABS/ESP controller\n (\"GRA_ACC_01\", 33), # From J533 CAN gateway (via LIN from steering wheel controls)\n (\"Gateway_72\", 10), # From J533 CAN gateway (aggregated data)\n (\"Airbag_02\", 5), # From J234 Airbag control module\n (\"Kombi_01\", 2), # From J285 Instrument cluster\n (\"Blinkmodi_02\", 1), # From J519 BCM (sent at 1Hz when no lights active, 50Hz when active)\n (\"Einheiten_01\", 1), # From J??? not known if gateway, cluster, or BCM\n ]\n\n if CP.transmissionType == TransmissionType.automatic:\n signals += [(\"GE_Fahrstufe\", \"Getriebe_11\", 0)] # Auto trans gear selector position\n checks += [(\"Getriebe_11\", 20)] # From J743 Auto transmission control module\n elif CP.transmissionType == TransmissionType.direct:\n signals += [(\"GearPosition\", \"EV_Gearshift\", 0)] # EV gear selector position\n checks += [(\"EV_Gearshift\", 10)] # From J??? 
unknown EV control module\n elif CP.transmissionType == TransmissionType.manual:\n signals += [(\"MO_Kuppl_schalter\", \"Motor_14\", 0), # Clutch switch\n (\"BCM1_Rueckfahrlicht_Schalter\", \"Gateway_72\", 0)] # Reverse light from BCM\n checks += [(\"Motor_14\", 10)] # From J623 Engine control module\n\n if CP.networkLocation == NetworkLocation.fwdCamera:\n # Radars are here on CANBUS.pt\n signals += MqbExtraSignals.fwd_radar_signals\n checks += MqbExtraSignals.fwd_radar_checks\n if CP.enableBsm:\n signals += MqbExtraSignals.bsm_radar_signals\n checks += MqbExtraSignals.bsm_radar_checks\n\n return CANParser(DBC_FILES.mqb, signals, checks, CANBUS.pt)\n\n @staticmethod\n def get_cam_can_parser(CP):\n\n signals = [\n # sig_name, sig_address, default\n (\"LDW_SW_Warnung_links\", \"LDW_02\", 0), # Blind spot in warning mode on left side due to lane departure\n (\"LDW_SW_Warnung_rechts\", \"LDW_02\", 0), # Blind spot in warning mode on right side due to lane departure\n (\"LDW_Seite_DLCTLC\", \"LDW_02\", 0), # Direction of most likely lane departure (left or right)\n (\"LDW_DLC\", \"LDW_02\", 0), # Lane departure, distance to line crossing\n (\"LDW_TLC\", \"LDW_02\", 0), # Lane departure, time to line crossing\n ]\n\n checks = [\n # sig_address, frequency\n (\"LDW_02\", 10) # From R242 Driver assistance camera\n ]\n\n if CP.networkLocation == NetworkLocation.gateway:\n # Radars are here on CANBUS.cam\n signals += MqbExtraSignals.fwd_radar_signals\n checks += MqbExtraSignals.fwd_radar_checks\n if CP.enableBsm:\n signals += MqbExtraSignals.bsm_radar_signals\n checks += MqbExtraSignals.bsm_radar_checks\n\n return CANParser(DBC_FILES.mqb, signals, checks, CANBUS.cam)\n\nclass MqbExtraSignals:\n # Additional signal and message lists for optional or bus-portable controllers\n fwd_radar_signals = [\n (\"ACC_Wunschgeschw\", \"ACC_02\", 0), # ACC set speed\n (\"AWV2_Freigabe\", \"ACC_10\", 0), # FCW brake jerk release\n (\"ANB_Teilbremsung_Freigabe\", \"ACC_10\", 0), # AEB partial braking release\n (\"ANB_Zielbremsung_Freigabe\", \"ACC_10\", 0), # AEB target braking release\n ]\n fwd_radar_checks = [\n (\"ACC_10\", 50), # From J428 ACC radar control module\n (\"ACC_02\", 17), # From J428 ACC radar control module\n ]\n bsm_radar_signals = [\n (\"SWA_Infostufe_SWA_li\", \"SWA_01\", 0), # Blind spot object info, left\n (\"SWA_Warnung_SWA_li\", \"SWA_01\", 0), # Blind spot object warning, left\n (\"SWA_Infostufe_SWA_re\", \"SWA_01\", 0), # Blind spot object info, right\n (\"SWA_Warnung_SWA_re\", \"SWA_01\", 0), # Blind spot object warning, right\n ]\n bsm_radar_checks = [\n (\"SWA_01\", 20), # From J1086 Lane Change Assist\n ]\n"
] | [
[
"numpy.mean"
]
] |
christophershultz/spatial_ag | [
"1c56e2e5fbc15a4f56d6d7bb94fab6a796d07dbf"
] | [
"usda_data/joinUSDA.py"
] | [
"import pandas as pd\nimport numpy as np\nimport os, pdb, sys\n\ndef netIncome(): \n df = pd.read_csv('usda_data/net_income.csv')\n df = df[df['Year'] == 2017].reset_index().drop(['index'], axis = 1)\n df = df[['Year', 'State', 'State ANSI', 'County', 'County ANSI', 'Zip Code', 'Value']]\n df.columns = ['yr', 'st', 'st_ansi', 'cty', 'cty_ansi', 'zip', 'netinc']\n df['st_cty_yr'] = [df['st'][i] + '_' + df['cty'][i] + '_' + str(df['yr'][i]) for i in range(len(df))]\n print(str(len(df)))\n return df\n\ndef joinData(df, col):\n new = pd.read_csv('usda_data/' + col + '.csv')\n if col == 'labor': new = new[new['Domain'] == 'TOTAL'].reset_index().drop(['index'], axis = 1)\n new = new[['Year', 'State', 'State ANSI', 'County', 'County ANSI', 'Zip Code', 'Value']]\n new.columns = ['yr', 'st', 'st_ansi', 'cty', 'cty_ansi', 'zip', col]\n new['st_cty_yr'] = [new['st'][i] + '_' + new['cty'][i] + '_' + str(new['yr'][i]) for i in range(len(new))]\n new = new[['st_cty_yr', col]]\n df = pd.merge(df, new, how = 'left', on = 'st_cty_yr')\n print(str(len(df)))\n return df\n\ndef updateFips(df): \n df['st_ansi'] = [str(i) for i in df['st_ansi']]\n df['st_ansi'] = ['0' + i if len(i) == 1 else i for i in df['st_ansi']]\n df['cty_ansi'] = [int(i) if str(i).lower() != 'nan' else 0 for i in df['cty_ansi']]\n df['cty_ansi'] = [str(i) for i in df['cty_ansi']]\n df['cty_ansi'] = ['0'*(3-len(i)) + i if len(i) != 3 else i for i in df['cty_ansi']]\n df['fips'] = [st + '-' + cty for st, cty in zip(df['st_ansi'], df['cty_ansi'])]\n return df\n\ndef main(): \n df = netIncome()\n for column in ['fertilizer', 'fuel', 'labor', 'land', 'machinery', 'tractors', 'trucks']: \n print(\"Joining \" + column)\n df = joinData(df, column)\n df = updateFips(df)\n df.to_csv('usda_data/joined_usda_df.csv', index = None)\n\nmain()"
] | [
[
"pandas.read_csv",
"pandas.merge"
]
] |
kmedian/potpourri | [
"54f7c517b6de5be82577e35849f67a0ead4410ae"
] | [
"potpourri/simi3.py"
] | [
"\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import SGDRegressor\nimport scipy.stats as ss\n\nmodel = Pipeline(steps=[\n ('scl', StandardScaler()),\n ('lin', SGDRegressor(\n # Logistic Regression\n loss = 'squared_loss',\n penalty = 'l1',\n l1_ratio = 1,\n fit_intercept = True,\n # solver settings\n max_iter = 1000,\n tol = 1e-3,\n shuffle = True,\n random_state = 42,\n # adaptive learning\n learning_rate = 'adaptive',\n eta0 = 0.5,\n # early stopping\n early_stopping = True,\n validation_fraction = 0.15,\n n_iter_no_change = 10,\n # other\n warm_start = True,\n average = False, # disable for Lasso!\n ))\n])\n\nhyper = {\n 'lin__alpha': ss.gamma(a=1.2, loc=1e-6, scale=.08), # alpha ~ [1e-6, 1]\n}\n\nmeta = {\n 'id': \"simi3\",\n 'name': 'LinReg Lasso',\n 'descriptions': (\n \"Lasso Regression (L1 penalty), SGD solver, squared loss function.\"),\n 'solver': 'Stochastic Gradient Descent',\n 'active': True,\n 'keywords': [\n 'linear regression', 'univariate regression', 'multiple regression'],\n 'output_num': 'single',\n 'output_scale': 'interval',\n 'output_dtype': 'float',\n 'input_num': 'multi',\n 'input_scale': 'interval',\n 'input_dtype': 'float'\n}\n"
] | [
[
"scipy.stats.gamma",
"sklearn.linear_model.SGDRegressor",
"sklearn.preprocessing.StandardScaler"
]
] |
christianb93/MachineLearning | [
"30d3b182d33f19b210aa393208236e626eaf5f6a"
] | [
"RBM/Base.py"
] | [
"#####################################################\n#\n# Base class for restricted Boltzmann machines\n#\n#\n# Copyright (c) 2018 christianb93\n# Permission is hereby granted, free of charge, to \n# any person obtaining a copy of this software and \n# associated documentation files (the \"Software\"), \n# to deal in the Software without restriction, \n# including without limitation the rights to use, \n# copy, modify, merge, publish, distribute, \n# sublicense, and/or sell copies of the Software, \n# and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice \n# shall be included in all copies or substantial \n# portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY \n# OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT \n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. \n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS \n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, \n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#####################################################\n\nimport numpy as np\nfrom scipy.special import expit\n\n\nclass BaseRBM:\n \n \n #\n # Call this after the training has completed\n #\n def postTraining(self):\n pass\n \n #\n # Run one step in a Gibbs sampling Markov chain. \n # We sample the hidden units from the visible\n # units V and the visible units V' from the\n # hidden units. V' is returned\n # \n def runGibbsStep(self, V, size = 1):\n #\n # Sample hidden units from visible units\n # \n E = expit(self.beta*(np.matmul(V.astype(int), self.W) + self.c), dtype=self.np_type)\n U = np.random.random_sample(size=(size, self.hidden)).astype(self.np_type)\n H = (U <= E).astype(int)\n #\n # and now sample visible units from hidden units\n #\n P = expit(self.beta*(np.matmul(H, np.transpose(self.W)) + self.b), dtype=self.np_type)\n U = np.random.random_sample(size=(size, self.visible)).astype(self.np_type)\n return (U <= P).astype(int), E\n\n #\n # Sample from the learned distribution, starting with a \n # random value\n #\n def sample(self, iterations = 100, size = 1):\n return self.sampleFrom(np.random.randint(low=0, high=2, size=(size,self.visible)), iterations = iterations, size = size)\n\n #\n # Sample from the learned distribution, starting at some\n # initial value\n #\n def sampleFrom(self, initial, iterations = 100, size = 1):\n V = initial.astype(int)\n for i in range(iterations):\n V, _ = self.runGibbsStep(V, size = size)\n if (iterations > 1000):\n if 0 == i % 1000:\n print(\"Sampling iteration \", i)\n return V\n \n \n #\n # Visualize the weights\n #\n def showWeights(self, fig, cols, rows, x_pix, y_pix):\n for r in range(rows):\n for c in range(cols):\n j = r*cols + c\n #\n # We display the weigths connected to hidden unit j\n #\n w = self.W[:,j]\n #\n # Normalize\n #\n min = np.min(w)\n w = w + min\n max = np.max(w)\n w = w / max\n ax = fig.add_subplot(rows, cols, j+1)\n ax.imshow(w.reshape(x_pix, y_pix), \"Greys\")\n ax.set_yticks([],[])\n ax.set_xticks([],[])\n \n #\n # Retrieve the weights and parameters\n #\n def getParameters(self):\n params = {}\n params['W'] = self.W\n params['b'] = self.b\n params['c'] = self.c\n return params\n \n #\n # Set parameter\n #\n def setParameters(self, params):\n self.W = 
params['W'].astype(self.np_type)\n self.b = params['b'].astype(self.np_type)\n self.c = params['c'].astype(self.np_type)\n\n"
] | [
[
"numpy.random.random_sample",
"numpy.transpose",
"numpy.max",
"numpy.min",
"numpy.random.randint"
]
] |
arpanmangal/Regression | [
"06969286d7db65a537e89ac37905310592542ca9"
] | [
"Q4/read.py"
] | [
"\"\"\"\nModule for reading data from 'q4x.csv' and 'q4y.csv'\n\"\"\"\n\nimport numpy as np\n\ndef loadData (x_file=\"../ass1_data/q4x.dat\", y_file=\"../ass1_data/q4y.dat\"):\n \"\"\"\n Loads the X, Y matrices.\n \"\"\"\n\n X = np.genfromtxt(x_file, delimiter=' ', dtype=int)\n labels = np.genfromtxt(y_file, dtype=str)\n Y = []\n for label in labels:\n if (label == \"Alaska\"):\n Y.append(0)\n else:\n Y.append(1)\n\n return (X, Y)\n"
] | [
[
"numpy.genfromtxt"
]
] |
mfkoerner/icarus | [
"eb480596be127f760d10531d27569290df3e8ff9"
] | [
"photons.py"
] | [
"########################################\n# written for Python 3 #\n# by Doug Fabini ([email protected]) #\n########################################\n\n'''\n\n This script requires the following files to be located in 'baseDir':\n - IBZKPT (to extract number of k points) POSSIBLY NO LONGER NEEDED\n - DOSCAR (to extract bandgap)\n - OUTCAR (to extract dielectric properties and energy resolution)\n\nCurrently only handles an isotropic equivalent for the dielectric / absorption tensors.\n\n'''\n\n\n\n# import packages, apply stylesheet\nimport config\nimport os\nfrom electrons import np, plt, getTotalDOS, bandgap\n\n\n\n# ****************** #\n# DATA I/O FUNCTIONS #\n# ****************** #\n\ndef getNkPts(bd):\n\t''' Parse OUTCAR for number of k-points '''\n\tfname = os.path.join(bd, 'OUTCAR')\n\t# print(fname) #debug line\n\twith open(fname, 'r') as f:\n\t\tfor line in f:\n\t\t\tif 'irreducible k-points' in line:\n\t\t\t\t# print(line) #debug line\n\t\t\t\treturn int(line.split()[1])\n\t\t\t\tbreak\n\ndef getDielectric(bd, anisotropic=False):\n\t''' Parse OUTCAR for dielectric properties, convert to appropriate form '''\n\tfname = os.path.join(bd, 'OUTCAR')\n\twith open(fname, 'r') as f:\n\t\traw = []\n\t\tlnImag, lnReal = 0, 0\n\t\tfor i, line in enumerate(f):\n\t\t\traw.append(line)\n\t\t\tif 'NEDOS' in line: \t\t\t\t\t#This an below find number points per section and start of lines for sections\n\t\t\t\tNEDOS = int(line.split()[5])\n\t\t\tif 'IMAGINARY DIELECTRIC' in line and lnImag is 0:\t#Selecting the first set of Dielectric numbers from VASP\n\t\t\t\tlnImag = i\n\t\t\tif 'REAL DIELECTRIC' in line and lnReal is 0:\n\t\t\t\tlnReal = i\n\tEepsRe, EepsIm = [], []\n\tfor i in range(lnImag+3,lnImag+NEDOS+3):\t\t#All of the imaginary dielectric components (NEDOS components and start point of lnImag+3)\n\t\tif len(raw[i]) < 5:\t\t\t\t\t\t\t#Checking for early termination of DIELECTRIC DATA (printing to output)\n\t\t\tprint('DIELECTRIC DATA TERMINATED AT ONLY {} POINTS'.format(i-lnImag-3))\n\t\t\tbreak\n\t\tEepsIm.append([float(ri) for ri in raw[i].strip('\\n').split()])\t#Energy (frequency) then X,Y,Z,XY,YZ,ZX for imaginary component\n\tE = np.array(EepsIm)[:,0]\t\t\t\t\t\t#Energies pulled from first part of EepsIm\n\tfor i in range(lnReal+3,lnReal+NEDOS+3):\n\t\tif len(raw[i]) < 5:\n\t\t\t# print('DIELECTRIC DATA TERMINATED AT ONLY {} POINTS'.format(i-lnReal-3))\n\t\t\tbreak\n\t\tEepsRe.append([float(ri) for ri in raw[i].strip('\\n').split()])\t#Real part from above\n\tif anisotropic:\n\t\tepsIm = np.array([row[1:] for row in EepsIm])\n\t\tepsRe = np.array([row[1:] for row in EepsRe])\n\telse:\n\t\tepsIm = np.array([isotropic(row[1:]) for row in EepsIm])\t#epsIm is the isotropic equivilent values for each energy\n\t\tepsRe = np.array([isotropic(row[1:]) for row in EepsRe])\t#Real part for epsIm, this time is epsRe\n\treturn E, epsRe + 1j*epsIm \t\t\t\t\t#Returns list of isotropic equivalent values\n\ndef saveResults(bd, E, alpha, eps):\n\t''' Store absorption coefficient and dielectric function '''\n\tout = np.hstack((E, alpha, eps.real, eps.imag))\n\tout = np.reshape(out, (-1, 4), order='F')\n\tnp.savetxt(os.path.join(bd, 'optical.csv'), out, header='h*nu (eV), alpha_iso (cm^-1), Re[eps_iso] (eps_0), Im[eps_iso] (eps_0)')\n\ndef getSolarSpectrum():\n\t''' Get direct+diffuse solar irradiance at global tilt, ASTM G173-03 '''\n\td = np.loadtxt('data/ASTMG173.dat')\n\treturn d[:,0], d[:,2]\n\n\n# ****************** #\n# ANALYSIS FUNCTIONS #\n# ****************** #\n\ndef nm2eV(lam):\n\t''' 
Convert wavelength in nm to energy in eV '''\n\th = 4.136e-15 # Planck constant, eV / s\n\tc = 2.998e8 # speed of light, m / s\n\treturn h*c/(lam*1e-9)\n\ndef eV2nm(hnu):\n\t''' Convert energy in eV to wavelength in nm '''\n\th = 4.136e-15 # Planck constant, eV / s\n\tc = 2.998e8 # speed of light, m / s\n\treturn h*c/hnu*1e9\n\ndef isotropic(sixElements):\n\t''' Returns an isotropic equivalent value for a symmetric 3x3 matrix '''\n\txx, yy, zz, xy, yz, zx = sixElements\n\tA = np.array([[xx, xy, zx], [xy, yy, yz], [zx, yz, zz]])\n\teigval, _ = np.linalg.eigh(A)\n\treturn np.mean(eigval)\n\ndef dielec2optical(hnu, eps):\n\t''' Calculate complex refractive index and absorption coefficient from dielectric function '''\n\th = 4.136e-15 # Planck constant, eV / s\n\tc = 2.998e8 # speed of light, m / s\n\tN = np.sqrt(eps)\n\talpha = 4*np.pi/(h*c)*hnu*N.imag/100 # divisor of 100 takes from m-1 to cm-1\n\treturn N, alpha\n\ndef FOM(hnu, alpha, Eg):\n\n\txx = np.linspace(100, eV2nm(Eg), int(1e4)) \t\t\t\t\t\t\t#proper range of light to think about (100 nm [13eV] to band gap wavelength)\n\txSun, ySun = getSolarSpectrum() \t\t\t\t\t\t\t\t\t\t#xSun -> wavelength of sun, ySun -> intensity of sun\n\tyySun = np.interp(xx, xSun, ySun) \t\t\t\t\t\t\t\t\t\t#ySun calculated at the points for xx (so that we have the right resolution)\n\tyyMat = np.interp(xx, np.flipud(eV2nm(hnu[1:])), np.flipud(alpha[1:])) \t#absorption as a function of wavelength\n\tfrom scipy.integrate import cumtrapz \t\t\t\t\t\t\t\t\t#Trapezoidal numeric integration\n\treturn xx, yySun, yyMat, cumtrapz(yySun*yyMat, xx) \t\t\t\t\t\t#FOM is the last value, which is integral of sum intensity time absorption along wavel\n\n\n\n# ****************** #\n# PLOTTING FUNCTIONS #\n# ****************** #\n\ndef plotDielectric(ax, E, eps, N, El=(0, 10)):\n\t''' Plot complex dielectric function and complex refractive index '''\n\tax.plot(E, eps.real, label='$\\\\epsilon_r\\\\prime$')\n\tax.plot(E, eps.imag, label='$\\\\epsilon_r\\\\prime\\\\prime$')\n\tax.plot(E, N.real, label='$n$')\n\tax.plot(E, N.imag, label='$k$')\n\tax.set_xlim(El)\n\tax.set_xlabel('$h\\\\nu$ (eV)')\n\tax.legend()\n\ndef plotAbsorption(ax, hnu, alpha, xl=(0, 4), yl=(1e2, 1e7), rel2eg=None, lbl=None, wavelength=False):\n\t''' Plot absorption coefficient '''\n\tif wavelength:\n\t\tif rel2eg is not None:\n\t\t\traise Exception('Relative to gap option not available when plotting by wavelength')\n\t\tlh, = ax.semilogy(eV2nm(hnu), alpha, '.-', label=lbl)\n\t\tax.set_xlabel('$\\\\lambda$ (nm)')\n\telif not wavelength and rel2eg is None:\n\t\tlh, = ax.semilogy(hnu, alpha, '.-', label=lbl)\n\t\tax.set_xlabel('$h\\\\nu$ (eV)')\n\telse:\n\t\tlh, = ax.semilogy(hnu-rel2eg, alpha, '.-', label=lbl)\n\t\tax.set_xlabel('$h\\\\nu-E_g$ (eV)')\n\tax.set_xlim(xl)\n\tax.set_ylim(yl)\n\tax.set_ylabel('$\\\\alpha$ (cm$^{-1}$)')\n\treturn lh\n\n\n\n# ********** #\n# HIGH LEVEL #\n# ********** #\n\ndef optical(bd, save=False):\n\t''' DESCRIPTION GOES HERE '''\n\tNk = getNkPts(bd) \t\t\t\t\t\t\t#Gets number of irreducible kpoints but never uses it :O\n\tE, eps = getDielectric(bd) \t\t\t\t#Gets lists of E and equivilent eigenvalues (real + i*imag) for dialectric function\n\tN, alpha = dielec2optical(E, eps)\t\t\t#N (dielectric constant) and alpha (absorption coefficient) from dielectric equivilent eigenvalues\n\n\tEdos, tdos = getTotalDOS(bd)\t\t\t\t#arrays of len NEDOS with energy and DOS at that energy\n\tEg = bandgap(Edos, tdos)\t\t\t\t\t#Calculates bandgap from DOS data\n\n\tif save:\n\t\tsaveResults(bd, 
E, alpha, eps)\t\t\t\t#Saves Energy, absorption, eigenvalue to basedir/optical.csv\n\treturn E, alpha, eps, N, Eg \t\t\t\t#Returns Energy, absorption, eigenvalue, refractive index, bandgap\n"
] | [
[
"scipy.integrate.cumtrapz"
]
] |
nirandaperera/pipedream | [
"bc05a4e8ce150f681ba6066805604873a3a7cf97"
] | [
"runtime/runtime.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport collections\nimport itertools\nimport time\nimport torch\nimport torch.distributed as dist\n\nimport communication\nimport runtime_utilities\n\nIMAGE_CLASSIFICATION = \"image_classification\"\nTRANSLATION = \"translation\"\nSPEECH_TO_TEXT = \"speech_to_text\"\n\n\nclass ModulesWithDependencies:\n def __init__(self, modules_with_dependencies):\n self._modules = []\n self._all_input_names = []\n self._all_output_names = []\n for (module, input_names, output_names) in modules_with_dependencies:\n self._modules.append(module)\n self._all_input_names.append(input_names)\n self._all_output_names.append(output_names)\n\n def modules(self):\n return self._modules\n\n def all_input_names(self):\n return self._all_input_names\n\n def all_output_names(self):\n return self._all_output_names\n\n def is_input_tensor(self, tensor_name):\n for module_input_names in self._all_input_names:\n if tensor_name in module_input_names:\n return True\n return False\n\n\nclass StageRuntime:\n def __init__(self, model, distributed_backend, fp16, loss_scale,\n training_tensor_shapes, eval_tensor_shapes,\n training_tensor_dtypes, inputs_module_destinations,\n target_tensor_names, configuration_maps, master_addr,\n rank, local_rank, num_ranks_in_server, verbose_freq,\n model_type, enable_recompute=False):\n # Metadata needed for forward and backward pass within this stage.\n self.tensors = []\n self.gradients = {}\n self.distributed_backend = distributed_backend\n self.fp16 = fp16\n self.loss_scale = loss_scale\n self.training_tensor_shapes = training_tensor_shapes\n self.eval_tensor_shapes = eval_tensor_shapes\n self.training_tensor_dtypes = training_tensor_dtypes\n self.model_type = model_type\n self.target_tensor_names = target_tensor_names\n\n self.initialize(model, inputs_module_destinations, configuration_maps,\n master_addr, rank, local_rank, num_ranks_in_server)\n\n self.verbose_freq = verbose_freq\n self.forward_only = False\n\n self.forward_stats = runtime_utilities.RuntimeStats(forward=True)\n self.backward_stats = runtime_utilities.RuntimeStats(forward=False)\n\n # Enable recomputation to prevent the need to save activations\n # computed from the forward pass for the backward pass.\n self.enable_recompute = enable_recompute\n\n # Disable recomputation for the last stage.\n if rank == num_ranks_in_server - 1:\n self.enable_recompute = False\n\n def initialize(self, model, inputs_module_destinations,\n configuration_maps, master_addr, rank,\n local_rank, num_ranks_in_server):\n self.send_ranks = {}\n self.receive_ranks = {}\n self.rank = rank\n self.local_rank = local_rank\n self.stage = None\n self.tensor_tags = {}\n self.forward_minibatch_id = 0\n self.backward_minibatch_id = 0\n self.criterion_input_name = str(model[-1][1][0])\n\n tensor_tag = 1\n for (_, input_tensors, output_tensors) in model:\n for input_tensor in input_tensors:\n if input_tensor not in self.tensor_tags:\n self.tensor_tags[input_tensor] = tensor_tag\n tensor_tag += 1\n for output_tensor in output_tensors:\n if output_tensor not in self.tensor_tags:\n self.tensor_tags[output_tensor] = tensor_tag\n tensor_tag += 1\n for target_tensor_name in sorted(self.target_tensor_names):\n self.tensor_tags[target_tensor_name] = tensor_tag\n tensor_tag += 1\n self.tensor_tags[\"ack\"] = tensor_tag\n tensor_tag += 1\n\n module_to_stage_map = configuration_maps['module_to_stage_map']\n stage_to_rank_map = configuration_maps['stage_to_rank_map']\n stage_to_depth_map = 
configuration_maps['stage_to_depth_map']\n\n if module_to_stage_map is None:\n # If IP addresses not specified, resort to all layers on\n # single machine.\n assert self.rank is None\n self.modules_with_dependencies = ModulesWithDependencies(model)\n self.is_criterion = True\n self.rank_in_stage = 0\n self.num_ranks = 1\n self.num_ranks_in_first_stage = 1\n self.num_ranks_in_previous_stage = 0\n self.num_ranks_in_next_stage = 0\n self.num_stages = 1\n self.num_ranks_in_stage = 1\n self.num_warmup_minibatches = 0\n self.comm_handler = None\n else:\n assert len(module_to_stage_map) == len(model)\n assert self.rank is not None\n\n stage_to_module_map = collections.defaultdict(list)\n for module in range(len(module_to_stage_map)):\n stage_to_module_map[module_to_stage_map[module]].append(module)\n\n rank_to_stage_map = {}\n for stage in stage_to_rank_map:\n for rank in stage_to_rank_map[stage]:\n rank_to_stage_map[rank] = stage\n\n # Now, use this mapping to determine the modules contained in\n # each stage.\n assert 0 <= self.rank < len(rank_to_stage_map)\n self.num_ranks = len(rank_to_stage_map)\n self.num_stages = len(stage_to_module_map)\n self.stage = rank_to_stage_map[self.rank]\n self.rank_in_stage = stage_to_rank_map[self.stage].index(self.rank)\n self.num_ranks_in_stage = len(stage_to_rank_map[self.stage])\n self.num_ranks_in_first_stage = len(stage_to_rank_map[0])\n self.num_ranks_in_previous_stage = 0\n self.ranks_in_previous_stage = []\n if self.stage > 0:\n self.num_ranks_in_previous_stage = len(\n stage_to_rank_map[self.stage - 1])\n self.ranks_in_previous_stage = stage_to_rank_map[self.stage - 1]\n self.num_ranks_in_next_stage = 0\n self.ranks_in_next_stage = []\n if self.stage < self.num_stages - 1:\n self.num_ranks_in_next_stage = len(\n stage_to_rank_map[self.stage + 1])\n self.ranks_in_next_stage = stage_to_rank_map[self.stage + 1]\n modules = stage_to_module_map[self.stage]\n self.modules_with_dependencies = ModulesWithDependencies(\n [model[module] for module in modules])\n self.is_criterion = self.stage == (self.num_stages - 1)\n if stage_to_depth_map is not None:\n self.num_warmup_minibatches = stage_to_depth_map[\n str(self.stage)]\n else:\n self.num_warmup_minibatches = self.num_ranks - 1\n for i in range(self.stage):\n self.num_warmup_minibatches -= len(\n stage_to_rank_map[i])\n self.num_warmup_minibatches = self.num_warmup_minibatches // \\\n self.num_ranks_in_stage\n\n # To determine where tensors should be sent and received, first\n # determine the \"producing\" and \"consuming\" module IDs of each\n # tensor. 
We then use the corresponding machine ranks to send\n # and receive tensors.\n master_port = 12345\n self.comm_handler = communication.CommunicationHandler(\n master_addr=master_addr,\n master_port=master_port,\n rank=self.rank,\n local_rank=self.local_rank,\n num_ranks_in_server=num_ranks_in_server,\n world_size=self.num_ranks,\n fp16=self.fp16,\n backend=self.distributed_backend)\n\n for i in range(len(model)):\n for j in range(i + 1, len(model)):\n for tensor_name in model[i][2]:\n if tensor_name in model[j][1]:\n if module_to_stage_map[i] == \\\n module_to_stage_map[j]:\n continue\n # For now, assume that each stage is served by only\n # a single machine.\n if module_to_stage_map[j] == self.stage:\n self.receive_ranks[tensor_name] = \\\n stage_to_rank_map[module_to_stage_map[i]]\n if module_to_stage_map[i] == self.stage:\n self.send_ranks[tensor_name] = \\\n stage_to_rank_map[module_to_stage_map[j]]\n\n for model_inputs in inputs_module_destinations.keys():\n destination_stage = module_to_stage_map[\n inputs_module_destinations[model_inputs]]\n if destination_stage > self.stage:\n self.send_ranks[model_inputs] = \\\n self.ranks_in_next_stage\n\n if 0 < self.stage <= destination_stage:\n self.receive_ranks[model_inputs] = \\\n self.ranks_in_previous_stage\n\n if destination_stage > 0:\n if model_inputs not in self.tensor_tags:\n self.tensor_tags[model_inputs] = tensor_tag\n tensor_tag += 1\n\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i] = modules[i].cuda()\n if self.fp16:\n import apex.fp16_utils as fp16_utils\n modules[i] = fp16_utils.BN_convert_float(modules[i].half())\n\n # Initialize all groups in the same order on every worker.\n if stage_to_rank_map is not None:\n groups = []\n for stage in range(self.num_stages):\n ranks = stage_to_rank_map[stage]\n if len(ranks) > 1:\n groups.append(dist.new_group(ranks=ranks))\n else:\n groups.append(None)\n group = groups[self.stage]\n else:\n group = None\n\n # self.modules_with_dependencies contains a list of PyTorch\n # modules, along with a list of user-defined input and output\n # tensor names. We use our module_executor.ModuleExecutor\n # class to wrap these dependencies, and use run_forward and\n # run_backward methods downstream.\n num_parameters = 0\n for i in range(len(modules)):\n if group is not None:\n if ((i < (len(modules) - 1) and self.is_criterion)\n or not self.is_criterion):\n num_parameters += \\\n sum(x.size()[0] * x.size()[1]\n if len(x.size()) > 1 else x.size()[0]\n for x in modules[i].parameters() if x.size())\n modules[i] = torch.nn.parallel.DistributedDataParallel(\n modules[i],\n process_group=group,\n device_ids=[local_rank],\n output_device=local_rank)\n if self.num_ranks_in_stage > 1:\n module_size = 4. 
* num_parameters\n print(\"Replicating stage: ranks=%d, module_size=%.3f\" % (\n self.num_ranks_in_stage, module_size))\n\n if self.fp16:\n self.master_parameters = []\n self.model_parameters = []\n for i in range(len(modules)):\n import apex.fp16_utils as fp16_utils\n module_parameters, module_master_parameters = \\\n fp16_utils.prep_param_lists(modules[i])\n self.master_parameters.extend(module_master_parameters)\n self.model_parameters.extend(module_parameters)\n else:\n self.master_parameters = list(self.parameters())\n self.model_parameters = None\n\n if self.comm_handler is not None:\n self.comm_handler.initialize(\n self.receive_ranks,\n self.send_ranks,\n self.tensor_tags,\n self.target_tensor_names,\n self.training_tensor_dtypes,\n self.rank_in_stage,\n self.num_ranks_in_stage,\n self.ranks_in_previous_stage,\n self.ranks_in_next_stage)\n\n @property\n def target(self):\n return self.tensors[-1][\"target\"]\n\n def modules(self):\n return self.modules_with_dependencies.modules()\n\n def parameters(self):\n parameter_iterators = []\n for module in self.modules_with_dependencies.modules():\n parameter_iterators.append(module.parameters())\n return itertools.chain(*parameter_iterators)\n\n def state_dict(self):\n state_dict = collections.OrderedDict()\n for i, module in enumerate(self.modules_with_dependencies.modules()):\n state_dict[\"module%d\" % i] = module.state_dict()\n if self.fp16:\n state_dict[\"master_parameters\"] = self.master_parameters\n return state_dict\n\n def load_state_dict(self, state_dict):\n for i, module in enumerate(self.modules_with_dependencies.modules()):\n module.load_state_dict(state_dict[\"module%d\" % i])\n if self.fp16:\n saved_master_parameters = state_dict[\"master_parameters\"]\n for master_parameter, saved_master_parameter in zip(\n self.master_parameters, saved_master_parameters):\n master_parameter.data.copy_(saved_master_parameter.data)\n\n def cuda(self):\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i] = modules[i].cuda()\n\n def zero_grad(self):\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i].zero_grad()\n\n def train(self, num_iterations):\n self.tensors = []\n self.gradients = {}\n self.tensor_shapes = self.training_tensor_shapes\n self.forward_only = False\n\n self.forward_minibatch_id = 0\n self.backward_minibatch_id = 0\n\n if self.comm_handler is not None:\n self.comm_handler.set_tensor_shapes(self.tensor_shapes)\n self.comm_handler.start_helper_threads(\n num_iterations, forward_only=False)\n\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i].train()\n\n def eval(self, num_iterations):\n self.tensors = []\n self.gradients = {}\n self.tensor_shapes = self.eval_tensor_shapes\n self.tensor_shapes[\"ack\"] = (1,)\n self.forward_only = True\n\n self.forward_minibatch_id = 0\n self.backward_minibatch_id = 0\n\n if self.comm_handler is not None:\n self.comm_handler.set_tensor_shapes(self.tensor_shapes)\n self.comm_handler.start_helper_threads(\n num_iterations, forward_only=True)\n\n modules = self.modules_with_dependencies.modules()\n for i in range(len(modules)):\n modules[i].eval()\n\n def set_loader(self, loader):\n if loader is not None:\n self.loader_iter = iter(loader)\n else:\n self.loader_iter = None\n\n def receive_tensors_forward(self):\n if self.forward_only and len(self.tensors) > 0:\n self.tensors.pop(0)\n self.tensors.append({})\n if self.loader_iter is not None:\n # print(f\"### 
rcvt0 {time.time()}\")\n input = next(self.loader_iter)\n # print(f\"### rcvt1 {time.time()}\")\n if self.model_type == TRANSLATION:\n (input, target) = input\n src, src_length = input\n tgt, tgt_length = target\n\n self.tensors[-1][\"input0\"] = src.cuda(non_blocking=True)\n self.tensors[-1][\"input1\"] = torch.LongTensor(src_length).cuda(\n non_blocking=True)\n self.tensors[-1][\"input2\"] = tgt[:-1].cuda(non_blocking=True)\n self.tensors[-1][\"target\"] = tgt[1:].cuda().contiguous().view(-1)\n self.tensors[-1][\"target_length\"] = \\\n torch.tensor([int(sum(torch.LongTensor(tgt_length) - 1))],\n dtype=torch.int).cuda()\n elif self.model_type == IMAGE_CLASSIFICATION:\n (input, target) = input\n if self.fp16:\n input = input.half()\n self.tensors[-1][\"input0\"] = input.cuda(non_blocking=True)\n self.tensors[-1][\"target\"] = target.cuda(non_blocking=True)\n elif self.model_type == SPEECH_TO_TEXT:\n input, target, input_percentages, target_sizes = input\n input_sizes = input_percentages.mul_(int(input.size(3))).int()\n self.tensors[-1][\"input0\"] = input.cuda(non_blocking=True)\n self.tensors[-1][\"input1\"] = input_sizes.cuda(non_blocking=True)\n self.tensors[-1][\"target\"] = target.cuda(non_blocking=True)\n self.tensors[-1][\"target_length\"] = target_sizes.cuda(\n non_blocking=True)\n # print(f\"### rcv2 {time.time()}\")\n else:\n # Receive all required tensors from upstream machines.\n for input_name in self.receive_ranks:\n if input_name == \"ack\":\n continue\n\n self.tensors[-1][input_name] = \\\n self.comm_handler.recv(\n input_name,\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=False)\n\n self.forward_stats.stats['receive_tensors_size'] += \\\n (self.tensors[-1][input_name].element_size() *\n self.tensors[-1][input_name].nelement())\n\n # Used to track where to receive forward from.\n self.comm_handler.increment_messaging_index(\n sending=False)\n\n def send_tensors_forward(self):\n # Send all required tensors downstream.\n for output_name in self.send_ranks:\n if output_name == \"ack\":\n continue\n\n self.comm_handler.send(\n output_name,\n self.tensors[-1][output_name],\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=False)\n\n self.forward_stats.stats['send_tensors_size'] += \\\n (self.tensors[-1][output_name].element_size() *\n self.tensors[-1][output_name].nelement())\n\n def receive_tensors_backward(self):\n # Receive all required gradients from downstream\n # machines.\n for output_name in self.send_ranks:\n if output_name in self.target_tensor_names:\n continue\n\n self.gradients[output_name] = \\\n self.comm_handler.recv(\n output_name,\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=True)\n\n self.backward_stats.stats['receive_tensors_size'] += \\\n (self.gradients[output_name].element_size() *\n self.gradients[output_name].nelement())\n\n def send_tensors_backward(self):\n # Send all required gradients upstream.\n for input_name in self.receive_ranks:\n if input_name in self.target_tensor_names:\n continue\n\n self.comm_handler.send(\n input_name,\n self.gradients[input_name],\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=True)\n\n self.backward_stats.stats['send_tensors_size'] += \\\n (self.gradients[input_name].element_size() *\n self.gradients[input_name].nelement())\n\n if 
self.num_ranks_in_previous_stage > 0:\n # Used to track where to send tensors in the\n # backward pass.\n self.comm_handler.increment_messaging_index(\n sending=True)\n\n def run_forward(self, recompute_step=False):\n \"\"\"Run forward pass.\n \"\"\"\n # Receive tensors from previous worker.\n ts1 = time.time()\n self.receive_tensors_forward()\n tensors = self.tensors[-1]\n ts2 = time.time()\n\n # Run forward pass.\n self._run_forward(tensors)\n ts3 = time.time()\n\n # Send tensors forward.\n self.send_tensors_forward()\n ts4 = time.time()\n\n if self.verbose_freq > 0 and self.forward_minibatch_id % self.verbose_freq == 0:\n self.forward_stats.print_stats()\n # print(f\"### fwd_rcvd {self.rank} {epoch} {self.forward_minibatch_id} {ts1:.3f} {ts2:.3f} {ts3:.3f} {ts4:.3f}\")\n # print(f\"### fwd_comp r:{self.rank} e:{epoch} b:{self.forward_minibatch_id}/{num_batches} ts: {ts2 - ts1:.3f}\")\n # print(f\"### fwd_snd_q r:{self.rank} e:{epoch} b:{self.forward_minibatch_id}/{num_batches} ts: {ts2:.3f}\")\n\n self.forward_stats.reset_stats()\n self.forward_minibatch_id += 1\n\n return ts1, ts2, ts3, ts4\n\n def _run_forward(self, tensors):\n # Perform forward pass through model (self.modules_with_dependencies already\n # has modules in topological order).\n modules = self.modules_with_dependencies.modules()\n all_input_names = self.modules_with_dependencies.all_input_names()\n all_output_names = self.modules_with_dependencies.all_output_names()\n for i, (module, input_names, output_names) in \\\n enumerate(zip(modules, all_input_names, all_output_names)):\n if i == (len(modules) - 1) and self.is_criterion:\n # If layer is criterion (loss).\n if self.model_type == SPEECH_TO_TEXT:\n output = tensors[\"output\"].transpose(0, 1).float()\n output_sizes = tensors[\"output_sizes\"].cpu()\n target = tensors[\"target\"].cpu()\n target_sizes = tensors[\"target_length\"].cpu()\n input0_size = tensors[\"input0_size\"].cpu()\n module_outputs = [module(output, target, output_sizes, target_sizes) / input0_size[0]]\n else:\n module_outputs = [module(tensors[input_name],\n tensors[\"target\"])\n for input_name in input_names]\n module_outputs = [sum(module_outputs)]\n else:\n # If layer is non-criterion.\n module_outputs = module(*[tensors[input_name]\n for input_name in input_names])\n if not isinstance(module_outputs, tuple):\n module_outputs = (module_outputs,)\n module_outputs = list(module_outputs)\n\n for (output_name, module_output) in zip(output_names, module_outputs):\n tensors[output_name] = module_output\n\n self.output = tensors[input_names[0]]\n if self.is_criterion and self.model_type == TRANSLATION:\n loss_per_batch = tensors[output_names[0]] * tensors[self.criterion_input_name].size(1)\n loss_per_token = loss_per_batch / tensors[\"target_length\"][0].item()\n self.loss = loss_per_token\n elif self.is_criterion:\n self.loss = tensors[output_names[0]]\n else:\n self.loss = 1\n\n def run_backward(self):\n # Receive input gradients needed for backward pass.\n ts1 = time.time()\n self.receive_tensors_backward()\n ts2 = time.time()\n # Backward pass through modules in reverse order.\n inputs = {}\n outputs = {}\n input_gradients = {}\n output_gradients = {}\n\n # Get input and output names spanning all modules in this stage.\n all_input_names_set = set()\n all_output_names_set = set()\n\n modules = self.modules_with_dependencies.modules()\n all_input_names = self.modules_with_dependencies.all_input_names()\n all_output_names = self.modules_with_dependencies.all_output_names()\n\n for (input_names, 
output_names) in zip(all_input_names, all_output_names):\n for input_name in input_names:\n all_input_names_set.add(input_name)\n for output_name in output_names:\n all_output_names_set.add(output_name)\n\n tensors = self.tensors.pop(0)\n # Set inputs, outputs, and output_gradients.\n # Only set outputs/output_gradients for tensors that are not inputs of\n # other modules in this stage.\n # Similarly, only set inputs for tensors that are not outputs of other\n # modules in this stage.\n for (module, input_names, output_names) in \\\n zip(reversed(modules), reversed(all_input_names), reversed(all_output_names)):\n for output_name in output_names:\n if output_name not in all_input_names_set:\n if output_name not in self.gradients:\n output_gradients[output_name] = None\n else:\n output_gradients[output_name] = self.gradients[output_name]\n if tensors[output_name].requires_grad:\n outputs[output_name] = tensors[output_name]\n for input_name in input_names:\n if input_name not in all_output_names_set:\n inputs[input_name] = tensors[input_name]\n\n # Hook to record input gradients.\n def hook_wrapper(input_name):\n def hook(input_gradient):\n input_gradients[input_name] = input_gradient\n\n return hook\n\n for input_name in inputs:\n if input_name != \"input0\" and input_name != \"input1\" and input_name != \"input2\" \\\n and inputs[input_name].requires_grad:\n inputs[input_name].register_hook(hook_wrapper(input_name))\n\n if \"loss\" in outputs:\n outputs[\"loss\"] *= self.loss_scale\n\n # Perform backward pass.\n torch.autograd.backward(tuple([outputs[output_name] for output_name in outputs]),\n grad_tensors=tuple([output_gradients[output_name]\n for output_name in outputs]))\n\n # Input tensors don't need gradients.\n for input_name in inputs:\n if not inputs[input_name].requires_grad:\n self.gradients[input_name] = inputs[input_name]\n continue\n\n if input_name != \"input0\" and input_name != \"input1\" and input_name != \"input2\" and input_name != \"input\":\n self.gradients[input_name] = input_gradients[input_name]\n\n ts3 = time.time()\n\n # Send output gradients.\n self.send_tensors_backward()\n ts4 = time.time()\n\n if self.verbose_freq > 0 and self.backward_minibatch_id % self.verbose_freq == 0:\n self.backward_stats.print_stats()\n # print(\n # f\"### bwd_rcvd {self.rank} {epoch} {self.backward_minibatch_id} {ts1:.3f} {ts2:.3f} {ts3:.3f} {ts4:.3f}\")\n # print(f\"### bwd_comp r:{self.rank} e:{epoch} b:{self.backward_minibatch_id}/{num_batches} ts: {ts2 - ts1:.3f}\")\n # print(f\"### bwd_snd_q r:{self.rank} e:{epoch} b:{self.backward_minibatch_id}/{num_batches} ts: {ts2:.3f}\")\n\n self.backward_stats.reset_stats()\n self.backward_minibatch_id += 1\n\n return ts1, ts2, ts3, ts4\n\n def num_tokens(self):\n return self.tensors[-1][\"target_length\"][0].item()\n\n def run_ack(self):\n # No need for ack if running on a single worker.\n if self.rank is None:\n return\n\n # Receive ack from next stage. 
Send ack to previous stage.\n if self.stage < (self.num_stages - 1):\n self.comm_handler.recv(\n \"ack\",\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=True)\n if self.stage > 0:\n self.comm_handler.send(\n \"ack\",\n torch.zeros(self.tensor_shapes[\"ack\"],\n dtype=torch.int64).cuda(),\n forward_minibatch_id=self.forward_minibatch_id,\n backward_minibatch_id=self.backward_minibatch_id,\n backward=True)\n\n # Used to track where to receive forward from.\n self.comm_handler.increment_messaging_index(sending=True)\n\n self.backward_minibatch_id += 1\n\n def wait(self):\n if self.comm_handler is not None:\n self.comm_handler.wait()\n\n def num_iterations(self, loader_size):\n \"\"\" Determines number of iterations for this stage\n\n TODO: don't currently support uneven configurations.\n \"\"\"\n if self.stage == 0 or self.stage is None:\n return loader_size\n\n num_iterations = loader_size * self.num_ranks_in_first_stage\n assert num_iterations % self.num_ranks_in_stage == 0\n num_iterations = num_iterations // self.num_ranks_in_stage\n\n return num_iterations\n\n def get_adjusted_learning_rate(self, base_lr):\n if self.stage == 0:\n return base_lr\n\n adjusted_lr = float(base_lr) * float(self.num_ranks_in_stage) \\\n / float(self.num_ranks_in_first_stage)\n\n return adjusted_lr\n"
] | [
[
"torch.zeros",
"torch.distributed.new_group",
"torch.nn.parallel.DistributedDataParallel",
"torch.LongTensor"
]
] |
NajusAnaxi/UNet-based-for-Brain-Tumor-Segmentation | [
"24ca4432873f145ad33810f40c851ac10bf030fa"
] | [
"setup_scripts/extract_images.py"
] | [
"import h5py\r\nimport numpy as np\r\nimport matplotlib.image as mpimg\r\nfrom tqdm import tqdm\r\nimport os\r\n\r\n\r\ndef clear_screen():\r\n \"\"\"Clears the console screen irrespective of os used\"\"\"\r\n import platform\r\n if platform.system() == 'Windows':\r\n os.system('cls')\r\n return\r\n os.system('clear')\r\n\r\n\r\ndef make_folder(target_folder):\r\n \"\"\"Creates folder if there is no folder in the specified path.\r\n Parameters: \r\n target_folder(str): path of the folder which needs to be created.\r\n\r\n Returns: None\r\n \"\"\"\r\n if not (os.path.isdir(target_folder)):\r\n print(f'Creating {target_folder} folder')\r\n os.mkdir(target_folder)\r\n\r\n\r\ndef get_image_data(filename, path):\r\n \"\"\" Reads the mat image file and returns the image & mask array.\r\n Parameters:\r\n filename(str): Name of the file without the extension.\r\n path(str): Path where the filename is located.\r\n\r\n Returns:\r\n data(dict): A dictionary with the image & mask numpy array.\r\n 'image': The numpy array for image.\r\n 'mask' : The numpy array for the above image mask.\r\n \"\"\"\r\n path = os.path.join(path, filename+'.mat')\r\n file = h5py.File(path, 'r')\r\n data = dict()\r\n data['image'] = np.array(file.get('cjdata/image'))\r\n data['mask'] = np.array(file.get('cjdata/tumorMask'))\r\n return data\r\n\r\n\r\ndef save_image_data(filename, path, data):\r\n \"\"\" Saves the image & mask array in png format.\r\n Parameters:\r\n filename(str): Name of the file without the extension.\r\n path(str): Path where the filename is to be saved.\r\n data(dict): A dictionary with the image & mask numpy array.\r\n 'image': The numpy array for image.\r\n 'mask' : The numpy array for the above image mask.\r\n\r\n Returns: None\r\n \"\"\"\r\n path_image = os.path.join(path, filename+'.png')\r\n path_mask = os.path.join(path, filename+'_mask.png')\r\n mpimg.imsave(path_image, data['image'], cmap='gray', format='png')\r\n mpimg.imsave(path_mask, data['mask'], cmap='gray', format='png')\r\n\r\n\r\ndef main():\r\n # Total number of images\r\n total_images = 3064\r\n\r\n # Dataset paths\r\n data_read_path = os.path.join('dataset', 'mat_dataset')\r\n data_save_path = os.path.join('dataset', 'png_dataset')\r\n\r\n clear_screen()\r\n\r\n # Make if folder is missing.\r\n make_folder(data_save_path)\r\n\r\n print(f'Starting to save images in {data_save_path}')\r\n\r\n for filename in tqdm(range(1, total_images+1)):\r\n filename = str(filename)\r\n data = get_image_data(filename, data_read_path)\r\n save_image_data(str(int(filename)-1), data_save_path, data)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
] | [
[
"matplotlib.image.imsave"
]
] |
marcelovca90-inatel/EC017 | [
"61bbf3c93c13a6743b829c0098d5e33340703f1f"
] | [
"NeuralNetworks-python/Perceptron.py"
] | [
"import numpy as np\r\nfrom _data import DataSets\r\nfrom _math import ActivationFunctions\r\nfrom _plot import PlotUtils\r\n\r\nclass Perceptron:\r\n\r\n def __init__(self, n, g):\r\n self.n = n # learning rate\r\n self.g = g # activation function\r\n self.plot_data_x = [] # epochs for plotting\r\n self.plot_data_y = [] # error for plotting\r\n\r\n def train(self, x, d):\r\n k = len(x)\r\n w = np.random.rand(len(x[0]))\r\n epoch = 0\r\n error = True\r\n while error and epoch < 10000:\r\n error = False\r\n for i in range(0, k):\r\n v = np.dot(np.transpose(w), x[i])\r\n y = self.g(v)\r\n if y != d[i]:\r\n w = np.add(w, np.multiply(self.n * (d[i] - y), x[i]))\r\n error = True\r\n epoch = epoch + 1\r\n print(f\"Epoch: {epoch}\\tWeights: {w}\")\r\n self.plot_data_x.append(epoch)\r\n self.plot_data_y.append(1 if error else 0)\r\n return w\r\n\r\n def test(self, w, x):\r\n v = np.dot(np.transpose(w), x)\r\n y = self.g(v)\r\n return y\r\n \r\n def evaluate(self, w, x, d):\r\n correct = 0\r\n total = len(x)\r\n for i in range(0, len(x)):\r\n y = self.test(w, x[i])\r\n if (y == d[i]):\r\n correct = correct + 1\r\n accuracy = 100.0 * (float(correct) / float(total))\r\n print(f\"Accuracy: {accuracy:.2f}% ({correct}/{total})\")\r\n return accuracy\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # set random number generator seed\r\n np.random.seed(NUMERO_DE_MATRICULA)\r\n\r\n # set floating point formatting when printing\r\n np.set_printoptions(formatter={\"float\": \"{: 0.6f}\".format})\r\n\r\n # load data\r\n x = DataSets.NOME_DO_DATASET.input\r\n d = DataSets.NOME_DO_DATASET.output\r\n\r\n # define the network parameters\r\n n = TAXA_DE_APRENDIZADO\r\n g = ActivationFunctions.FUNCAO_DE_ATIVACAO\r\n\r\n # create the neural network\r\n nn = Perceptron(n, g)\r\n\r\n # train the neural network\r\n w = nn.train(x, d)\r\n\r\n # evaluate the neural network\r\n acc = nn.evaluate(w, x, d)\r\n \r\n # plot epoch versus error data\r\n PlotUtils.plot(nn.plot_data_x, \"epoch\", nn.plot_data_y, \"error\")\r\n"
] | [
[
"numpy.transpose",
"numpy.multiply",
"numpy.random.seed",
"numpy.set_printoptions"
]
] |
waitong94/nn_physical_concepts | [
"f8cc03d46431641e7ef2ecbaeb1a1494a95f2550"
] | [
"scinet/data_loader.py"
] | [
"# Copyright 2018 SciNet (https://github.com/eth-nn-physics/nn_physical_concepts)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cPickle\nimport gzip\nimport io\nimport numpy as np\n\n\ndef load(validation_size_p, file_name):\n \"\"\"\n Params:\n validation_size_p: percentage of data to be used for validation\n file_name (str): File containing the data\n \"\"\"\n f = gzip.open(io.data_path + file_name + \".plk.gz\", 'rb')\n data, states, params = cPickle.load(f)\n states = np.array(states)\n train_val_separation = int(len(data[0]) * (1 - validation_size_p / 100.))\n training_data = [data[i][:train_val_separation] for i in [0, 1, 2]]\n training_states = states[:train_val_separation]\n validation_data = [data[i][train_val_separation:] for i in [0, 1, 2]]\n validation_states = states[train_val_separation:]\n f.close()\n return (training_data, validation_data, training_states, validation_states, params)\n"
] | [
[
"numpy.array"
]
] |
ravikumarvc/incubator-tvm | [
"9826947ffce0ed40e9d47a0db2abb033e394279e"
] | [
"apps/howto_deploy/python_deploy.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# brief Example code on load and run TVM module.s\n# file python_deploy.py\n\nimport tvm\nimport numpy as np\n\ndef verify(mod, fname):\n # Get the function from the module\n f = mod.get_function(fname)\n # Use tvm.nd.array to convert numpy ndarray to tvm\n # NDArray type, so that function can be invoked normally\n N = 10 \n x = tvm.nd.array(np.arange(N, dtype=np.float32))\n y = tvm.nd.array(np.zeros(N, dtype=np.float32))\n # Invoke the function\n f(x, y)\n np_x = x.asnumpy() \n np_y = y.asnumpy() \n # Verify correctness of function\n assert(np.all([xi+1 == yi for xi, yi in zip(np_x, np_y)]))\n print(\"Finish verification...\")\n \n\nif __name__ == \"__main__\":\n # The normal dynamic loading method for deployment\n mod_dylib = tvm.module.load(\"lib/test_addone_dll.so\")\n print(\"Verify dynamic loading from test_addone_dll.so\")\n verify(mod_dylib, \"addone\")\n # There might be methods to use the system lib way in\n # python, but dynamic loading is good enough for now.\n"
] | [
[
"numpy.arange",
"numpy.zeros"
]
] |
manhcuogntin4/handwritting-ocr | [
"aa55c2d46156a10663ad55e2fa4590c3e1333130"
] | [
"ocr/charSeg.py"
] | [
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\nfrom .helpers import *\nfrom .tfhelpers import Graph\nimport cv2\nimport math\n\n# Preloading trained model with activation function\n# Loading is slow -> prevent multiple loads\nprint(\"Loading Segmantation model:\")\nsegCNNGraph = Graph('models/gap-clas/CNN-CG')\nsegLargeCNNGraph = Graph('models/gap-clas/large/CNN-CG')\nsegRNNGraph = Graph('models/gap-clas/RNN/Bi-RNN', 'prediction')\nsegRNNDenseGraph = Graph('models/gap-clas/RNN/Bi-RNN-dense', 'prediction')\n\ndef classify(img, step=2, RNN=False, large=False):\n if large and RNN:\n slider = (60, 60)\n elif large:\n slider = (60, 120)\n else:\n slider = (60, 30)\n \n length = (img.shape[1] - slider[1]) // 2 + 1\n if RNN:\n input_seq = np.zeros((1, length, slider[0]*slider[1]), dtype=np.float32)\n input_seq[0][:] = [img[:, loc * step: loc * step + slider[1]].flatten()\n for loc in range(length)]\n if large:\n pred = segRNNDenseGraph.eval_feed({'inputs:0': input_seq,\n 'length:0': [length],\n 'keep_prob:0': 1})[0]\n else:\n pred = segRNNGraph.eval_feed({'inputs:0': input_seq,\n 'length:0': [length],\n 'keep_prob:0': 1})[0]\n else:\n input_seq = np.zeros((length, slider[0]*slider[1]), dtype=np.float32)\n input_seq[:] = [img[:, loc * step: loc * step + slider[1]].flatten()\n for loc in range(length)]\n if large:\n pred = segLargeCNNGraph.run(input_seq)\n else:\n pred = segCNNGraph.run(input_seq)\n \n return pred\n \n\ndef segmentation(img, step=2, RNN=False, large=False, debug=False):\n \"\"\"\n Take preprocessed image of word\n and return array of positions separating chars - gaps\n \"\"\" \n if large:\n slider = (60, 120)\n else:\n slider = (60, 30)\n length = (img.shape[1] - slider[1]) // 2 + 1\n \n pred = classify(img, step, RNN, large)\n\n gaps = []\n\n lastGap = 0\n gapCount = 1\n gapPositionSum = slider[1] / 2\n firstGap = True\n gapBlockFirst = 0\n gapBlockLast = slider[1]/2\n\n for i, p in enumerate(pred):\n if p == 1:\n gapPositionSum += i * step + slider[1] / 2\n gapBlockLast = i * step + slider[1] / 2\n gapCount += 1\n lastGap = 0\n if gapBlockFirst == 0:\n gapBlockFirst = i * step + slider[1] / 2\n else:\n if gapCount != 0 and lastGap >= 1:\n if firstGap:\n gaps.append(int(gapBlockLast))\n firstGap = False\n else:\n gaps.append(int(gapPositionSum // gapCount))\n gapPositionSum = 0\n gapCount = 0\n gapBlockFirst = 0\n lastGap += 1\n\n # Adding final gap position\n if gapBlockFirst != 0:\n gaps.append(int(gapBlockFirst))\n else:\n gapPositionSum += (length - 1) * 2 + slider[1]/2\n gaps.append(int(gapPositionSum / (gapCount + 1)))\n \n if debug:\n # Drawing lines\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n for gap in gaps:\n cv2.line(img,\n ((int)(gap), 0),\n ((int)(gap), slider[1]),\n (0, 255, 0), 1)\n implt(img, t=\"Separated characters\")\n \n return gaps"
] | [
[
"numpy.zeros"
]
] |
cmutel/SALib | [
"32e33c423bcc981d0cfd4339a3e2435d6b945de1"
] | [
"src/SALib/test_functions/Sobol_G.py"
] | [
"from __future__ import division\r\n\r\nimport numpy as np\r\n\r\n\r\n# Non-monotonic Sobol G Function (8 parameters)\r\n# First-order indices:\r\n# x1: 0.7165\r\n# x2: 0.1791\r\n# x3: 0.0237\r\n# x4: 0.0072\r\n# x5-x8: 0.0001\r\ndef evaluate(values, a=None):\r\n if type(values) != np.ndarray:\r\n raise TypeError(\"The argument `values` must be a numpy ndarray\")\r\n if a is None:\r\n a = [0, 1, 4.5, 9, 99, 99, 99, 99]\r\n\r\n ltz = values < 0\r\n gto = values > 1\r\n\r\n if ltz.any() == True:\r\n raise ValueError(\"Sobol G function called with values less than zero\")\r\n elif gto.any() == True:\r\n raise ValueError(\"Sobol G function called with values greater than one\")\r\n\r\n Y = np.ones([values.shape[0]])\r\n\r\n len_a = len(a)\r\n for i, row in enumerate(values):\r\n for j in range(len_a):\r\n x = row[j]\r\n a_j = a[j]\r\n Y[i] *= (np.abs(4 * x - 2) + a_j) / (1 + a_j)\r\n\r\n return Y\r\n\r\n\r\ndef partial_first_order_variance(a=None):\r\n if a is None:\r\n a = [0, 1, 4.5, 9, 99, 99, 99, 99]\r\n a = np.array(a)\r\n return np.divide(1, np.multiply(3, np.square(1 + a)))\r\n\r\n\r\ndef total_variance(a=None):\r\n if a is None:\r\n a = [0, 1, 4.5, 9, 99, 99, 99, 99]\r\n a = np.array(a)\r\n return np.add(-1, np.product(1 + partial_first_order_variance(a), axis=0))\r\n\r\n\r\ndef sensitivity_index(a):\r\n a = np.array(a)\r\n return np.divide(partial_first_order_variance(a), total_variance(a))\r\n\r\n\r\ndef total_sensitivity_index(a):\r\n a = np.array(a)\r\n \r\n pv = partial_first_order_variance(a)\r\n tv = total_variance(a)\r\n \r\n sum_pv = pv.sum(axis=0)\r\n \r\n return np.subtract(1, np.divide(np.subtract(sum_pv, pv.T), tv))\r\n"
] | [
[
"numpy.ones",
"numpy.subtract",
"numpy.abs",
"numpy.array",
"numpy.square"
]
] |
fmobrj/doctr | [
"b149266ea57fd59047193a01c328c2b8ecb9330a"
] | [
"doctr/models/recognition/crnn/tensorflow.py"
] | [
"# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom copy import deepcopy\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential, Model\nfrom typing import Tuple, Dict, Any, Optional, List\n\nfrom ...backbones import vgg16_bn, resnet31, mobilenet_v3_small_r, mobilenet_v3_large_r\nfrom ...utils import load_pretrained_params\nfrom ..core import RecognitionModel, RecognitionPostProcessor\nfrom ....datasets import VOCABS\n\n__all__ = ['CRNN', 'crnn_vgg16_bn', 'CTCPostProcessor', 'crnn_mobilenet_v3_small',\n 'crnn_mobilenet_v3_large']\n\ndefault_cfgs: Dict[str, Dict[str, Any]] = {\n 'crnn_vgg16_bn': {\n 'mean': (0.694, 0.695, 0.693),\n 'std': (0.299, 0.296, 0.301),\n 'backbone': vgg16_bn, 'rnn_units': 128,\n 'input_shape': (32, 128, 3),\n 'vocab': VOCABS['legacy_french'],\n 'url': 'https://github.com/mindee/doctr/releases/download/v0.3.0/crnn_vgg16_bn-76b7f2c6.zip',\n },\n 'crnn_mobilenet_v3_small': {\n 'mean': (0.694, 0.695, 0.693),\n 'std': (0.299, 0.296, 0.301),\n 'backbone': mobilenet_v3_small_r, 'rnn_units': 128,\n 'input_shape': (32, 128, 3),\n 'vocab': VOCABS['french'],\n 'url': 'https://github.com/mindee/doctr/releases/download/v0.3.1/crnn_mobilenet_v3_small-7f36edec.zip',\n },\n 'crnn_mobilenet_v3_large': {\n 'mean': (0.694, 0.695, 0.693),\n 'std': (0.299, 0.296, 0.301),\n 'backbone': mobilenet_v3_large_r, 'rnn_units': 128,\n 'input_shape': (32, 128, 3),\n 'vocab': VOCABS['french'],\n 'url': None,\n },\n}\n\n\nclass CTCPostProcessor(RecognitionPostProcessor):\n \"\"\"\n Postprocess raw prediction of the model (logits) to a list of words using CTC decoding\n\n Args:\n vocab: string containing the ordered sequence of supported characters\n ignore_case: if True, ignore case of letters\n ignore_accents: if True, ignore accents of letters\n \"\"\"\n\n def __call__(\n self,\n logits: tf.Tensor\n ) -> List[Tuple[str, float]]:\n \"\"\"\n Performs decoding of raw output with CTC and decoding of CTC predictions\n with label_to_idx mapping dictionnary\n\n Args:\n logits: raw output of the model, shape BATCH_SIZE X SEQ_LEN X NUM_CLASSES + 1\n\n Returns:\n A list of decoded words of length BATCH_SIZE\n\n \"\"\"\n # Decode CTC\n _decoded, _log_prob = tf.nn.ctc_beam_search_decoder(\n tf.transpose(logits, perm=[1, 0, 2]),\n tf.fill(logits.shape[0], logits.shape[1]),\n beam_width=1, top_paths=1,\n )\n out_idxs = tf.sparse.to_dense(_decoded[0], default_value=len(self.vocab))\n probs = tf.math.exp(tf.squeeze(_log_prob, axis=1))\n\n # Map it to characters\n _decoded_strings_pred = tf.strings.reduce_join(\n inputs=tf.nn.embedding_lookup(tf.constant(self._embedding, dtype=tf.string), out_idxs),\n axis=-1\n )\n _decoded_strings_pred = tf.strings.split(_decoded_strings_pred, \"<eos>\")\n decoded_strings_pred = tf.sparse.to_dense(_decoded_strings_pred.to_sparse(), default_value='not valid')[:, 0]\n word_values = [word.decode() for word in decoded_strings_pred.numpy().tolist()]\n\n return list(zip(word_values, probs.numpy().tolist()))\n\n\nclass CRNN(RecognitionModel, Model):\n \"\"\"Implements a CRNN architecture as described in `\"An End-to-End Trainable Neural Network for Image-based\n Sequence Recognition and Its Application to Scene Text Recognition\" <https://arxiv.org/pdf/1507.05717.pdf>`_.\n\n Args:\n feature_extractor: the backbone serving as feature extractor\n vocab: vocabulary used for encoding\n 
rnn_units: number of units in the LSTM layers\n cfg: configuration dictionary\n \"\"\"\n\n _children_names: List[str] = ['feat_extractor', 'decoder', 'postprocessor']\n\n def __init__(\n self,\n feature_extractor: tf.keras.Model,\n vocab: str,\n rnn_units: int = 128,\n cfg: Optional[Dict[str, Any]] = None,\n ) -> None:\n # Initialize kernels\n h, w, c = feature_extractor.output_shape[1:]\n\n super().__init__()\n self.vocab = vocab\n self.max_length = w\n self.cfg = cfg\n self.feat_extractor = feature_extractor\n\n self.decoder = Sequential(\n [\n layers.Bidirectional(layers.LSTM(units=rnn_units, return_sequences=True)),\n layers.Bidirectional(layers.LSTM(units=rnn_units, return_sequences=True)),\n layers.Dense(units=len(vocab) + 1)\n ]\n )\n self.decoder.build(input_shape=(None, w, h * c))\n\n self.postprocessor = CTCPostProcessor(vocab=vocab)\n\n def compute_loss(\n self,\n model_output: tf.Tensor,\n target: List[str],\n ) -> tf.Tensor:\n \"\"\"Compute CTC loss for the model.\n\n Args:\n gt: the encoded tensor with gt labels\n model_output: predicted logits of the model\n seq_len: lengths of each gt word inside the batch\n\n Returns:\n The loss of the model on the batch\n \"\"\"\n gt, seq_len = self.compute_target(target)\n batch_len = model_output.shape[0]\n input_length = tf.fill((batch_len,), model_output.shape[1])\n ctc_loss = tf.nn.ctc_loss(\n gt, model_output, seq_len, input_length, logits_time_major=False, blank_index=len(self.vocab)\n )\n return ctc_loss\n\n def call(\n self,\n x: tf.Tensor,\n target: Optional[List[str]] = None,\n return_model_output: bool = False,\n return_preds: bool = False,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n\n features = self.feat_extractor(x, **kwargs)\n # B x H x W x C --> B x W x H x C\n transposed_feat = tf.transpose(features, perm=[0, 2, 1, 3])\n w, h, c = transposed_feat.get_shape().as_list()[1:]\n # B x W x H x C --> B x W x H * C\n features_seq = tf.reshape(transposed_feat, shape=(-1, w, h * c))\n logits = self.decoder(features_seq, **kwargs)\n\n out: Dict[str, tf.Tensor] = {}\n if return_model_output:\n out[\"out_map\"] = logits\n\n if target is None or return_preds:\n # Post-process boxes\n out[\"preds\"] = self.postprocessor(logits)\n\n if target is not None:\n out['loss'] = self.compute_loss(logits, target)\n\n return out\n\n\ndef _crnn(\n arch: str,\n pretrained: bool,\n pretrained_backbone: bool = True,\n input_shape: Optional[Tuple[int, int, int]] = None,\n **kwargs: Any\n) -> CRNN:\n\n pretrained_backbone = pretrained_backbone and not pretrained\n\n # Patch the config\n _cfg = deepcopy(default_cfgs[arch])\n _cfg['input_shape'] = input_shape or _cfg['input_shape']\n _cfg['vocab'] = kwargs.get('vocab', _cfg['vocab'])\n _cfg['rnn_units'] = kwargs.get('rnn_units', _cfg['rnn_units'])\n\n # Feature extractor\n feat_extractor = _cfg['backbone'](\n input_shape=_cfg['input_shape'],\n include_top=False,\n pretrained=pretrained_backbone,\n )\n\n kwargs['vocab'] = _cfg['vocab']\n kwargs['rnn_units'] = _cfg['rnn_units']\n\n # Build the model\n model = CRNN(feat_extractor, cfg=_cfg, **kwargs)\n # Load pretrained parameters\n if pretrained:\n load_pretrained_params(model, _cfg['url'])\n\n return model\n\n\ndef crnn_vgg16_bn(pretrained: bool = False, **kwargs: Any) -> CRNN:\n \"\"\"CRNN with a VGG-16 backbone as described in `\"An End-to-End Trainable Neural Network for Image-based\n Sequence Recognition and Its Application to Scene Text Recognition\" <https://arxiv.org/pdf/1507.05717.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> from 
doctr.models import crnn_vgg16_bn\n >>> model = crnn_vgg16_bn(pretrained=True)\n >>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on our text recognition dataset\n\n Returns:\n text recognition architecture\n \"\"\"\n\n return _crnn('crnn_vgg16_bn', pretrained, **kwargs)\n\n\ndef crnn_mobilenet_v3_small(pretrained: bool = False, **kwargs: Any) -> CRNN:\n \"\"\"CRNN with a MobileNet V3 Small backbone as described in `\"An End-to-End Trainable Neural Network for Image-based\n Sequence Recognition and Its Application to Scene Text Recognition\" <https://arxiv.org/pdf/1507.05717.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> from doctr.models import crnn_mobilenet_v3_small\n >>> model = crnn_mobilenet_v3_small(pretrained=True)\n >>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on our text recognition dataset\n\n Returns:\n text recognition architecture\n \"\"\"\n\n return _crnn('crnn_mobilenet_v3_small', pretrained, **kwargs)\n\n\ndef crnn_mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> CRNN:\n \"\"\"CRNN with a MobileNet V3 Large backbone as described in `\"An End-to-End Trainable Neural Network for Image-based\n Sequence Recognition and Its Application to Scene Text Recognition\" <https://arxiv.org/pdf/1507.05717.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> from doctr.models import crnn_mobilenet_v3_large\n >>> model = crnn_mobilenet_v3_large(pretrained=True)\n >>> input_tensor = tf.random.uniform(shape=[1, 32, 128, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on our text recognition dataset\n\n Returns:\n text recognition architecture\n \"\"\"\n\n return _crnn('crnn_mobilenet_v3_large', pretrained, **kwargs)\n"
] | [
[
"tensorflow.reshape",
"tensorflow.strings.split",
"tensorflow.fill",
"tensorflow.squeeze",
"tensorflow.keras.layers.LSTM",
"tensorflow.constant",
"tensorflow.transpose"
]
] |
emmahodcroft/treetime | [
"8926e49e17538c19ad0950e365f15035a76c8fc5"
] | [
"treetime/treeanc.py"
] | [
"from __future__ import print_function, division\nimport time\nimport config as ttconf\nfrom Bio import Phylo\nfrom Bio import AlignIO\nimport numpy as np\nfrom gtr import GTR\nimport seq_utils\nfrom version import tt_version as __version__\ntry:\n from itertools import izip\nexcept ImportError: #python3.x\n izip = zip\n\n\nclass TreeAnc(object):\n \"\"\"\n Class defines simple tree object with basic interface methods: reading and\n saving from/to files, initializing leaves with sequences from the\n alignment, making ancestral state inferrence\n \"\"\"\n\n def __init__(self, tree=None, aln=None, gtr=None, fill_overhangs=True,\n ref=None, verbose = ttconf.VERBOSE, ignore_gaps=True,\n convert_upper=True, seq_multiplicity=None, log=None, **kwargs):\n \"\"\"\n TreeAnc constructor. It prepares tree, attach sequences to the leaf nodes,\n and sets some configuration parameters.\n\n Parameters\n ----------\n\n tree : str, Bio.Phylo.Tree\n Phylogenetic tree. String passed is interpreted as a filename to\n construct the Biopython tree.\n\n aln : str, Bio.Align.MultipleSequenceAlignment\n Sequence alignment. If a string passed, it is interpreted as the\n filename to read Biopython alignment from.\n\n gtr : str, GTR\n gtr model object. If string passed, it is interpreted as the type of\n the GTR model. A new GTR instance will be created for this type.\n **Note** some GTR types require additional configuration parameters.\n If the new GTR is being instantiated, these parameters are expected\n to be passed as kwargs. If nothing is passed, the default values are\n used, which might cause unexpected results.\n\n fill_overhangs : bool\n In some cases, the missing data on both ends of the alignment is\n filled with the gap sign('-'). As we suppose that the most\n appropriate way to deal with the missing data is to assign it to the\n \"unknown\" character ('N' for nucleotides, 'X' for aminoacids). If the\n parameter is set to True, the end-gaps are converted to unknown\n symbols. Otherwise, the alignment is treated as-is\n\n ignore_gaps: bool\n ignore gaps in branch length calculations\n\n verbose : int\n verbosity level as number from 0 (lowest) to 10 (highest).\n\n seq_multiplicity: dict\n if individual nodes in the tree correspond to multiple sampled sequences\n (i.e. read count in a deep sequencing experiment), these can be\n specified as a dictionary\n\n Keyword Args\n ------------\n\n Keyword arguments to construct GTR model\n\n \"\"\"\n if tree is None:\n raise TypeError(\"TreeAnc requires a tree!\")\n self.__version__ = __version__\n self.t_start = time.time()\n self.verbose = verbose\n self.log=log\n self.logger(\"TreeAnc: set-up\",1)\n self._internal_node_count = 0\n self.use_mutation_length=False\n self.one_mutation = None\n self.fill_overhangs = fill_overhangs\n self.is_vcf = False #this is set true when aln is set, if aln is dict\n\n self.var_positions = None #set during seq compression, if aln is dict\n self.inferred_const_sites = [] #keeps track of pos where ambig sites replaced with base\n #This preserves original compressed sequence so ambiguous positions can be recovered later\n self.ambigPos = {}\n\n self.seq_multiplicity = {} if seq_multiplicity is None else seq_multiplicity\n\n self.ignore_gaps = ignore_gaps\n self.set_gtr(gtr if gtr is not None else 'JC69', **kwargs)\n\n self.tree = tree\n if tree is None:\n self.logger(\"TreeAnc: tree loading failed! 
exiting\",0)\n return\n\n # will be None if not set\n self.ref = ref\n\n # force all sequences to be upper case letters\n # (desired for nuc or aa, not for other discrete states)\n self.convert_upper = convert_upper\n\n # set alignment and attach sequences to tree on success.\n # otherwise self.aln will be None\n self.aln = aln\n\n\n def logger(self, msg, level, warn=False):\n \"\"\"\n Print log message *msg* to stdout.\n\n Parameters\n -----------\n\n msg : str\n string to print on the screen\n\n level : int\n log-level. Only the messages with the level higher than the\n current verbose level will be shown.\n\n warn : bool\n warning flag. If True, the message will be displayed\n regardless of its log-level.\n\n \"\"\"\n if level<self.verbose or (warn and level<=self.verbose):\n dt = time.time() - self.t_start\n outstr = '\\n' if level<2 else ''\n outstr+=format(dt, '4.2f')+'\\t'\n outstr+= level*'-'\n outstr+=msg\n try:\n log.write(outstr+'\\n')\n log.flush()\n except:\n print(outstr)\n\n\n####################################################################\n## SET-UP\n####################################################################\n @property\n def leaves_lookup(self):\n \"\"\"\n Leaves lookup is the {leaf-name:leaf-node} dictionary. It enables fast\n search of a tree leaf object by its name.\n \"\"\"\n return self._leaves_lookup\n\n @property\n def gtr(self):\n \"\"\"\n Get GTR object currently used.\n \"\"\"\n return self._gtr\n\n @gtr.setter\n def gtr(self, value):\n \"\"\"\n Set a new GTR object\n\n Parameters\n -----------\n\n value :GTR\n the new GTR object\n \"\"\"\n if not isinstance(value, GTR):\n raise TypeError(\" GTR instance expected\")\n self._gtr = value\n\n\n def set_gtr(self, in_gtr, **kwargs):\n \"\"\"\n Create new GTR model if needed, and set the model as an attribute of the\n TreeAnc class\n\n Parameters\n -----------\n\n in_gtr : str, GTR\n The gtr model to be assigned. If string is passed,\n it is understood as the name of the standard GTR model, and is\n attempted to be created through GTR.standard() interface. In case\n GTR instance is passed, it is directly set as the class attribute\n\n Keyword Args\n ------------\n\n All parameters needed for the gtr creation. If none passed, defaults are assumed.\n Refer to the particular GTR models for the exact parameter values\n\n \"\"\"\n if type(in_gtr)==str:\n self._gtr = GTR.standard(model=in_gtr, **kwargs)\n self._gtr.logger = self.logger\n\n elif isinstance(in_gtr, GTR):\n self._gtr = in_gtr\n self._gtr.logger=self.logger\n else:\n self.logger(\"TreeAnc.gtr_setter: can't interpret GTR model\", 1, warn=True)\n raise TypeError(\"Cannot set GTR model in TreeAnc class: GTR or \"\n \"string expected\")\n\n if self._gtr.ambiguous is None:\n self.fill_overhangs=False\n\n\n @property\n def tree(self):\n \"\"\"\n Get reference to the phylogenetic tree currently used by the TreeAnc.\n \"\"\"\n return self._tree\n\n @tree.setter\n def tree(self, in_tree):\n '''\n assigns a tree to the internal self._tree variable. The tree is either\n loaded from file (if in_tree is str) or assigned (if in_tree is a Phylo.tree)\n '''\n from os.path import isfile\n if isinstance(in_tree, Phylo.BaseTree.Tree):\n self._tree = in_tree\n elif type(in_tree) in [str, unicode] and isfile(in_tree):\n try:\n self._tree=Phylo.read(in_tree, 'newick')\n except:\n fmt = in_tree.split('.')[-1]\n if fmt in ['nexus', 'nex']:\n self._tree=Phylo.read(in_tree, 'nexus')\n else:\n self.logger('TreeAnc: could not load tree, format needs to be nexus or newick! 
input was '+str(in_tree),1)\n self._tree = None\n return\n else:\n self.logger('TreeAnc: could not load tree! input was '+str(in_tree),1)\n self._tree = None\n return\n\n # remove all existing sequence attributes\n for node in self._tree.find_clades():\n if hasattr(node, \"sequence\"):\n node.__delattr__(\"sequence\")\n node.original_length = node.branch_length\n node.mutation_length = node.branch_length\n self.prepare_tree()\n\n\n @property\n def aln(self):\n \"\"\"\n Get the multiple sequence alignment currently used by the TreeAnc\n \"\"\"\n return self._aln\n\n @aln.setter\n def aln(self,in_aln):\n # load alignment from file if necessary\n from os.path import isfile\n from Bio.Align import MultipleSeqAlignment\n self._aln = None\n if isinstance(in_aln, MultipleSeqAlignment):\n self._aln = in_aln\n elif type(in_aln) in [str, unicode] and isfile(in_aln):\n for fmt in ['fasta', 'phylip-relaxed', 'nexus']:\n try:\n self._aln=AlignIO.read(in_aln, 'fasta')\n break\n except:\n continue\n elif type(in_aln) is dict: #if is read in from VCF file\n self._aln = in_aln\n self.is_vcf = True\n\n if self._aln is None:\n self.logger(\"TreeAnc: loading alignment failed... \",1, warn=True)\n return\n\n #Convert to uppercase here, rather than in _attach_sequences_to_nodes\n #(which used to do it through seq2array in seq_utils.py)\n #so that it is controlled by param convert_upper. This way for\n #mugration (ancestral reconstruction of non-sequences), you can\n #use upper- and lower case characters for discrete states!\n if (not self.is_vcf) and self.convert_upper:\n self._aln = MultipleSeqAlignment([seq.upper() for seq in self._aln])\n\n if hasattr(self, '_tree'):\n self._attach_sequences_to_nodes()\n else:\n self.logger(\"TreeAnc.aln: sequences not yet attached to tree\", 3, warn=True)\n\n @property\n def ref(self):\n \"\"\"\n Get the str reference nucleotide sequence currently used by TreeAnc\n When having read in from a VCF, this is what variants map to\n \"\"\"\n return self._ref\n\n\n @ref.setter\n def ref(self, in_ref):\n self._ref = in_ref\n\n\n def _attach_sequences_to_nodes(self):\n #print (\"inside attach seq to nodes\")\n '''\n For each node of the tree, check whether there is a sequence available\n in the alignment and assign this sequence as a character array\n '''\n if type(self.aln) is dict:\n self.seq_len = len(self.ref)\n else:\n self.seq_len = self.aln.get_alignment_length()\n self.one_mutation = 1.0/self.seq_len\n\n failed_leaves= 0\n if type(self.aln) is dict:\n # if alignment is specified as difference from ref\n dic_aln = self.aln\n self.seq_len = len(self.ref)\n else:\n # if full alignment is specified\n dic_aln = {k.name: seq_utils.seq2array(k.seq, fill_overhangs=self.fill_overhangs,\n ambiguous_character=self.gtr.ambiguous)\n for k in self.aln} #\n self.seq_len = self.aln.get_alignment_length()\n\n self.one_mutation = 1.0/self.seq_len\n\n\n # loop over tree,\n for l in self.tree.find_clades():\n if l.name in dic_aln:\n l.sequence= dic_aln[l.name]\n if l.name in self.seq_multiplicity:\n l.count = self.seq_multiplicity[l.name]\n else:\n l.count = 1.0\n elif l.is_terminal():\n self.logger(\"***WARNING: TreeAnc._attach_sequences_to_nodes: NO SEQUENCE FOR LEAF: %s\" % l.name, 0, warn=True)\n failed_leaves += 1\n l.sequence = seq_utils.seq2array(self.gtr.ambiguous*self.seq_len, fill_overhangs=self.fill_overhangs,\n ambiguous_character=self.gtr.ambiguous)\n if failed_leaves > self.tree.count_terminals() / 3:\n self.logger(\"ERROR: At least 30\\\\% terminal nodes cannot be assigned with a 
sequence!\\n\", 0, warn=True)\n self.logger(\"Are you sure the alignment belongs to the tree?\", 2, warn=True)\n break\n else: # could not assign sequence for internal node - is OK\n pass\n\n if failed_leaves:\n self.logger(\"***WARNING: TreeAnc: %d nodes don't have a matching sequence in the alignment. POSSIBLE ERROR.\"%failed_leaves, 0, warn=True)\n\n self.make_reduced_alignment()\n\n\n def make_reduced_alignment(self):\n \"\"\"\n Create the reduced alignment from the full sequences attached to (some)\n tree nodes. The methods collects all sequences from the tree nodes, creates\n the alignment, counts the multiplicity for each column of the alignment\n ('alignment pattern'), and creates the reduced alignment, where only the\n unique patterns are present. The reduced alignment and the pattern multiplicity\n are sufficient for the GTR calculations and allow to save memory on profile\n instantiation.\n The maps from full sequence to reduced sequence and back are also stored to allow\n compressing and expanding the sequences.\n\n The following attributes are assigned by the method:\n\n - full_to_reduced_sequence_map: map to reduce a sequence\n - reduced_to_full_sequence_map: map to restore sequence from reduced alignment\n - multiplicity: numpy array, which stores the pattern multiplicity for\n each position of the reduced alignment.\n - reduced_alignment: 2D numpy array, representing the alignment. Shape is\n (N x L'), where N is number of sequences, L' - number of unique alignment patterns\n\n\n In addition, each node gets\n\n - cseq: compressed sequence (corresponding row of the reduced alignment)\n\n \"\"\"\n\n self.logger(\"TreeAnc: making reduced alignment...\", 1)\n\n from collections import defaultdict\n\n # bind positions in real sequence to that of the reduced (compressed) sequence\n self.full_to_reduced_sequence_map = np.zeros(self.seq_len, dtype=int)\n\n # bind position in reduced sequence to the array of positions in real (expanded) sequence\n self.reduced_to_full_sequence_map = {}\n\n #if is a dict, want to be efficient and not iterate over a bunch of const_sites\n #so pre-load alignment_patterns with the location of const sites!\n #and get the sites that we want to iterate over only!\n if type(self.aln) is dict:\n tmp_reduced_aln, alignment_patterns, positions = self.process_alignment_dict()\n seqNames = self.aln.keys() #store seqName order to put back on tree\n else:\n # transpose real alignment, for ease of iteration\n alignment_patterns = {}\n tmp_reduced_aln = []\n # NOTE the order of tree traversal must be the same as below\n # for assigning the cseq attributes to the nodes.\n seqs = [n.sequence for n in self.tree.find_clades() if hasattr(n, 'sequence')]\n if len(np.unique([len(x) for x in seqs]))>1:\n self.logger(\"TreeAnc: Sequences differ in in length! 
ABORTING\",0, warn=True)\n aln_transpose = None\n return\n else:\n aln_transpose = np.array(seqs).T\n positions = range(self.seq_len)\n\n for pi in positions:\n if type(self.aln) is dict:\n pattern = [ self.aln[k][pi] if pi in self.aln[k].keys()\n else self.ref[pi] for k,v in self.aln.iteritems() ]\n else:\n pattern = aln_transpose[pi]\n\n str_pat = \"\".join(pattern)\n # if the column contains only one state and ambiguous nucleotides, replace\n # those with the state in other strains right away\n unique_letters = list(np.unique(pattern))\n if hasattr(self.gtr, \"ambiguous\"):\n if len(unique_letters)==2 and self.gtr.ambiguous in unique_letters:\n self.inferred_const_sites.append(pi) #keep track\n other = [c for c in unique_letters if c!=self.gtr.ambiguous][0]\n str_pat = str_pat.replace(self.gtr.ambiguous, other)\n unique_letters = [other]\n # if there is a mutation in this column, give it its private pattern\n # this is required when sampling mutations from reconstructed profiles.\n # otherwise, all mutations corresponding to the same pattern will be coupled.\n if len(unique_letters)>1:\n str_pat += '_%d'%pi\n\n # if the pattern is not yet seen,\n if str_pat not in alignment_patterns:\n # bind the index in the reduced aln, index in sequence to the pattern string\n alignment_patterns[str_pat] = (len(tmp_reduced_aln), [pi])\n # append this pattern to the reduced alignment\n tmp_reduced_aln.append(pattern)\n else:\n # if the pattern is already seen, append the position in the real\n # sequence to the reduced aln<->sequence_pos_indexes map\n alignment_patterns[str_pat][1].append(pi)\n\n # count how many times each column is repeated in the real alignment\n self.multiplicity = np.zeros(len(alignment_patterns))\n for p, pos in alignment_patterns.values():\n self.multiplicity[p]=len(pos)\n\n # create the reduced alignment as np array\n self.reduced_alignment = np.array(tmp_reduced_aln).T\n\n # create map to compress a sequence\n for p, pos in alignment_patterns.values():\n self.full_to_reduced_sequence_map[np.array(pos)]=p\n\n # create a map to reconstruct full sequence from the reduced (compressed) sequence\n for p, val in alignment_patterns.iteritems():\n self.reduced_to_full_sequence_map[val[0]]=np.array(val[1], dtype=int)\n\n # assign compressed sequences to all nodes of the tree, which have sequence assigned\n # for dict we cannot assume this is in the same order, as it does below!\n # so do it explicitly\n if type(self.aln) is dict:\n seq_reduce_align = {n:self.reduced_alignment[i]\n for i, n in enumerate(seqNames)}\n #This copy of the compressed sequences can be used to recover Ambiguous variable positions later\n #after all other processing has been done (see \"recover_var_ambigs\")\n self.ambigPos = {n:self.reduced_alignment[i]\n for i, n in enumerate(seqNames)}\n\n for n in self.tree.find_clades():\n if hasattr(n, 'sequence'):\n n.cseq = seq_reduce_align[n.name]\n else:\n # NOTE the order of tree traversal must be the same as above to catch the\n # index in the reduced alignment correctly\n seq_count = 0\n for n in self.tree.find_clades():\n if hasattr(n, 'sequence'):\n n.cseq = self.reduced_alignment[seq_count]\n seq_count+=1\n\n self.logger(\"TreeAnc: finished reduced alignment...\", 1)\n\n\n def process_alignment_dict(self):\n \"\"\"\n prepare the dictionary specifying differences from a reference sequence\n to construct the reduced alignment with variable sites only. 
NOTE:\n - sites can be constant but different from the reference\n - sites can be constant plus a ambiguous sites\n\n assigns:\n - self.nonref_positions: at least one sequence is different from ref\n returns:\n - reduced_alignment_const: reduced alignment accounting for\n non-variable postitions\n - alignment_patterns_const:\n dict pattern -> (pos in reduced alignment, list of pos in full alignment)\n - variable_positions: list of variable positions needed to construct remaining\n \"\"\"\n\n # number of sequences in alignment\n nseq = len(self.aln)\n\n from collections import defaultdict\n inv_map = defaultdict(list)\n for k,v in self.aln.iteritems():\n for pos, bs in v.iteritems():\n inv_map[pos].append(bs)\n\n self.nonref_positions = np.sort(inv_map.keys())\n\n ambiguous_char = self.gtr.ambiguous\n nonref_const = []\n nonref_alleles = []\n ambiguous_const = []\n variable_pos = []\n for pos, bs in inv_map.iteritems(): #loop over positions and patterns\n bases = \"\".join(np.unique(bs))\n if len(bs) == nseq:\n if (len(bases)<=2 and ambiguous_char in bases) or len(bases)==1:\n # all sequences different from reference, but only one state\n # (other than ambiguous_char) in column\n nonref_const.append(pos)\n nonref_alleles.append(bases.replace(ambiguous_char, ''))\n if ambiguous_char in bases: #keep track of sites 'made constant'\n self.inferred_const_sites.append(pos)\n else:\n # at least two non-reference alleles\n variable_pos.append(pos)\n else:\n # not every sequence different from reference\n if bases==ambiguous_char:\n ambiguous_const.append(pos)\n self.inferred_const_sites.append(pos) #keep track of sites 'made constant'\n else:\n # at least one non ambiguous non-reference allele not in\n # every sequence\n variable_pos.append(pos)\n\n refMod = np.fromstring(self.ref, 'S1')\n # place constant non reference positions by their respective allele\n refMod[nonref_const] = nonref_alleles\n # mask variable positions\n states = self.gtr.alphabet\n # maybe states = np.unique(refMod)\n refMod[variable_pos] = '.'\n\n # for each base in the gtr, make constant alignment pattern and\n # assign it to all const positions in the modified reference sequence\n reduced_alignment_const = []\n alignment_patterns_const = {}\n for base in states:\n p = base*nseq\n #if the alignment doesn't have a const site of this base, don't add! 
(ex: no '----' site!)\n if len(np.where(refMod==base)[0]):\n alignment_patterns_const[p] = [len(reduced_alignment_const),\n list(np.where(refMod==base)[0])]\n reduced_alignment_const.append(list(p))\n\n return reduced_alignment_const, alignment_patterns_const, variable_pos\n\n\n def prepare_tree(self):\n \"\"\"\n Set link to parent and net distance to root for all tree nodes.\n Should be run once the tree is read and after every tree topology or branch\n length optimizations.\n \"\"\"\n if self.one_mutation is None:\n self.tree.root.branch_length = 0.001\n else:\n self.tree.root.branch_length = self.one_mutation\n self.tree.root.mutation_length = self.tree.root.branch_length\n self.tree.root.mutations = []\n self.tree.ladderize()\n self._prepare_nodes()\n self._leaves_lookup = {node.name:node for node in self.tree.get_terminals()}\n\n\n def _prepare_nodes(self):\n \"\"\"\n Set auxilliary parameters to every node of the tree.\n \"\"\"\n self.tree.root.up = None\n self.tree.root.bad_branch=self.tree.root.bad_branch if hasattr(self.tree.root, 'bad_branch') else False\n internal_node_count = 0\n for clade in self.tree.get_nonterminals(order='preorder'): # parents first\n internal_node_count+=1\n if clade.name is None:\n clade.name = \"NODE_\" + format(self._internal_node_count, '07d')\n self._internal_node_count += 1\n for c in clade.clades:\n c.bad_branch=c.bad_branch if hasattr(c, 'bad_branch') else False\n c.up = clade\n self._calc_dist2root()\n self._internal_node_count = max(internal_node_count, self._internal_node_count)\n\n def _calc_dist2root(self):\n \"\"\"\n For each node in the tree, set its root-to-node distance as dist2root\n attribute\n \"\"\"\n self.tree.root.dist2root = 0.0\n for clade in self.tree.get_nonterminals(order='preorder'): # parents first\n for c in clade.clades:\n if not hasattr(c, 'mutation_length'):\n c.mutation_length=c.branch_length\n c.dist2root = c.up.dist2root + c.mutation_length\n\n\n####################################################################\n## END SET-UP\n####################################################################\n\n def infer_gtr(self, print_raw=False, marginal=False, normalized_rate=True,\n fixed_pi=None, pc=5.0, **kwargs):\n \"\"\"\n Calculates GTR model given the multiple sequence alignment and the tree.\n It performs ancestral sequence inferrence (joint or marginal) followed by\n the branch lengths optimization. 
Then, the numbers of mutations are counted\n in the optimal tree and related to the time within the mutation happened.\n From this statistics, the relative state transition probabilities are inferred,\n and the transition matrix is computed.\n The result is used to construct the new GTR model of type 'custom'.\n The model is assigned to the TreeAnc and is used in the following analysis.\n\n Parameters\n -----------\n\n print_raw : bool\n Should print the inferred GTR model?\n\n marginal : bool\n Should use marginal sequence reconstruction?\n\n normalized_rate : bool\n If True, will set the mutation rate prefactor to 1.0.\n\n fixed_pi : np.array, None\n Provide the equilibrium character concentrations.\n If None is passed, the concentrations will be inferred from scratch.\n\n pc: float, 5.0\n Number of pseudo counts to use in gtr inference\n\n Returns\n -------\n\n gtr : GTR\n The inferred GTR model.\n \"\"\"\n\n # decide which type of the Maximum-likelihood reconstruction use\n # (marginal) or (joint)\n if marginal:\n _ml_anc = self._ml_anc_marginal\n else:\n _ml_anc = self._ml_anc_joint\n\n self.logger(\"TreeAnc inferring the GTR model from the tree...\", 1)\n _ml_anc(final=True, **kwargs) # call one of the reconstruction types\n alpha = list(self.gtr.alphabet)\n n=len(alpha)\n nij = np.zeros((n,n))\n Ti = np.zeros(n)\n\n self.logger(\"TreeAnc.infer_gtr: counting mutations...\", 2)\n for node in self.tree.find_clades():\n if hasattr(node,'mutations'):\n for a,pos, d in node.mutations:\n i,j = alpha.index(a), alpha.index(d)\n nij[i,j]+=1\n Ti[i] += 0.5*self._branch_length_to_gtr(node)\n Ti[j] -= 0.5*self._branch_length_to_gtr(node)\n for ni,nuc in enumerate(node.cseq):\n i = alpha.index(nuc)\n Ti[i] += self._branch_length_to_gtr(node)*self.multiplicity[ni]\n self.logger(\"TreeAnc.infer_gtr: counting mutations...done\", 3)\n if print_raw:\n print('alphabet:',alpha)\n print('n_ij:', nij)\n print('T_i:', Ti)\n root_state = np.array([np.sum((self.tree.root.cseq==nuc)*self.multiplicity) for nuc in alpha])\n\n self._gtr = GTR.infer(nij, Ti, root_state, fixed_pi=fixed_pi, pc=pc,\n alphabet=self.gtr.alphabet, logger=self.logger,\n prof_map = self.gtr.profile_map)\n if normalized_rate:\n self.logger(\"TreeAnc.infer_gtr: setting overall rate to 1.0...\", 2)\n self._gtr.mu=1.0\n return self._gtr\n\n\n###################################################################\n### ancestral reconstruction\n###################################################################\n def infer_ancestral_sequences(self,*args, **kwargs):\n \"\"\"Shortcut for :meth:`reconstruct_anc`\n\n Reconstruct ancestral states\n\n Parameters\n -----------\n\n method : str\n Method to use. Supported values are \"fitch\" and \"ml\"\n\n Returns\n -------\n\n N_diff : int\n Number of nucleotides different from the previous\n reconstruction. If there were no pre-set sequences, returns N*L\n\n \"\"\"\n self.reconstruct_anc(*args,**kwargs)\n\n\n def reconstruct_anc(self, method='ml', infer_gtr=False, marginal=False, **kwargs):\n \"\"\"\n\n Reconstruct ancestral states\n\n Parameters\n -----------\n\n method : str\n Method to use. Supported values are \"fitch\" and \"ml\"\n\n Returns\n -------\n\n N_diff : int\n Number of nucleotides different from the previous\n reconstruction. 
If there were no pre-set sequences, returns N*L\n \"\"\"\n self.logger(\"TreeAnc.infer_ancestral_sequences: method: \" + method, 1)\n\n if method == 'ml':\n if marginal:\n _ml_anc = self._ml_anc_marginal\n else:\n _ml_anc = self._ml_anc_joint\n else:\n _ml_anc = self._fitch_anc\n\n if infer_gtr:\n self.infer_gtr(marginal=marginal, **kwargs)\n N_diff = _ml_anc(**kwargs)\n else:\n N_diff = _ml_anc(**kwargs)\n\n return N_diff\n\n\n def recover_var_ambigs(self):\n \"\"\"\n Recalculates mutations using the original compressed sequence for terminal nodes\n which will recover ambiguous bases at variable sites. (See 'get_mutations')\n\n Once this has been run, infer_gtr and other functions which depend on self.gtr.alphabet\n will not work, as ambiguous bases are not part of that alphabet (only A, C, G, T, -).\n This is why it's left for the user to choose when to run\n \"\"\"\n for node in self.tree.find_clades(order='preorder'):\n if node.is_terminal():\n node.mutations = self.get_mutations(node, keep_var_ambigs=True)\n\n\n\n def get_mutations(self, node, keep_var_ambigs=False):\n \"\"\"\n Get the mutations on a tree branch. Take compressed sequences from both sides\n of the branch (attached to the node), compute mutations between them, and\n expand these mutations to the positions in the real sequences.\n\n Parameters\n ----------\n\n node : PhyloTree.Clade\n Tree node, which is the child node attached to the branch.\n\n keep_var_ambigs : boolean\n If true, generates mutations based on the *original* _compressed_ sequence, which\n may include ambiguities. Note sites that only have 1 unambiguous base and ambiguous\n bases (\"AAAAANN\") are stripped of ambiguous bases *before* compression, so ambiguous\n bases will *not* be preserved.\n\n Returns\n -------\n\n muts : list\n List of mutations. Each mutation is represented as tuple of\n (parent_state, position, child_state).\n\n \"\"\"\n nodeseq = node.cseq\n if keep_var_ambigs and self.ambigPos and node.is_terminal():\n #use the original compressed sequence with ambiguous positions\n nodeseq = self.ambigPos[node.name]\n muts = []\n for p, (anc, der) in enumerate(izip(node.up.cseq, nodeseq)):\n # only if the states in compressed sequences differ:\n if anc!=der:\n # expand to the positions in real sequence\n muts.extend([(anc, pos, der) for pos in self.reduced_to_full_sequence_map[p]])\n\n #sort by position\n return sorted(muts, key=lambda x:x[1])\n\n\n def expanded_sequence(self, node):\n \"\"\"\n Get node's compressed sequence and expand it to the real sequence\n\n Parameters\n ----------\n\n node : PhyloTree.Clade\n Tree node\n\n Returns\n -------\n\n seq : np.array\n Sequence as np.array of chars\n \"\"\"\n seq = np.zeros_like(self.full_to_reduced_sequence_map, dtype='S1')\n for pos, state in enumerate(node.cseq):\n seq[self.reduced_to_full_sequence_map[pos]] = state\n\n return seq\n\n\n def dict_sequence(self, node, keep_var_ambigs=False):\n \"\"\"\n For VCF-based TreeAnc objects, we do not want to store the entire\n sequence on every node - not space efficient! Instead, return the dict\n of mutation locations for this sequence. This is used in place of\n 'expanded_sequence' for VCF-based obj throughout TreeAnc. However, users\n can still call 'expanded_sequence' if they do actually want the whole thing!\n\n Parameters\n ----------\n node : PhyloTree.Clade\n Tree node\n\n keep_var_ambigs : boolean\n If true, generates dict sequence based on the *original* _compressed_ sequence, which\n may include ambiguities. 
Note sites that only have 1 unambiguous base and ambiguous\n bases (\"AAAAANN\") are stripped of ambiguous bases *before* compression, so ambiguous\n bases will *not* be preserved.\n\n Returns\n -------\n seq : dict\n dict where keys are position and value is the mutation\n\n EBH 6 Dec 2017\n \"\"\"\n seq = {}\n\n nodeseq = node.cseq\n if keep_var_ambigs and self.ambigPos and node.is_terminal():\n #use the original compressed sequence with ambiguous positions\n nodeseq = self.ambigPos[node.name]\n\n for pos in self.nonref_positions:\n cseqLoc = self.full_to_reduced_sequence_map[pos]\n base = nodeseq[cseqLoc]\n if self.ref[pos] != base:\n seq[pos] = base\n\n return seq\n\n###################################################################\n### FITCH\n###################################################################\n def _fitch_anc(self, **kwargs):\n \"\"\"\n Reconstruct ancestral states using Fitch's algorithm. The method requires\n sequences to be assigned to leaves. It implements the iteration from\n leaves to the root constructing the Fitch profiles for each character of\n the sequence, and then by propagating from the root to the leaves,\n reconstructs the sequences of the internal nodes.\n\n Keyword Args\n ------------\n\n\n Returns\n -------\n\n Ndiff : int\n Number of the characters that changed since the previous\n reconstruction. These changes are determined from the pre-set\n sequence attributes of the nodes. If there are no sequences available\n (i.e., no reconstruction has been made before), returns the total\n number of characters in the tree.\n\n \"\"\"\n # set fitch profiiles to each terminal node\n\n for l in self.tree.get_terminals():\n l.state = [[k] for k in l.cseq]\n\n L = len(self.tree.get_terminals()[0].cseq)\n\n self.logger(\"TreeAnc._fitch_anc: Walking up the tree, creating the Fitch profiles\",2)\n for node in self.tree.get_nonterminals(order='postorder'):\n node.state = [self._fitch_state(node, k) for k in range(L)]\n\n ambs = [i for i in range(L) if len(self.tree.root.state[i])>1]\n if len(ambs) > 0:\n for amb in ambs:\n self.logger(\"Ambiguous state of the root sequence \"\n \"in the position %d: %s, \"\n \"choosing %s\" % (amb, str(self.tree.root.state[amb]),\n self.tree.root.state[amb][0]), 4)\n self.tree.root.cseq = np.array([k[np.random.randint(len(k)) if len(k)>1 else 0]\n for k in self.tree.root.state])\n\n if self.is_vcf:\n self.tree.root.sequence = self.dict_sequence(self.tree.root)\n else:\n self.tree.root.sequence = self.expanded_sequence(self.tree.root)\n\n\n self.logger(\"TreeAnc._fitch_anc: Walking down the self.tree, generating sequences from the \"\n \"Fitch profiles.\", 2)\n N_diff = 0\n for node in self.tree.get_nonterminals(order='preorder'):\n if node.up != None: # not root\n sequence = np.array([node.up.cseq[i]\n if node.up.cseq[i] in node.state[i]\n else node.state[i][0] for i in range(L)])\n if hasattr(node, 'sequence'):\n N_diff += (sequence!=node.cseq).sum()\n else:\n N_diff += L\n node.cseq = sequence\n if self.is_vcf:\n node.sequence = self.dict_sequence(node)\n else:\n node.sequence = self.expanded_sequence(node)\n node.mutations = self.get_mutations(node)\n\n node.profile = seq_utils.seq2prof(node.cseq, self.gtr.profile_map)\n del node.state # no need to store Fitch states\n self.logger(\"Done ancestral state reconstruction\",3)\n for node in self.tree.get_terminals():\n node.profile = seq_utils.seq2prof(node.cseq, self.gtr.profile_map)\n return N_diff\n\n def _fitch_state(self, node, pos):\n \"\"\"\n Determine the Fitch profile for 
a single character of the node's sequence.\n The profile is essentially the intersection between the children's\n profiles or, if the former is empty, the union of the profiles.\n\n Parameters\n ----------\n\n node : PhyloTree.Clade:\n Internal node which the profiles are to be determined\n\n pos : int\n Position in the node's sequence which the profiles should\n be determinedf for.\n\n Returns\n -------\n state : numpy.array\n Fitch profile for the character at position pos of the given node.\n \"\"\"\n state = self._fitch_intersect([k.state[pos] for k in node.clades])\n if len(state) == 0:\n state = np.concatenate([k.state[pos] for k in node.clades])\n return state\n\n def _fitch_intersect(self, arrays):\n \"\"\"\n Find the intersection of any number of 1D arrays.\n Return the sorted, unique values that are in all of the input arrays.\n Adapted from numpy.lib.arraysetops.intersect1d\n \"\"\"\n def pairwise_intersect(arr1, arr2):\n s2 = set(arr2)\n b3 = [val for val in arr1 if val in s2]\n return b3\n\n arrays = list(arrays) # allow assignment\n N = len(arrays)\n while N > 1:\n arr1 = arrays.pop()\n arr2 = arrays.pop()\n arr = pairwise_intersect(arr1, arr2)\n arrays.append(arr)\n N = len(arrays)\n\n return arrays[0]\n\n\n\n###################################################################\n### Maximum Likelihood\n###################################################################\n\n def ancestral_likelihood(self):\n \"\"\"\n Calculate the likelihood of the given realization of the sequences in\n the tree\n\n Returns\n -------\n\n log_lh : float\n The tree likelihood given the sequences\n \"\"\"\n log_lh = np.zeros(self.tree.root.cseq.shape[0])\n for node in self.tree.find_clades(order='postorder'):\n\n if node.up is None: # root node\n # 0-1 profile\n profile = seq_utils.seq2prof(node.cseq, self.gtr.profile_map)\n # get the probabilities to observe each nucleotide\n profile *= self.gtr.Pi\n profile = profile.sum(axis=1)\n log_lh += np.log(profile) # product over all characters\n continue\n\n t = node.branch_length\n\n indices = np.array([(np.argmax(self.gtr.alphabet==a),\n np.argmax(self.gtr.alphabet==b)) for a, b in izip(node.up.cseq, node.cseq)])\n\n logQt = np.log(self.gtr.expQt(t))\n lh = logQt[indices[:, 1], indices[:, 0]]\n log_lh += lh\n\n return log_lh\n\n def _branch_length_to_gtr(self, node):\n \"\"\"\n Set branch lengths to either mutation lengths of given branch lengths.\n The assigend values are to be used in the following ML analysis.\n \"\"\"\n if self.use_mutation_length:\n return max(ttconf.MIN_BRANCH_LENGTH*self.one_mutation, node.mutation_length)\n else:\n return max(ttconf.MIN_BRANCH_LENGTH*self.one_mutation, node.branch_length)\n\n\n def _ml_anc_marginal(self, verbose=0, store_compressed=True, final=True,\n sample_from_profile=False,\n debug=False, **kwargs):\n \"\"\"\n Perform marginal ML reconstruction of the ancestral states. In contrast to\n joint reconstructions, this needs to access the probabilities rather than only\n log probabilities and is hence handled by a separate function.\n\n Keyword Args\n ------------\n\n store_lh : bool\n If True, all likelihoods will be stored for all nodes. 
Useful for\n testing, diagnostics and if special post-processing is required.\n\n verbose :int\n How verbose the output should be\n \"\"\"\n\n tree = self.tree\n # number of nucleotides changed from prev reconstruction\n N_diff = 0\n\n L = self.tree.get_terminals()[0].cseq.shape[0]\n n_states = self.gtr.alphabet.shape[0]\n self.logger(\"TreeAnc._ml_anc_marginal: type of reconstruction: Marginal\", 2)\n\n self.logger(\"Walking up the tree, computing likelihoods... \", 3)\n # set the leaves profiles\n for leaf in tree.get_terminals():\n # in any case, set the profile\n leaf.marginal_subtree_LH = seq_utils.seq2prof(leaf.cseq, self.gtr.profile_map)\n leaf.marginal_subtree_LH_prefactor = np.zeros(L)\n\n # propagate leaves -->> root, set the marginal-likelihood messages\n for node in tree.get_nonterminals(order='postorder'): #leaves -> root\n # regardless of what was before, set the profile to ones\n node.marginal_subtree_LH_prefactor = np.zeros(L)\n node.marginal_subtree_LH = np.ones((L, n_states)) # we will multiply it\n for ch in node.clades:\n ch.marginal_Lx = self.gtr.propagate_profile(ch.marginal_subtree_LH,\n self._branch_length_to_gtr(ch), return_log=False) # raw prob to transfer prob up\n node.marginal_subtree_LH *= ch.marginal_Lx\n node.marginal_subtree_LH_prefactor += ch.marginal_subtree_LH_prefactor\n\n pre = node.marginal_subtree_LH.sum(axis=1) #sum over nucleotide states\n node.marginal_subtree_LH = (node.marginal_subtree_LH.T/pre).T # normalize so that the sum is 1\n node.marginal_subtree_LH_prefactor += np.log(pre) # and store log-prefactor\n\n self.logger(\"Computing root node sequence and total tree likelihood...\",3)\n # reconstruct the root node sequence\n tree.root.marginal_subtree_LH *= self.gtr.Pi # Msg to the root from the distant part (equ frequencies)\n pre=tree.root.marginal_subtree_LH.sum(axis=1)\n tree.root.marginal_profile = (tree.root.marginal_subtree_LH.T/pre).T\n tree.root.marginal_subtree_LH_prefactor += np.log(pre)\n\n # choose sequence characters from this profile.\n # treat root node differently to avoid piling up mutations on the longer branch\n if sample_from_profile=='root':\n root_sample_from_profile = True\n other_sample_from_profile = False\n elif isinstance(sample_from_profile, bool):\n root_sample_from_profile = sample_from_profile\n other_sample_from_profile = sample_from_profile\n\n seq, prof_vals, idxs = seq_utils.prof2seq(tree.root.marginal_profile,\n self.gtr, sample_from_prof=root_sample_from_profile)\n\n self.tree.sequence_LH = np.log(prof_vals) + tree.root.marginal_subtree_LH_prefactor\n self.tree.sequence_marginal_LH = (self.tree.sequence_LH*self.multiplicity).sum()\n self.tree.root.cseq = seq\n if final:\n if self.is_vcf:\n self.tree.root.sequence = self.dict_sequence(self.tree.root)\n else:\n self.tree.root.sequence = self.expanded_sequence(self.tree.root)\n\n # need this fake msg to account for the complementary subtree when traversing tree back\n tree.root.seq_msg_from_parent = np.repeat([self.gtr.Pi], len(tree.root.cseq), axis=0)\n\n self.logger(\"Walking down the tree, computing maximum likelihood sequences...\",3)\n # propagate root -->> leaves, reconstruct the internal node sequences\n # provided the upstream message + the message from the complementary subtree\n for node in tree.find_clades(order='preorder'):\n if node.up is None: # skip if node is root\n continue\n\n # integrate the information coming from parents with the information\n # of all children my multiplying it to the prev computed profile\n tmp_msg = 
np.copy(node.up.seq_msg_from_parent)\n for c in node.up.clades:\n if c != node:\n tmp_msg*=c.marginal_Lx\n norm_vector = tmp_msg.sum(axis=1)\n tmp_msg=(tmp_msg.T/norm_vector).T\n node.seq_msg_from_parent = self.gtr.propagate_profile(tmp_msg,\n self._branch_length_to_gtr(node), return_log=False)\n node.marginal_profile = node.marginal_subtree_LH * node.seq_msg_from_parent\n\n norm_vector = node.marginal_profile.sum(axis=1)\n node.marginal_profile=(node.marginal_profile.T/norm_vector).T\n # choose sequence based maximal marginal LH.\n seq, prof_vals, idxs = seq_utils.prof2seq(node.marginal_profile, self.gtr,\n sample_from_prof=other_sample_from_profile)\n\n if hasattr(node, 'cseq') and node.cseq is not None:\n N_diff += (seq!=node.cseq).sum()\n else:\n N_diff += L\n\n #assign new sequence\n node.cseq = seq\n if final:\n if self.is_vcf:\n node.sequence = self.dict_sequence(node)\n else:\n node.sequence = self.expanded_sequence(node)\n node.mutations = self.get_mutations(node)\n\n\n # note that the root doesn't contribute to N_diff (intended, since root sequence is often ambiguous)\n self.logger(\"TreeAnc._ml_anc_marginal: ...done\", 3)\n if store_compressed:\n self._store_compressed_sequence_pairs()\n\n # do clean-up:\n if not debug:\n for node in self.tree.find_clades():\n del node.marginal_subtree_LH\n del node.marginal_subtree_LH_prefactor\n del node.seq_msg_from_parent\n\n return N_diff\n\n\n def _ml_anc_joint(self, verbose=0, store_compressed=True, final=True,\n sample_from_profile=False,\n debug=False, **kwargs):\n\n \"\"\"\n Perform joint ML reconstruction of the ancestral states. In contrast to\n marginal reconstructions, this only needs to compare and multiply LH and\n can hence operate in log space.\n\n Keyword Args\n ------------\n\n store_lh : bool\n If True, all likelihoods will be stored for all nodes. Useful for\n testing, diagnostics and if special post-processing is required.\n\n verbose : int\n How verbose the output should be\n\n \"\"\"\n N_diff = 0 # number of sites differ from perv reconstruction\n L = self.tree.get_terminals()[0].cseq.shape[0]\n n_states = self.gtr.alphabet.shape[0]\n\n self.logger(\"TreeAnc._ml_anc_joint: type of reconstruction: Joint\", 2)\n\n self.logger(\"TreeAnc._ml_anc_joint: Walking up the tree, computing likelihoods... 
\", 3)\n # for the internal nodes, scan over all states j of this node, maximize the likelihood\n for node in self.tree.find_clades(order='postorder'):\n if node.up is None:\n node.joint_Cx=None # not needed for root\n continue\n\n # preallocate storage\n node.joint_Lx = np.zeros((L, n_states)) # likelihood array\n node.joint_Cx = np.zeros((L, n_states), dtype=int) # max LH indices\n branch_len = self._branch_length_to_gtr(node)\n # transition matrix from parent states to the current node states.\n # denoted as Pij(i), where j - parent state, i - node state\n log_transitions = np.log(self.gtr.expQt(branch_len))\n\n if node.is_terminal():\n msg_from_children = np.log(np.maximum(seq_utils.seq2prof(node.cseq, self.gtr.profile_map), ttconf.TINY_NUMBER))\n msg_from_children[np.isnan(msg_from_children) | np.isinf(msg_from_children)] = -ttconf.BIG_NUMBER\n else:\n # Product (sum-Log) over all child subtree likelihoods.\n # this is prod_ch L_x(i)\n msg_from_children = np.sum(np.stack([c.joint_Lx for c in node.clades], axis=0), axis=0)\n\n # for every possible state of the parent node,\n # get the best state of the current node\n # and compute the likelihood of this state\n for char_i, char in enumerate(self.gtr.alphabet):\n # Pij(i) * L_ch(i) for given parent state j\n msg_to_parent = (log_transitions.T[char_i, :] + msg_from_children)\n # For this parent state, choose the best state of the current node:\n node.joint_Cx[:, char_i] = msg_to_parent.argmax(axis=1)\n # compute the likelihood of the best state of the current node\n # given the state of the parent (char_i)\n node.joint_Lx[:, char_i] = msg_to_parent.max(axis=1)\n\n # root node profile = likelihood of the total tree\n msg_from_children = np.sum(np.stack([c.joint_Lx for c in self.tree.root.clades], axis = 0), axis=0)\n # Pi(i) * Prod_ch Lch(i)\n self.tree.root.joint_Lx = msg_from_children + np.log(self.gtr.Pi)\n normalized_profile = (self.tree.root.joint_Lx.T - self.tree.root.joint_Lx.max(axis=1)).T\n\n # choose sequence characters from this profile.\n # treat root node differently to avoid piling up mutations on the longer branch\n if sample_from_profile=='root':\n root_sample_from_profile = True\n elif isinstance(sample_from_profile, bool):\n root_sample_from_profile = sample_from_profile\n\n seq, anc_lh_vals, idxs = seq_utils.prof2seq(np.exp(normalized_profile),\n self.gtr, sample_from_prof = root_sample_from_profile)\n\n # compute the likelihood of the most probable root sequence\n self.tree.sequence_LH = np.choose(idxs, self.tree.root.joint_Lx.T)\n self.tree.sequence_joint_LH = (self.tree.sequence_LH*self.multiplicity).sum()\n self.tree.root.cseq = seq\n self.tree.root.seq_idx = idxs\n if final:\n if self.is_vcf:\n self.tree.root.sequence = self.dict_sequence(self.tree.root)\n else:\n self.tree.root.sequence = self.expanded_sequence(self.tree.root)\n\n self.logger(\"TreeAnc._ml_anc_joint: Walking down the tree, computing maximum likelihood sequences...\",3)\n # for each node, resolve the conditioning on the parent node\n for node in self.tree.find_clades(order='preorder'):\n\n # root node has no mutations, everything else has been alread y set\n if node.up is None:\n node.mutations = []\n continue\n\n # choose the value of the Cx(i), corresponding to the state of the\n # parent node i. 
This is the state of the current node\n node.seq_idx = np.choose(node.up.seq_idx, node.joint_Cx.T)\n # reconstruct seq, etc\n tmp_sequence = np.choose(node.seq_idx, self.gtr.alphabet)\n if hasattr(node, 'sequence') and node.cseq is not None:\n N_diff += (tmp_sequence!=node.cseq).sum()\n else:\n N_diff += L\n\n node.cseq = tmp_sequence\n\n if final:\n node.mutations = self.get_mutations(node)\n if self.is_vcf:\n node.sequence = self.dict_sequence(node)\n else:\n node.sequence = self.expanded_sequence(node)\n\n\n self.logger(\"TreeAnc._ml_anc_joint: ...done\", 3)\n if store_compressed:\n self._store_compressed_sequence_pairs()\n\n # do clean-up\n if not debug:\n for node in self.tree.find_clades(order='preorder'):\n del node.joint_Lx\n del node.joint_Cx\n del node.seq_idx\n\n return N_diff\n\n\n def _store_compressed_sequence_to_node(self, node):\n \"\"\"\n make a compressed representation of a pair of sequences only counting\n the number of times a particular pair of states (e.g. (A,T)) is observed\n the the aligned sequences of parent and child.\n\n Parameters\n -----------\n\n node : PhyloTree.Clade\n Tree node. **Note** because the method operates\n on the sequences on both sides of a branch, sequence reconstruction\n must be performed prior to calling this method.\n\n \"\"\"\n seq_pairs, multiplicity = self.gtr.compress_sequence_pair(node.up.cseq,\n node.cseq,\n pattern_multiplicity = self.multiplicity,\n ignore_gaps = self.ignore_gaps)\n node.compressed_sequence = {'pair':seq_pairs, 'multiplicity':multiplicity}\n\n\n def _store_compressed_sequence_pairs(self):\n \"\"\"\n Traverse the tree, and for each node store the compressed sequence pair.\n **Note** sequence reconstruction should be performed prior to calling\n this method.\n \"\"\"\n self.logger(\"TreeAnc._store_compressed_sequence_pairs...\",2)\n for node in self.tree.find_clades():\n if node.up is None:\n continue\n self._store_compressed_sequence_to_node(node)\n self.logger(\"TreeAnc._store_compressed_sequence_pairs...done\",3)\n\n\n###################################################################\n### Branch length\n###################################################################\n def optimize_branch_len(self, **kwargs):\n self.optimize_branch_length(**kwargs)\n\n def optimize_branch_length(self, **kwargs):\n \"\"\"\n Perform optimization for the branch lengths of the whole tree or any\n subtree. 
**Note** this method assumes that each node stores information\n about its sequence as numpy.array object (node.sequence attribute).\n Therefore, before calling this method, sequence reconstruction with\n either of the available models must be performed.\n\n Keyword Args\n ------------\n\n verbose : int\n Output detalization\n\n store_old : bool\n If True, the old lenths will be saved in node._old_dist attribute.\n Useful for testing, and special post-processing.\n\n Returns\n -------\n None, the phylogenetic tree is modified in-place.\n\n \"\"\"\n\n self.logger(\"TreeAnc.optimize_branch_length: running branch length optimization...\",1)\n\n verbose = 0\n store_old_dist = False\n\n if 'verbose' in kwargs:\n verbose = int(kwargs['verbose'])\n if 'store_old' in kwargs:\n store_old_dist = kwargs['store_old'] == True\n\n for node in self.tree.find_clades(order='postorder'):\n if node.up is None: continue # this is the root\n if store_old_dist:\n node._old_length = node.branch_length\n\n new_len = self.optimal_branch_length(node)\n\n if new_len < 0:\n continue\n\n self.logger(\"Optimization results: old_len=%.4f, new_len=%.4f \"\n \" Updating branch length...\"%(node.branch_length, new_len), 5)\n\n node.branch_length = new_len\n node.mutation_length=new_len\n\n # as branch lengths changed, the params must be fixed\n self.tree.root.up = None\n self.tree.root.dist2root = 0.0\n self._prepare_nodes()\n\n\n def optimal_branch_length(self, node):\n '''\n Calculate optimal branch length given the sequences of node and parent\n\n Parameters\n -----------\n\n node : PhyloTree.Clade\n TreeNode, attached to the branch.\n\n Returns\n -------\n\n new_len : float\n Optimal length of the given branch\n\n '''\n if node.up is None:\n return self.one_mutation\n\n parent = node.up\n if hasattr(node, 'compressed_sequence'):\n new_len = self.gtr.optimal_t_compressed(node.compressed_sequence['pair'],\n node.compressed_sequence['multiplicity'])\n else:\n new_len = self.gtr.optimal_t(parent.cseq, node.cseq,\n pattern_multiplicity=self.multiplicity,\n ignore_gaps=self.ignore_gaps)\n return new_len\n\n\n def prune_short_branches(self):\n \"\"\"\n If the branch length is less than the minimal value, remove the branch\n from the tree. **Requires** the ancestral sequence reconstruction\n \"\"\"\n self.logger(\"TreeAnc.prune_short_branches: pruning short branches (max prob at zero)...\", 1)\n for node in self.tree.find_clades():\n if node.up is None or node.is_terminal():\n continue\n\n # probability of the two seqs separated by zero time is not zero\n if self.gtr.prob_t(node.up.cseq, node.cseq, 0.0,\n pattern_multiplicity=self.multiplicity) > 0.1:\n # re-assign the node children directly to its parent\n node.up.clades = [k for k in node.up.clades if k != node] + node.clades\n for clade in node.clades:\n clade.up = node.up\n\n def optimize_sequences_and_branch_length(self,*args, **kwargs):\n \"\"\"This method is a schortcut for :py:meth:`optimize_seq_and_branch_len`\n\n Iteratively set branch lengths and reconstruct ancestral sequences until\n the values of either former or latter do not change. The algorithm assumes\n knowing only the topology of the tree, and requires that sequences are assigned\n to all leaves of the tree. The first step is to pre-reconstruct ancestral\n states using Fitch reconstruction algorithm or ML using existing branch length\n estimates. 
Then, optimize branch lengths and re-do reconstruction until\n convergence using ML method.\n \"\"\"\n self.optimize_seq_and_branch_len(*args,**kwargs)\n\n def optimize_seq_and_branch_len(self,reuse_branch_len=True,prune_short=True,\n max_iter=5, infer_gtr=False, **kwargs):\n \"\"\"\n Iteratively set branch lengths and reconstruct ancestral sequences until\n the values of either former or latter do not change. The algorithm assumes\n knowing only the topology of the tree, and requires that sequences are assigned\n to all leaves of the tree. The first step is to pre-reconstruct ancestral\n states using Fitch reconstruction algorithm or ML using existing branch length\n estimates. Then, optimize branch lengths and re-do reconstruction until\n convergence using ML method.\n\n Parameters\n -----------\n\n reuse_branch_len : bool, default True\n If True, rely on the initial branch lenghts, and start with the\n Maximum-likelihood ancestral sequence inference using existing branch\n lengths. Otherwise, initial reconstruction of ancestral states with\n Fitch algorithm, which uses only the tree topology.\n\n prune_short : bool, default True\n If True, the branches with zero optimal length will be pruned from\n the tree hence creating polytomies. The polytomies could be further\n processde using resolve_polytomies from the TreeTime class.\n\n \"\"\"\n self.logger(\"TreeAnc.optimize_sequences_and_branch_length: sequences...\", 1)\n if reuse_branch_len:\n N_diff = self.reconstruct_anc(method='ml', infer_gtr=infer_gtr, **kwargs)\n else:\n N_diff = self.reconstruct_anc(method='fitch', infer_gtr=infer_gtr, **kwargs)\n\n self.optimize_branch_len(verbose=0, store_old=False)\n\n n = 0\n while n<max_iter:\n n += 1\n if prune_short:\n self.prune_short_branches()\n N_diff = self.reconstruct_anc(method='ml', infer_gtr=False,**kwargs)\n\n self.logger(\"TreeAnc.optimize_sequences_and_branch_length: Iteration %d.\"\n \" #Nuc changed since prev reconstructions: %d\" %(n, N_diff), 2)\n\n if N_diff < 1:\n break\n self.optimize_branch_len(verbose=0, store_old=False)\n\n self.tree.unconstrained_sequence_LH = (self.tree.sequence_LH*self.multiplicity).sum()\n self._prepare_nodes() # fix dist2root and up-links after reconstruction\n self.logger(\"TreeAnc.optimize_sequences_and_branch_length: Unconstrained sequence LH:%f\" % self.tree.unconstrained_sequence_LH , 2)\n return\n\n###############################################################################\n### Utility functions\n###############################################################################\n def get_reconstructed_alignment(self):\n \"\"\"\n Get the multiple sequence alignment including reconstructed sequences for\n the internal nodes.\n \"\"\"\n from Bio.Align import MultipleSeqAlignment\n from Bio.Seq import Seq\n from Bio.SeqRecord import SeqRecord\n self.logger(\"TreeAnc.get_reconstructed_alignment ...\",2)\n if not hasattr(self.tree.root, 'sequence'):\n self.logger(\"TreeAnc.reconstructed_alignment... 
reconstruction not yet done\",3)\n self.reconstruct_anc('ml')\n\n new_aln = MultipleSeqAlignment([SeqRecord(id=n.name, seq=Seq(\"\".join(n.sequence)), description=\"\")\n for n in self.tree.find_clades()])\n\n return new_aln\n\n def get_tree_dict(self, keep_var_ambigs=False):\n \"\"\"\n For VCF-based objects, returns a nested dict with all information required to\n reconstruct sequences for all nodes (terminal and internal) in the format:\n {'reference':'AGCTCGA..A',\n 'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },\n 'positions': [1,4,7,10,100...],\n 'inferred_const_sites': [7,100....] <this is optional>\n }\n self.inferred_const_sites\n\n Reference being the reference sequence to which the variable sites are mapped;\n sequence containing a dict for each sequence with the position and base of\n mutations; and positions containing a list of all the variable positions.\n If included, inferred_const_sites is positions that were constant except\n ambiguous bases, which were converted into constant sites (ex: 'AAAN' -> 'AAAA')\n\n keep_var_ambigs : boolean\n If true, generates dict sequence based on the *original* _compressed_ sequence, which\n may include ambiguities. Note sites that only have 1 unambiguous base and ambiguous\n bases (\"AAAAANN\") are stripped of ambiguous bases *before* compression, so ambiguous\n bases will *not* be preserved.\n\n EBH 7 Dec 2017\n \"\"\"\n if self.is_vcf:\n tree_dict = {}\n tree_dict['reference'] = self.ref\n tree_dict['positions'] = self.nonref_positions\n\n tree_aln = {}\n for n in self.tree.find_clades():\n if hasattr(n, 'sequence'):\n if keep_var_ambigs: #regenerate dict to include ambig bases\n tree_aln[n.name] = self.dict_sequence(n, keep_var_ambigs)\n else:\n tree_aln[n.name] = n.sequence\n\n tree_dict['sequences'] = tree_aln\n\n if len(self.inferred_const_sites) != 0:\n tree_dict['inferred_const_sites'] = self.inferred_const_sites\n\n return tree_dict\n else:\n raise(\"A dict can only be returned for trees created with VCF-input!\")\n\n\nif __name__==\"__main__\":\n\n from Bio import Phylo\n from StringIO import StringIO\n from Bio import Phylo,AlignIO\n\n tiny_tree = Phylo.read(StringIO(\"((A:.0060,B:.30)C:.030,D:.020)E:.004;\"), 'newick')\n tiny_aln = AlignIO.read(StringIO(\">A\\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\"\n 
\">B\\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\"\n \">C\\nAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTT\\n\"\n \">D\\nAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTTAAAACCCCGGGGTTTT\\n\"\n 
\">E\\nACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT\\n\"), 'fasta')\n\n mygtr = GTR.custom(alphabet = np.array(['A', 'C', 'G', 'T']),\n pi = np.array([0.25, 0.95, 0.005, 0.05]), W=np.ones((4,4)))\n\n myTree = TreeAnc(gtr=mygtr, tree = tiny_tree,\n aln =tiny_aln, verbose = 4)\n\n logLH = myTree.ancestral_likelihood()\n LH = np.exp(logLH)\n print (\"Net probability (for all possible realizations): \" + str(np.exp(logLH).sum()))\n print (np.exp(logLH))\n"
] | [
[
"numpy.zeros_like",
"numpy.ones",
"numpy.sum",
"numpy.zeros",
"numpy.stack",
"numpy.isinf",
"numpy.exp",
"numpy.copy",
"numpy.where",
"numpy.argmax",
"numpy.log",
"numpy.isnan",
"numpy.array",
"numpy.concatenate",
"numpy.fromstring",
"numpy.unique",
"numpy.choose"
]
] |
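
The row above archives treetime's `TreeAnc` ancestral-reconstruction module; its `_fitch_state`/`_fitch_intersect` pair implements the core Fitch rule (intersection of child state sets, union as fallback). A minimal standalone sketch of that rule, with toy state sets invented here:

```python
# Standalone sketch of the Fitch set rule from the TreeAnc file above:
# a node's candidate states are the intersection of its children's sets,
# falling back to the union when the intersection is empty. Toy inputs.

def fitch_intersect(arrays):
    # Pairwise intersection, keeping the order of the remaining array
    arrays = list(arrays)
    while len(arrays) > 1:
        s2 = set(arrays.pop())
        arr1 = arrays.pop()
        arrays.append([v for v in arr1 if v in s2])
    return arrays[0]

def fitch_state(child_states):
    state = fitch_intersect(child_states)
    if not state:  # empty intersection -> take the union of the child sets
        state = [s for states in child_states for s in states]
    return state

print(fitch_state([['A', 'C'], ['C', 'G']]))  # ['C']
print(fitch_state([['A'], ['G']]))            # ['A', 'G']
```
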
SimoneGasperini/rboost | [
"5e0108d821077da76964e1e797f0d775b3999f56"
] | [
"rboost/gui/listlabels.py"
] | [
"import pandas as pd\nfrom PySide2.QtWidgets import (\n QWidget,\n QHBoxLayout,\n QVBoxLayout,\n QFormLayout,\n QTableView,\n QPushButton,\n QComboBox,\n QHeaderView\n)\n\nfrom rboost.gui.utils.pandasmodel import PandasModel\n\n\nclass ListLabelsWindow(QWidget):\n\n def __init__(self, rboost):\n super().__init__()\n self.rboost = rboost\n\n self.layout = QVBoxLayout()\n self._add_form_layout()\n self._add_buttons_layout()\n self.table_view = None\n self._add_table_view_layout()\n self.setLayout(self.layout)\n\n def _add_form_layout(self):\n labtype_form = self._create_labtype_form()\n self.layout.addLayout(labtype_form)\n\n def _add_buttons_layout(self):\n self.buttons_layout = QHBoxLayout()\n show_button = QPushButton('Show list')\n show_button.clicked.connect(self.show_table)\n clear_button = QPushButton('Clear list')\n clear_button.clicked.connect(self.clear_table)\n self.buttons_layout.addWidget(show_button)\n self.buttons_layout.addWidget(clear_button)\n self.layout.addLayout(self.buttons_layout)\n\n def _add_table_view_layout(self, df=None):\n self.table_view_layout = QHBoxLayout()\n if self.table_view is not None:\n self.table_view_layout.removeWidget(self.table_view)\n self.table_view.deleteLater()\n self.table_view = self._create_table_view(df=df)\n self.table_view_layout.addWidget(self.table_view)\n self.layout.addLayout(self.table_view_layout)\n\n def _create_labtype_form(self):\n labtype_form = QFormLayout()\n items = [None] + sorted(list(self.rboost.labtypes))\n self.labtype_combobox = QComboBox()\n self.labtype_combobox.addItems(items)\n labtype_form.addRow('Label type', self.labtype_combobox)\n return labtype_form\n\n def _create_table_view(self, df):\n if df is None:\n df = pd.DataFrame()\n model = PandasModel(df)\n table_view = QTableView()\n table_view.setModel(model)\n table_view.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n return table_view\n\n def _get_labels(self):\n labels = [self.rboost.network.graph.nodes[n]['label']\n for n in self.rboost.network.graph.nodes]\n labtype = str(self.labtype_combobox.currentText())\n if labtype:\n labels = [label for label in labels if labtype in label.types]\n labels.sort(reverse=True)\n return labels\n\n def _get_dataframe(self, labels):\n columns = [self.rboost.labels_df_cols[k]\n for k in ['label', 'types', 'queries', 'uploads']]\n data = [[lab.name, lab.types, lab.queries_count, lab.uploads_count]\n for lab in labels]\n df = pd.DataFrame(data=data, columns=columns)\n return df\n\n def show_table(self):\n labels = self._get_labels()\n df = self._get_dataframe(labels=labels)\n self._add_table_view_layout(df=df)\n self.setLayout(self.layout)\n\n def clear_table(self):\n empty_df = pd.DataFrame()\n self._add_table_view_layout(df=empty_df)\n self.setLayout(self.layout)\n"
] | [
[
"pandas.DataFrame"
]
] |
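
`ListLabelsWindow` above is a plain PySide2 widget, so a minimal launch harness only needs a `QApplication`. A hypothetical sketch follows; `my_rboost` stands in for an initialized RBoost application object (not shown in the row) and the import path follows the row's file_path:

```python
# Hypothetical launch harness for the widget above. `my_rboost` is assumed
# to be an initialized RBoost object exposing .labtypes, .network, etc.
import sys
from PySide2.QtWidgets import QApplication
from rboost.gui.listlabels import ListLabelsWindow

app = QApplication(sys.argv)
window = ListLabelsWindow(rboost=my_rboost)  # my_rboost: assumed, see above
window.show()
sys.exit(app.exec_())
```
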
eladapplbaum/IML.HUJI | [
"6a08721e143b0d766f7085c70882f32f60088550"
] | [
"IMLearn/learners/gaussian_estimators.py"
] | [
"from __future__ import annotations\nimport numpy as np\nfrom numpy.linalg import inv, det, slogdet\n\n\nclass UnivariateGaussian:\n \"\"\"\n Class for univariate Gaussian Distribution Estimator\n \"\"\"\n\n def __init__(self, biased_var: bool = False) -> UnivariateGaussian:\n \"\"\"\n Estimator for univariate Gaussian mean and variance parameters\n\n Parameters\n ----------\n biased_var : bool, default=False\n Should fitted estimator of variance be a biased or unbiased estimator\n\n Attributes\n ----------\n fitted_ : bool\n Initialized as false indicating current estimator instance has not been fitted.\n To be set as True in `UnivariateGaussian.fit` function.\n\n mu_: float\n Estimated expectation initialized as None. To be set in `UnivariateGaussian.fit`\n function.\n\n var_: float\n Estimated variance initialized as None. To be set in `UnivariateGaussian.fit`\n function.\n \"\"\"\n self.biased_ = biased_var\n self.fitted_, self.mu_, self.var_ = False, None, None\n\n def fit(self, X: np.ndarray) -> UnivariateGaussian:\n \"\"\"\n Estimate Gaussian expectation and variance from given samples\n\n Parameters\n ----------\n X: ndarray of shape (n_samples, )\n Training data\n\n Returns\n -------\n self : returns an instance of self.\n\n Notes\n -----\n Sets `self.mu_`, `self.var_` attributes according to calculated estimation (where\n estimator is either biased or unbiased). Then sets `self.fitted_` attribute to `True`\n \"\"\"\n\n self.mu_ = sum(X) / X.size\n var_sum = 0\n for s in X:\n var_sum += (s - self.mu_) ** 2\n\n self.var_ = (var_sum / (X.size if self.biased_ else X.size - 1)) ** 0.5\n\n self.fitted_ = True\n return self\n\n def pdf(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculate PDF of observations under Gaussian model with fitted estimators\n\n Parameters\n ----------\n X: ndarray of shape (n_samples, )\n Samples to calculate PDF for\n\n Returns\n -------\n pdfs: ndarray of shape (n_samples, )\n Calculated values of given samples for PDF function of N(mu_, var_)\n\n Raises\n ------\n ValueError: In case function was called prior fitting the model\n \"\"\"\n if not self.fitted_:\n raise ValueError(\n \"Estimator must first be fitted before calling `pdf` function\")\n pdfs = np.ndarray(X.size)\n for i in range(X.size):\n pdfs[i] = np.exp(\n -((X[i] - self.mu_) ** 2) / 2 * self.var_) / np.sqrt(\n 2 * np.pi * self.var_)\n return pdfs\n\n @staticmethod\n def log_likelihood(mu: float, sigma: float, X: np.ndarray) -> float:\n \"\"\"\n Calculate the log-likelihood of the data under a specified Gaussian model\n\n Parameters\n ----------\n mu : float\n Expectation of Gaussian\n sigma : float\n Variance of Gaussian\n X : ndarray of shape (n_samples, )\n Samples to calculate log-likelihood with\n\n Returns\n -------\n log_likelihood: float\n log-likelihood calculated\n \"\"\"\n temp_sum = 0\n for i in range(X.size):\n temp_sum += (X[i] - mu) ** 2\n return -(X.size / 2) * (\n np.log(2 * np.pi) + np.log(sigma)) - temp_sum / (\n 2 * sigma)\n\n\nclass MultivariateGaussian:\n \"\"\"\n Class for multivariate Gaussian Distribution Estimator\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize an instance of multivariate Gaussian estimator\n\n Attributes\n ----------\n fitted_ : bool\n Initialized as false indicating current estimator instance has not been fitted.\n To be set as True in `MultivariateGaussian.fit` function.\n\n mu_: ndarray of shape (n_features,)\n Estimated expectation initialized as None. 
To be set in `MultivariateGaussian.fit`\n function.\n\n cov_: ndarray of shape (n_features, n_features)\n Estimated covariance initialized as None. To be set in `MultivariateGaussian.fit`\n function.\n \"\"\"\n self.mu_, self.cov_ = None, None\n self.fitted_ = False\n\n def fit(self, X: np.ndarray) -> MultivariateGaussian:\n \"\"\"\n Estimate Gaussian expectation and covariance from given samples\n\n Parameters\n ----------\n X: ndarray of shape (n_samples, n_features)\n Training data\n\n Returns\n -------\n self : returns an instance of self\n\n Notes\n -----\n Sets `self.mu_`, `self.cov_` attributes according to calculated estimation.\n Then sets `self.fitted_` attribute to `True`\n \"\"\"\n rows, cols = X.shape\n self.mu_ = np.sum(X, axis=0) / rows\n X_gal = np.array([X[i] - self.mu_ for i in range(rows)])\n self.cov_ = np.dot(X_gal.transpose(), X_gal) / (rows - 1)\n self.fitted_ = True\n return self\n\n def pdf(self, X: np.ndarray):\n \"\"\"\n Calculate PDF of observations under Gaussian model with fitted estimators\n\n Parameters\n ----------\n X: ndarray of shape (n_samples, n_features)\n Samples to calculate PDF for\n\n Returns\n -------\n pdfs: ndarray of shape (n_samples, )\n Calculated values of given samples for PDF function of N(mu_, cov_)\n\n Raises\n ------\n ValueError: In case function was called prior fitting the model\n \"\"\"\n if not self.fitted_:\n raise ValueError(\n \"Estimator must first be fitted before calling `pdf` function\")\n mahalanobis = np.einsum(\"bi,ij,bj->b\", X-self.mu_, inv(self.cov_), X-self.mu_)\n\n return np.exp(-.5 * mahalanobis) / \\\n np.sqrt((2*np.pi) ** len(X) * det(self.cov_))\n\n\n @staticmethod\n def log_likelihood(mu: np.ndarray, cov: np.ndarray,\n X: np.ndarray) -> float:\n \"\"\"\n Calculate the log-likelihood of the data under a specified Gaussian model\n\n Parameters\n ----------\n mu : ndarray of shape (n_features,)\n Expectation of Gaussian\n cov : ndarray of shape (n_features, n_features)\n covariance matrix of Gaussian\n X : ndarray of shape (n_samples, n_features)\n Samples to calculate log-likelihood with\n\n Returns\n -------\n log_likelihood: float\n log-likelihood calculated over all input data and under given parameters of Gaussian\n \"\"\"\n rows, cols = X.shape\n X_gal = np.array([X[i] - mu for i in range(rows)])\n\n temp_sun = 0\n for i in range(rows):\n temp_sun += np.linalg.multi_dot([X_gal[i].transpose(),\n np.linalg.inv(cov),\n X_gal[i]])\n return -(X.size / 2) * (cols * np.log(2 * np.pi) + np.log(\n np.linalg.det(cov))) - 0.5 * temp_sun\n\n\n"
] | [
[
"numpy.sum",
"numpy.linalg.inv",
"numpy.linalg.det",
"numpy.exp",
"numpy.ndarray",
"numpy.log",
"numpy.sqrt"
]
] |
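
For comparison with the `UnivariateGaussian` estimator above — whose `fit` applies `** 0.5` to the variance sum, so `var_` ends up holding a standard deviation, and whose `pdf` exponent evaluates `/ 2 * self.var_` left to right rather than dividing by `2 * var` — here is a plain-numpy sketch of the conventional estimates, on toy data invented here:

```python
# Plain-numpy reference for the univariate Gaussian estimates, alongside
# the estimator archived above. Toy sample invented here.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(loc=10.0, scale=2.0, size=1000)

mu_hat = X.mean()
var_hat = X.var(ddof=1)  # unbiased variance (divide by n - 1)

def gaussian_pdf(x, mu, var):
    # exponent is -(x - mu)^2 / (2 * var), with 2 * var in the denominator
    return np.exp(-(x - mu) ** 2 / (2 * var)) / np.sqrt(2 * np.pi * var)

print(mu_hat, var_hat, gaussian_pdf(mu_hat, mu_hat, var_hat))
```
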
Julian-Theis/stat-kiste | [
"b436e881c5ad79781a60dc767c08aa1165e4fb8b"
] | [
"backend/stat/normality_tests.py"
] | [
"\"\"\"\nCode originates from: https://machinelearningmastery.com/a-gentle-introduction-to-normality-tests-in-python/\n\n\"\"\"\n\nfrom scipy.stats import shapiro, normaltest, anderson\n\n\"\"\"\nShapiro-Wilk Test of Normality\nThe Shapiro-Wilk Test is more appropriate for small sample sizes (< 50 samples), but can also handle sample sizes as large as 2000.\nThe Shapiro-Wilk test is used as a numerical means of assessing normality.\n\"\"\"\ndef run_shapiro_wilk_normality_test(data, alpha=0.05, print_results=True):\n stat, p = shapiro(data)\n if print_results:\n print('Statistics=%.3f, p=%.3f' % (stat, p))\n if p > alpha:\n print('Sample looks Gaussian (fail to reject H0) at significance level ', alpha)\n else:\n print('Sample does not look Gaussian (reject H0) at significance level ', alpha)\n return stat, p\n\ndef run_dagostino_pearson_test(data, alpha, print_results=True):\n stat, p = normaltest(data)\n if print_results:\n print('Statistics=%.3f, p=%.3f' % (stat, p))\n if p > alpha:\n print('Sample looks Gaussian (fail to reject H0) at significance level ', alpha)\n else:\n print('Sample does not look Gaussian (reject H0) at significance level ', alpha)\n return stat, p\n\ndef run_anderson_darling(data, print_results=True):\n result = anderson(data)\n print('Statistic: %.3f' % result.statistic)\n if print_results:\n for i in range(len(result.critical_values)):\n sl, cv = result.significance_level[i], result.critical_values[i]\n if result.statistic < result.critical_values[i]:\n print('%.3f: %.3f, data looks normal (fail to reject H0)' % (sl, cv))\n else:\n print('%.3f: %.3f, data does not look normal (reject H0)' % (sl, cv))\n return result\n\n\n"
] | [
[
"scipy.stats.anderson",
"scipy.stats.shapiro",
"scipy.stats.normaltest"
]
] |
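
A short usage sketch for the normality-test wrappers above, inlined against scipy directly so it runs standalone; the synthetic sample is invented here:

```python
# Standalone version of the Shapiro-Wilk check wrapped above (toy data).
import numpy as np
from scipy.stats import shapiro

rng = np.random.default_rng(1)
data = rng.normal(loc=0.0, scale=1.0, size=100)

stat, p = shapiro(data)
alpha = 0.05
print('Statistics=%.3f, p=%.3f' % (stat, p))
if p > alpha:
    print('Sample looks Gaussian (fail to reject H0) at significance level', alpha)
else:
    print('Sample does not look Gaussian (reject H0) at significance level', alpha)
```
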
JamalRahman/hybridtfidf | [
"0409aae0083b1eae32c1a049f87f484740289be1"
] | [
"hybridtfidf/utils.py"
] | [
"from numpy.linalg import norm\r\nfrom numpy import dot\r\n\r\n\r\ndef cosine_sim(vec1, vec2):\r\n \"\"\"Calculates the cosine similarity between two vectors\r\n\r\n Args:\r\n vec1 (list of float): A vector\r\n vec2 (list of float): A vector\r\n\r\n Returns:\r\n The cosine similarity between the two input vectors\r\n \"\"\"\r\n return dot(vec1, vec2) / (norm(vec1) * norm(vec2))\r\n\r\n\r\ndef select_salient_posts(post_vectors, post_weights, k=10, similarity_threshold=0.4):\r\n \"\"\"\r\n Selects the top k most salient posts in a collection of posts.\r\n To avoid redundancy, any post too similar to other-posts are disregarded. Each selected post will\r\n therefore be both highly salient and representative of unique semantics.\r\n\r\n Note:\r\n post_vectors and post_weights must be in the same order. The ith element of post_weights must reflect\r\n the ith element of post_vectors\r\n\r\n Args:\r\n post_vectors (list of (list of float)): Hybrid tfidf representation of the documents\r\n as a document-term matrix\r\n\r\n post_weights (list of float): Hybrid Tfidf weight for each document\r\n\r\n k (int): The number of posts to select as output\r\n\r\n similarity_threshold (float): The maximum cosine similiarity for a post to be selected\r\n\r\n \"\"\"\r\n\r\n sorted_keyed_vectors = [z for _, z in sorted(zip(post_weights, enumerate(post_vectors)), key=lambda i: i[0],\r\n reverse=True)] # z is (i,vi) sorted by weight\r\n\r\n i = 1\r\n\r\n veclength = len(post_vectors)\r\n loop_condition = True\r\n\r\n significant_indices = [0]\r\n unsorted_indices = [sorted_keyed_vectors[0][0]]\r\n\r\n while loop_condition:\r\n is_similar = False\r\n\r\n for j in significant_indices:\r\n sim = cosine_sim(sorted_keyed_vectors[j][1], sorted_keyed_vectors[i][1])\r\n if sim >= similarity_threshold:\r\n is_similar = True\r\n\r\n if not is_similar:\r\n significant_indices.append(i)\r\n unsorted_indices.append(sorted_keyed_vectors[i][0])\r\n\r\n if (len(significant_indices) >= k) or (i >= veclength - 1):\r\n loop_condition = False\r\n i += 1\r\n\r\n return unsorted_indices\r\n"
] | [
[
"numpy.dot",
"numpy.linalg.norm"
]
] |
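
A toy call of `select_salient_posts` above, assuming the package is installed as `hybridtfidf`; the vectors and weights are invented so that the second post is nearly parallel to the first and should be filtered out as redundant:

```python
# Toy selection run against the utility above; inputs invented here.
from hybridtfidf.utils import select_salient_posts

post_vectors = [[1.0, 0.0], [0.99, 0.1], [0.0, 1.0]]
post_weights = [0.9, 0.8, 0.5]

# post 1 has cosine similarity ~0.995 with post 0, so it is skipped;
# expected output: indices [0, 2]
print(select_salient_posts(post_vectors, post_weights, k=2,
                           similarity_threshold=0.95))
```
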
jb2020-super/nunif | [
"eab6952d93e85951ed4e4cff30cd26c09e1dbb63"
] | [
"nunif/cli/waifu2x.py"
] | [
"# waifu2x\nimport os\nfrom os import path\nimport torch\nimport argparse\nimport csv\nfrom tqdm import tqdm\nfrom concurrent.futures import ThreadPoolExecutor as PoolExecutor\nfrom .. logger import logger\nfrom .. utils import load_image, save_image, ImageLoader\nfrom .. tasks.waifu2x import Waifu2x\n\nif os.getenv(\"NUNIF_MODEL_DIR\") is not None:\n MODEL_DIR = os.getenv(\"NUNIF_MODEL_DIR\")\nelse:\n MODEL_DIR = path.abspath(path.join(path.dirname(path.abspath(__file__)),\n \"..\", \"..\", \"pretrained_models\"))\nDEFAULT_MODEL_DIR = path.join(MODEL_DIR, \"waifu2x\", \"cunet\", \"art\")\n\n\ndef convert_files(ctx, files, args):\n loader = ImageLoader(files=files, max_queue_size=128)\n os.makedirs(args.output, exist_ok=True)\n with torch.no_grad(), PoolExecutor() as pool:\n for im, meta in tqdm(loader, ncols=60):\n z = ctx.convert(im, meta, args.method, args.noise_level, args.tile_size, args.batch_size, args.tta)\n output_filename = path.splitext(path.basename(meta[\"filename\"]))[0] + \".png\"\n pool.submit(save_image, z, meta, path.join(args.output, output_filename))\n\n\ndef convert_file(ctx, args):\n with torch.no_grad():\n im, meta = load_image(args.input)\n z = ctx.convert(im, meta, args.method, args.noise_level, args.tile_size, args.batch_size, args.tta)\n save_image(z, meta, args.output)\n\n\ndef load_files(txt):\n files = []\n with open(txt, \"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n files.append(row[0])\n return files\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-dir\", type=str, default=DEFAULT_MODEL_DIR, help=\"model dir\")\n parser.add_argument(\"--noise-level\", \"-n\", type=int, default=0, choices=[0, 1, 2, 3], help=\"noise level\")\n parser.add_argument(\"--method\", \"-m\", type=str, choices=[\"scale\", \"noise\", \"noise_scale\"], default=\"noise_scale\", help=\"method\")\n parser.add_argument(\"--gpu\", \"-g\", type=int, nargs=\"+\", default=[0], help=\"GPU device ids. -1 for CPU\")\n parser.add_argument(\"--batch-size\", type=int, default=4, help=\"minibatch_size\")\n parser.add_argument(\"--tile-size\", type=int, default=256, help=\"tile size for tiled render\")\n parser.add_argument(\"--output\", \"-o\", type=str, required=True, help=\"output file or directory\")\n parser.add_argument(\"--input\", \"-i\", type=str, required=True, help=\"input file or directory. (*.txt, *.csv) for image list\")\n parser.add_argument(\"--tta\", action=\"store_true\", help=\"TTA mode\")\n args = parser.parse_args()\n logger.debug(str(args))\n\n ctx = Waifu2x(model_dir=args.model_dir, gpus=args.gpu)\n ctx.load_model(args.method, args.noise_level)\n\n if path.isdir(args.input):\n convert_files(ctx, ImageLoader.listdir(args.input), args)\n else:\n if path.splitext(args.input)[-1] in (\".txt\", \".csv\"):\n convert_files(ctx, load_files(args.input), args)\n else:\n convert_file(ctx, args)\n\n return 0\n"
] | [
[
"torch.no_grad"
]
] |
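
The CLI above parses its flags with argparse; a hypothetical in-process invocation follows, with the import path inferred from the row's file_path and CPU selected via `--gpu -1` (the pretrained models and `input.png` are assumed to exist):

```python
# Hypothetical invocation of the waifu2x CLI above via its main() entry point.
# Flags mirror the argparse definitions in the file; paths are placeholders.
import sys
from nunif.cli.waifu2x import main  # import path assumed from file_path

sys.argv = ["waifu2x", "-i", "input.png", "-o", "output.png",
            "-m", "noise_scale", "-n", "2", "--gpu", "-1"]
main()
```
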
fordanic/cmiv-ai-course | [
"c51e51485d18c38bece67d6bcb3bd7422b56da97"
] | [
"notebooks/figures/plot_interactive_tree.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.externals.six import StringIO # doctest: +SKIP\nfrom sklearn.tree import export_graphviz\nfrom scipy.misc import imread\nfrom scipy import ndimage\n\nimport re\n\nX, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)\n\n\ndef tree_image(tree, fout=None):\n try:\n import pydot\n except ImportError:\n # make a hacky white plot\n x = np.ones((10, 10))\n x[0, 0] = 0\n return x\n dot_data = StringIO()\n export_graphviz(tree, out_file=dot_data)\n data = re.sub(r\"gini = 0\\.[0-9]+\\\\n\", \"\", dot_data.getvalue())\n data = re.sub(r\"samples = [0-9]+\\\\n\", \"\", data)\n data = re.sub(r\"\\\\nsamples = [0-9]+\", \"\", data)\n\n graph = pydot.graph_from_dot_data(data)[0]\n if fout is None:\n fout = \"tmp.png\"\n graph.write_png(fout)\n return imread(fout)\n\n\ndef plot_tree(max_depth=1):\n fig, ax = plt.subplots(1, 2, figsize=(15, 7))\n h = 0.02\n\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n if max_depth != 0:\n tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1).fit(X, y)\n Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n Z = Z.reshape(xx.shape)\n faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))\n faces = faces.reshape(xx.shape)\n border = ndimage.laplace(faces) != 0\n ax[0].contourf(xx, yy, Z, alpha=.4)\n ax[0].scatter(xx[border], yy[border], marker='.', s=1)\n ax[0].set_title(\"max_depth = %d\" % max_depth)\n ax[1].imshow(tree_image(tree))\n ax[1].axis(\"off\")\n else:\n ax[0].set_title(\"data set\")\n ax[1].set_visible(False)\n ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)\n ax[0].set_xlim(x_min, x_max)\n ax[0].set_ylim(y_min, y_max)\n ax[0].set_xticks(())\n ax[0].set_yticks(())\n\n\ndef plot_tree_interactive():\n from ipywidgets import interactive, IntSlider\n slider = IntSlider(min=0, max=8, step=1, value=0)\n return interactive(plot_tree, max_depth=slider)"
] | [
[
"numpy.ones",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.subplots",
"scipy.misc.imread",
"numpy.arange",
"sklearn.externals.six.StringIO",
"numpy.array",
"scipy.ndimage.laplace",
"sklearn.tree.export_graphviz",
"sklearn.datasets.make_blobs"
]
] |
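
The figure module above exposes `plot_tree` (static; `max_depth=0` plots the data set only) and `plot_tree_interactive` (an ipywidgets slider over depths 0-8). A notebook usage sketch, with the import path assumed from the row's file_path:

```python
# Notebook usage sketch for the helpers above; import path assumed.
from figures.plot_interactive_tree import plot_tree, plot_tree_interactive

plot_tree(max_depth=3)   # static decision boundary plus rendered tree image
plot_tree_interactive()  # depth slider (0-8) when run inside Jupyter
```
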
erelsgl/fair-diminishing-differences | [
"ae64ff4a4c6cfde5a1261e67484c905414607d36"
] | [
"simulations.py"
] | [
"#!python3\n\n\"\"\"\nUtilities for conducting simulations on random utility profiles.\n\nAuthor: Erel Segai-Halevi\nDate: 2019-07\n\"\"\"\n\nimport pandas, numpy as np\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\nfrom partitions import equalPartitions\nimport operator\nfrom timeit import default_timer as timer\n\nfrom PrefProfile import PrefProfile\nfrom mean_and_stderr import mean_and_stderr\n\ntrace = lambda *x: None # To enable tracing, set trace=print\n\n\ndef avergeOverRandomProfiles(checkSingleProfile,\n agents:list, items:list, lowMarketValue:float, highMarketValue:float, maxNoiseSize:float, iterations:int) -> (list,list):\n \"\"\"\n Create many random utility profiles, and calculate various stats on them.\n\n :param checkSingleProfile: a function that takes a single PrefProfile object, and returns a vector of numbers describing it.\n :param agents: a list of agent-names.\n :param items: a list of item-names.\n :param lowMarketValue, highMarketValue, maxNoiseSize: used for creating the random valuations.\n :param iterations: number of times to randomize.\n\n :return (means, stderrs):\n means is a vector of floats, representing the average of the numbers returned for all random PrefProfiles.\n stderrs is a corresponding vector of the standard-errors.\n\n\n >>> dummyCheckSingleProfile = lambda profile: [True,False,5]\n >>> list(avergeOverRandomProfiles(dummyCheckSingleProfile, [\"A\",\"B\"], [\"x\",\"y\",\"z\"], 1, 2, 0.5, 10))\n [array([ 1., 0., 5.]), array([ 0., 0., 0.])]\n \"\"\"\n generator = lambda: np.array(checkSingleProfile(PrefProfile.randomCardinal(agents, items, lowMarketValue, highMarketValue, maxNoiseSize)))\n return mean_and_stderr(iterations, generator)\n\n\ndef simulate(checkSingleProfile, columnNames:list,\n agents:list, itemCounts:list, noiseSizes:list,\n lowMarketValue:float, highMarketValue:float, iterations:int, filename:str)->DataFrame:\n \"\"\"\n Runs an experiment with random cardinal utility profiles.\n\n :param checkSingleProfile: a function that takes a single PrefProfile object, and returns a vector of numbers describing it.\n :param columnNames: a list of column-names. Should be of the same size as the vector returned by checkSingleProfile.\n\n :param agents: a list of agent-names.\n :param itemCounts: a list of different item-counts to try.\n :param noiseSizes: a list of different noise-amplitudes to try.\n :param lowMarketValue, highMarketValue: range for randomly selecting the market-value of each item.\n :param iterations: number of iterations to run randomly.\n :param filename: name of file for saving the results. 
Will be created in subfolder \"results/\" with extension \"csv\".\n\n :return: a DataFrame with the experiment results.\n\n >>> pandas.set_option('display.max_columns', 500)\n >>> pandas.set_option('display.width', 500)\n >>> dummyCheckSingleProfile = lambda profile: [True,5]\n >>> dummyColumns = [\"col1\",\"col2\"]\n >>> simulate(dummyCheckSingleProfile, dummyColumns, [\"A\",\"B\"], [2,3,4], [0.3,0.7], 1, 2, 10, \"doctest-simulation\")\n Agents Iterations Noise size Items per agent col1 col2 col1 err col2 err\n 0 2.0 10.0 0.3 2.0 1.0 5.0 0.0 0.0\n 1 2.0 10.0 0.3 3.0 1.0 5.0 0.0 0.0\n 2 2.0 10.0 0.3 4.0 1.0 5.0 0.0 0.0\n 3 2.0 10.0 0.7 2.0 1.0 5.0 0.0 0.0\n 4 2.0 10.0 0.7 3.0 1.0 5.0 0.0 0.0\n 5 2.0 10.0 0.7 4.0 1.0 5.0 0.0 0.0\n \"\"\"\n meanColumnNames = list(columnNames)\n stderrColumnNames = [c+\" err\" for c in columnNames]\n results = DataFrame(columns=['Agents', 'Iterations', 'Noise size', 'Items per agent'] + meanColumnNames + stderrColumnNames)\n agentCount = len(agents)\n for maxNoiseSize in noiseSizes:\n for itemCount in itemCounts:\n start = timer()\n trace(\"noise=\"+str(maxNoiseSize)+\" items=\"+str(itemCount)+\" file=\"+filename)\n (means,stderrs) = avergeOverRandomProfiles(checkSingleProfile,\n agents, range(itemCount * len(agents)),\n lowMarketValue, highMarketValue, maxNoiseSize, iterations)\n if len(means)!=len(columnNames):\n raise ValueError(\"checkSingleProfile returned {} values, but columnNames has {} values\".format(len(means),len(columnNames)))\n results.loc[len(results)] = [agentCount, iterations, maxNoiseSize, itemCount] + list(means) + list(stderrs)\n results.to_csv(\"results/\"+filename+\".csv\")\n trace(\" \" + str(timer() - start)+\" seconds\")\n return results\n\n\n\ndef simulateTwice(checkSingleProfile, columnNames:list,\n agents:list, iterations:int, filename:str)->(DataFrame,DataFrame):\n \"\"\"\n Run two simulation experiments: one with variable noise and one with variable item-count.\n\n :param agents: a list of agent names.\n :param iterations: number of iterations to randomize.\n :param filename: base filename for saving the results.\n :return: Two pandas.DataFrame objects, representing the results of two experiments:\n 1. Fixed item-count and variable noise (written to file \"<filename>-noise.csv\"),\n 2. 
Fixed noise and variable item-count (written to file \"<filename>-items.csv\").\n\n >>> pandas.set_option('display.max_columns', 500)\n >>> pandas.set_option('display.width', 500)\n >>> dummyCheckSingleProfile = lambda profile: [True,False,5]\n >>> dummyColumns = [\"col1\",\"col2\",\"col3\"]\n >>> (results1,results2) = simulateTwice(dummyCheckSingleProfile, dummyColumns, [\"A\",\"B\"], 10, \"doctest-simulation\")\n >>> results1\n Agents Iterations Noise size Items per agent col1 col2 col3 col1 err col2 err col3 err\n 0 2.0 10.0 0.1 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 1 2.0 10.0 0.2 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 2 2.0 10.0 0.3 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 3 2.0 10.0 0.4 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 4 2.0 10.0 0.5 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 5 2.0 10.0 0.6 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 6 2.0 10.0 0.7 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 7 2.0 10.0 0.8 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 8 2.0 10.0 0.9 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 9 2.0 10.0 1.0 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n >>> results2\n Agents Iterations Noise size Items per agent col1 col2 col3 col1 err col2 err col3 err\n 0 2.0 10.0 0.5 2.0 1.0 0.0 5.0 0.0 0.0 0.0\n 1 2.0 10.0 0.5 3.0 1.0 0.0 5.0 0.0 0.0 0.0\n 2 2.0 10.0 0.5 4.0 1.0 0.0 5.0 0.0 0.0 0.0\n 3 2.0 10.0 0.5 5.0 1.0 0.0 5.0 0.0 0.0 0.0\n 4 2.0 10.0 0.5 6.0 1.0 0.0 5.0 0.0 0.0 0.0\n 5 2.0 10.0 0.5 7.0 1.0 0.0 5.0 0.0 0.0 0.0\n 6 2.0 10.0 0.5 8.0 1.0 0.0 5.0 0.0 0.0 0.0\n \"\"\"\n agentCount = len(agents)\n\n fixedItemCount = 5 if agentCount==2 else 4\n results1 = simulate(checkSingleProfile, columnNames,\n agents,\n itemCounts = [fixedItemCount],\n noiseSizes=[.1,.2,.3,.4,.5,.6,.7,.8,.9,1],\n\n lowMarketValue=1,\n highMarketValue=2,\n iterations = iterations,\n filename = filename+\"-noise\"\n )\n trace(results1)\n\n itemCounts = [2,3,4,5,6,7,8] if agentCount==2 else [2,3,4,5]\n results2 = simulate(checkSingleProfile, columnNames,\n agents,\n itemCounts = itemCounts,\n noiseSizes=[.5],\n\n lowMarketValue=1,\n highMarketValue=2,\n iterations = iterations,\n filename = filename+\"-items\"\n )\n trace(results2)\n\n return (results1, results2)\n\n\n\ntitleFontSize = 14\nlegendFontSize = 12\naxesFontSize = 13\nmarkerSize=12\n\n\ndef plotResults(results1:DataFrame, results2:DataFrame, columnsAndStyles:list, title:str=\"probability\", errorbars:bool=False, bbox_to_anchor=None):\n\n ### Subplot 1: by noise size\n\n ax = plt.subplot(1, 2, 1)\n agentCount = int(results1['Agents'][0])\n # iterations = int(results1['Iterations'][0])\n itemCounts1 = int(results1['Items per agent'][0])\n\n ax.set_title(title+\" vs. noise, \" + str(agentCount) + ' agents, ' + str(itemCounts1) + ' items per agent',\n fontsize=titleFontSize, weight='bold')\n ax.set_xlabel('Noise size', fontsize=axesFontSize)\n\n x_values = results1['Noise size']\n for columnName,style in columnsAndStyles:\n y_values = results1[columnName]\n if errorbars:\n yerr_values = results1[columnName + \" err\"]\n ax.errorbar(x_values, y_values, yerr=yerr_values, fmt=style, markersize=markerSize)\n else:\n ax.plot(x_values, y_values, fmt=style, markersize=markerSize)\n plt.xticks(x_values.tolist(), fontsize=axesFontSize)\n plt.yticks([0,0.2,0.4,0.6,0.8,1], fontsize=axesFontSize)\n\n\n ### Subplot 2: by number of items\n\n ax = plt.subplot(1, 2, 2, sharey=ax)\n agentCount = int(results2['Agents'][0])\n iterations = int(results2['Iterations'][0])\n maxNoise = results2['Noise size'][0]\n\n ax.set_title(title+\" vs. 
items, \" + str(agentCount) + ' agents, |noise|<=' + str(maxNoise),\n fontsize=titleFontSize, weight='bold')\n ax.set_xlabel('Items per agent', fontsize=axesFontSize)\n x_values = results2['Items per agent']\n for columnName,style in columnsAndStyles:\n y_values = results2[columnName]\n if errorbars:\n yerr_values = results2[columnName + \" err\"]\n ax.errorbar(x_values, y_values, yerr=yerr_values, fmt=style, markersize=markerSize)\n else:\n ax.plot(x_values, y_values, fmt=style, markersize=markerSize)\n plt.xticks(x_values.tolist(), fontsize=axesFontSize)\n plt.yticks([0,0.2,0.4,0.6,0.8,1], fontsize=axesFontSize)\n\n ax.legend(prop={'size': legendFontSize}, loc='center left')\n the_legend = ax.legend()\n the_legend.set_bbox_to_anchor([1.3,0.7])\n for t in the_legend.get_texts():\n t.set_text(t.get_text().replace(title,\"\"))\n\n\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n import doctest\n (failures, tests) = doctest.testmod(report=True)\n print(\"{} failures, {} tests\".format(failures, tests))\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot",
"pandas.DataFrame",
"matplotlib.pyplot.yticks"
]
] |
remifan/commplax | [
"e8ee5bc86ab0dfd90773202579237ecf42488cd0"
] | [
"tests/xop_test.py"
] | [
"from commplax import xop\nimport numpy as np\nfrom jax import random, numpy as jnp\n\n\ndef conv_input_complex(n, m):\n key1 = random.PRNGKey(0)\n key2 = random.PRNGKey(1)\n k1, k2 = random.split(key1)\n k3, k4 = random.split(key2)\n x = random.normal(k1, (n,)) + 1j * random.normal(k2, (n,))\n h = random.normal(k3, (m,)) + 1j * random.normal(k4, (m,))\n return x, h\n\n\ndef conv_input_float(n, m):\n key1 = random.PRNGKey(0)\n k1, k2 = random.split(key1)\n x = random.normal(k1, (n,))\n h = random.normal(k2, (m,))\n return x, h\n\n\ndef test_convolve():\n for n, m in zip([1, 5, 5, 5, 5, 6, 6, 6, 1000, 1000, 1001, 1001],\n [1, 1, 2, 3, 4, 2, 3, 4, 7, 8, 7, 8]):\n\n for mode in ['same', 'valid', 'full']:\n x, h = conv_input_complex(n, m)\n a = np.convolve(x, h, mode=mode)\n b = xop.convolve(x, h, mode=mode)\n assert np.allclose(a, b, rtol=2e-05), \"\\nn={}, m={}, mode={}\".format(n, m, mode)\n\n for mode in ['same', 'valid', 'full']:\n x, h = conv_input_float(n, m)\n a = np.convolve(x, h, mode=mode)\n b = xop.convolve(x, h, mode=mode)\n assert np.allclose(a, b, rtol=1e-05, atol=5e-06), \"\\nn={}, m={}, mode={}\".format(n, m, mode)\n\n\n"
] | [
[
"numpy.allclose",
"numpy.convolve"
]
] |
max-stack/MWP-SS-Metrics | [
"01268f2d6da716596216b04de4197e345b96c219"
] | [
"mwp_solver/module/Graph/gcn.py"
] | [
"# Code Taken from https://github.com/LYH-YF/MWPToolkit\n# -*- encoding: utf-8 -*-\n# @Author: Yihuai Lan\n# @Time: 2021/08/29 21:49:49\n# @File: gcn.py\n\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom module.Layer.graph_layers import GraphConvolution\n\n\nclass GCN(nn.Module):\n def __init__(self, in_feat_dim, nhid, out_feat_dim, dropout):\n super(GCN, self).__init__()\n self.gc1 = GraphConvolution(in_feat_dim, nhid)\n self.gc2 = GraphConvolution(nhid, out_feat_dim)\n self.dropout = dropout\n\n def forward(self, x, adj):\n \"\"\"\n Args:\n x (torch.Tensor): input features, shape [batch_size, node_num, in_feat_dim]\n adj (torch.Tensor): adjacency matrix, shape [batch_size, node_num, node_num]\n \n Returns:\n torch.Tensor: gcn_enhance_feature, shape [batch_size, node_num, out_feat_dim]\n \"\"\"\n x = F.relu(self.gc1(x, adj))\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.gc2(x, adj)\n return x"
] | [
[
"torch.nn.functional.dropout"
]
] |
SubstraFoundation/distributed-learning-contributivity | [
"170ed8a660f7d7b4972c140f27782e085c4d63db"
] | [
"mplc/multi_partner_learning/basic_mpl.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nFunctions for model training and evaluation (single-partner and multi-partner cases)\n\"\"\"\n\nimport operator\nimport os\nfrom abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom timeit import default_timer as timer\n\nimport numpy as np\nimport random\nimport tensorflow as tf\nfrom loguru import logger\nfrom sklearn.metrics import confusion_matrix\nfrom tensorflow.keras import Input, Model\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nfrom .utils import History\nfrom ..utils import project_onto_the_simplex\nfrom .. import constants\nfrom ..models import NoiseAdaptationChannel, EnsemblePredictionsModel\nfrom ..partner import Partner, PartnerMpl\n\nALLOWED_PARAMETERS = ('partners_list',\n 'epoch_count',\n 'minibatch_count',\n 'dataset',\n 'aggregation',\n 'is_early_stopping',\n 'is_save_data',\n 'save_folder',\n 'init_model_from',\n 'use_saved_weights')\n\n\nclass MultiPartnerLearning(ABC):\n name = 'abstract'\n\n def __init__(self, scenario, **kwargs):\n \"\"\"\n\n :type scenario: Scenario\n \"\"\"\n # Attributes related to the data and the model\n self.dataset = scenario.dataset\n self.partners_list = scenario.partners_list\n self.init_model_from = scenario.init_model_from\n self.use_saved_weights = scenario.use_saved_weights\n self.amounts_per_partner = scenario.amounts_per_partner\n self.val_set = scenario.val_set\n self.test_set = scenario.test_set\n\n # Attributes related to iterating at different levels\n self.epoch_count = scenario.epoch_count\n self.minibatch_count = scenario.minibatch_count\n self.is_early_stopping = scenario.is_early_stopping\n\n # Attributes to store results\n self.save_folder = scenario.save_folder\n\n # Erase the default parameters (which mostly come from the scenario) if some parameters have been specified\n self.__dict__.update((k, v) for k, v in kwargs.items() if k in ALLOWED_PARAMETERS)\n\n # Unpack dataset-related parameters\n self.val_data = (self.dataset.x_val, self.dataset.y_val)\n self.test_data = (self.dataset.x_test, self.dataset.y_test)\n self.dataset_name = self.dataset.name\n self.generate_new_model = self.dataset.generate_new_model\n\n # Initialize the model\n model = self.init_model()\n self.model_weights = model.get_weights()\n self.metrics_names = self.dataset.model_metrics_names\n\n # Initialize iterators\n self.epoch_index = 0\n self.minibatch_index = 0\n self.learning_computation_time = 0\n\n # Convert partners to Mpl partners\n for partner in self.partners_list:\n assert isinstance(partner, Partner)\n partners_list = sorted(self.partners_list, key=operator.attrgetter(\"id\"))\n logger.info(\n f\"## Preparation of model's training on partners with ids: {['#' + str(p.id) for p in partners_list]}\")\n self.partners_list = [PartnerMpl(partner, self) for partner in self.partners_list]\n\n # Attributes related to the aggregation approach\n self.aggregator = self.init_aggregation_function(scenario.aggregation)\n\n # Initialize History\n self.history = History(self)\n\n # Initialize result folder\n if self.save_folder is not None:\n if 'custom_name' in kwargs:\n self.save_folder = self.save_folder / kwargs[\"custom_name\"]\n else:\n self.save_folder = self.save_folder / 'multi_partner_learning'\n self.save_folder.mkdir(parents=True, exist_ok=False)\n\n logger.debug(\"MultiPartnerLearning object instantiated.\")\n\n def __str__(self):\n return f'{self.name}'\n\n @property\n def partners_count(self):\n return len(self.partners_list)\n\n 
def init_aggregation_function(self, aggregator):\n return aggregator(self)\n\n def build_model(self):\n return self.build_model_from_weights(self.model_weights)\n\n def build_model_from_weights(self, new_weights):\n \"\"\"Generate a new model initialized with weights passed as arguments\"\"\"\n new_model = self.generate_new_model()\n new_model.set_weights(new_weights)\n return new_model\n\n def init_model(self):\n new_model = self.generate_new_model()\n\n if self.use_saved_weights:\n logger.info(\"Init model with previous coalition model\")\n new_model.load_weights(self.init_model_from)\n else:\n logger.info(\"Init new model\")\n\n return new_model\n\n def save_final_model(self):\n \"\"\"Save final model weights\"\"\"\n\n model_folder = os.path.join(self.save_folder, 'model')\n\n if not os.path.isdir(model_folder):\n os.makedirs(model_folder)\n\n np.save(os.path.join(model_folder, self.dataset_name + '_final_weights.npy'), self.model_weights)\n\n model_to_save = self.build_model()\n model_to_save.save_weights(os.path.join(model_folder, self.dataset_name + '_final_weights.h5'))\n\n def save_data(self):\n if self.save_folder is None:\n raise ValueError(\"The path to the save folder is None, history data cannot be saved, nor model weights\")\n\n self.save_final_model()\n self.history.save_data()\n\n def log_partner_perf(self, partner_id, partner_index, history):\n for key_history in self.history.metrics:\n self.history.history[partner_id][key_history][self.epoch_index,\n self.minibatch_index] = history[key_history][-1]\n\n epoch_nb_str = f\"Epoch {str(self.epoch_index).zfill(2)}/{str(self.epoch_count - 1).zfill(2)}\"\n mb_nb_str = f\"Minibatch {str(self.minibatch_index).zfill(2)}/{str(self.minibatch_count - 1).zfill(2)}\"\n partner_id_str = f\"Partner partner_id #{partner_id} ({partner_index}/{self.partners_count - 1})\"\n val_acc_str = f\"{round(history['val_accuracy'][-1], 2)}\"\n\n logger.debug(f\"{epoch_nb_str} > {mb_nb_str} > {partner_id_str} > val_acc: {val_acc_str}\")\n\n def eval_and_log_model_val_perf(self):\n\n model = self.build_model()\n\n if self.val_set == 'global':\n hist = model.evaluate(self.val_data[0],\n self.val_data[1],\n batch_size=constants.DEFAULT_BATCH_SIZE,\n verbose=0,\n )\n elif self.val_set == 'local':\n hist = [0.0, 0.0]\n for p in self.partners_list:\n hist_partner = model.evaluate(p.x_val,\n p.y_val,\n batch_size=constants.DEFAULT_BATCH_SIZE,\n verbose=0,\n )\n hist[0] += hist_partner[0] / self.partners_count\n hist[1] += hist_partner[1] / self.partners_count\n else:\n raise ValueError(\"validation set should be 'local' or 'global', not {self.val_set}\")\n\n self.history.history['mpl_model']['val_loss'][self.epoch_index, self.minibatch_index] = hist[0]\n self.history.history['mpl_model']['val_accuracy'][self.epoch_index, self.minibatch_index] = hist[1]\n\n if self.minibatch_index >= self.minibatch_count - 1:\n epoch_nb_str = f\"{str(self.epoch_index).zfill(2)}/{str(self.epoch_count - 1).zfill(2)}\"\n logger.info(f\" Model evaluation at the end of the epoch \"\n f\"{epoch_nb_str}: \"\n f\"{['%.3f' % elem for elem in hist]}\")\n\n def eval_and_log_final_model_test_perf(self):\n logger.info(\"### Evaluating model on test data:\")\n model = self.build_model()\n if self.test_set == 'global':\n hist = model.evaluate(self.test_data[0],\n self.test_data[1],\n batch_size=constants.DEFAULT_BATCH_SIZE,\n verbose=0,\n )\n elif self.test_set == 'local':\n hist = [0.0, 0.0]\n for p in self.partners_list:\n hist_partner = model.evaluate(p.x_test,\n p.y_test,\n 
batch_size=constants.DEFAULT_BATCH_SIZE,\n verbose=0,\n )\n hist[0] += hist_partner[0] / self.partners_count\n hist[1] += hist_partner[1] / self.partners_count\n else:\n raise ValueError(\"test set should be 'local' or 'global', not {self.val_set}\")\n\n self.history.score = hist[1]\n self.history.nb_epochs_done = self.epoch_index + 1\n logger.info(f\" Model metrics names: {self.metrics_names}\")\n logger.info(f\" Model metrics values: {['%.3f' % elem for elem in hist]}\")\n\n def split_in_minibatches(self):\n \"\"\"Split the dataset passed as argument in mini-batches\"\"\"\n\n for partner in self.partners_list:\n partner.split_minibatches()\n\n def early_stop(self):\n logger.debug(\" Checking if early stopping criteria are met:\")\n if self.is_early_stopping:\n # Early stopping parameters\n if (\n self.epoch_index >= constants.PATIENCE\n and self.history.history['mpl_model']['val_loss'][self.epoch_index,\n self.minibatch_index] >\n self.history.history['mpl_model']['val_loss'][self.epoch_index - constants.PATIENCE,\n self.minibatch_index]\n ):\n logger.debug(\" -> Early stopping criteria are met, stopping here.\")\n return True\n else:\n logger.debug(\" -> Early stopping criteria are not met, continuing with training.\")\n else:\n return False\n\n def fit(self):\n \"\"\"Return the score on test data of a final aggregated model trained in a federated way on each partner\"\"\"\n\n start = timer()\n # Train model (iterate for each epoch and mini-batch)\n while self.epoch_index < self.epoch_count:\n\n self.fit_epoch() # perform an epoch on the self.model\n\n if self.early_stop():\n break\n self.epoch_index += 1\n\n # After last epoch or if early stopping was triggered, evaluate model on the global testset\n self.eval_and_log_final_model_test_perf()\n\n end = timer()\n self.learning_computation_time = end - start\n logger.info(f\"Training and evaluation on multiple partners: \"\n f\"done. 
({np.round(self.learning_computation_time, 3)} seconds)\")\n if self.save_folder is not None:\n self.save_data() # Save the model weights and the history data\n\n @abstractmethod\n def fit_epoch(self):\n while self.minibatch_index < self.minibatch_count:\n self.fit_minibatch()\n self.minibatch_index += 1\n self.eval_and_log_model_val_perf()\n\n @abstractmethod\n def fit_minibatch(self):\n pass\n\n\nclass SinglePartnerLearning(MultiPartnerLearning):\n name = 'Single Partner learning'\n\n def __init__(self, scenario, **kwargs):\n super(SinglePartnerLearning, self).__init__(scenario, **kwargs)\n if self.partners_count != 1:\n raise ValueError('More than one partner is provided')\n self.partner = self.partners_list[0]\n\n def fit(self):\n \"\"\"Return the score on test data of a model trained on a single partner\"\"\"\n\n start = timer()\n logger.info(f\"## Training and evaluating model on partner with partner_id #{self.partner.id}\")\n\n # Set if early stopping if needed\n cb = []\n es = None\n if self.is_early_stopping:\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=constants.PATIENCE)\n cb.append(es)\n\n # Train model\n logger.info(\" Training model...\")\n model = self.build_model()\n if self.val_set == 'global':\n history = model.fit(self.partner.x_train,\n self.partner.y_train,\n batch_size=self.partner.batch_size,\n epochs=self.epoch_count,\n verbose=0,\n validation_data=self.val_data,\n callbacks=cb)\n elif self.val_set == 'local':\n history = model.fit(self.partner.x_train,\n self.partner.y_train,\n batch_size=self.partner.batch_size,\n epochs=self.epoch_count,\n verbose=0,\n validation_data=(self.partner.x_val, self.partner.y_val),\n callbacks=cb)\n else:\n raise ValueError(\"validation set should be 'local' or 'global', not {self.val_set}\")\n\n self.model_weights = model.get_weights()\n self.log_partner_perf(self.partner.id, 0, history.history)\n del self.history.history['mpl_model']\n # Evaluate trained model on test data\n self.eval_and_log_final_model_test_perf()\n self.history.nb_epochs_done = (es.stopped_epoch + 1) if es.stopped_epoch != 0 else self.epoch_count\n\n end = timer()\n self.learning_computation_time = end - start\n\n def fit_epoch(self):\n pass\n\n def fit_minibatch(self):\n pass\n\n\nclass FederatedAverageLearning(MultiPartnerLearning):\n name = 'Federated averaging'\n\n def __init__(self, scenario, **kwargs):\n # First, if only one partner, fall back to dedicated single partner function\n super(FederatedAverageLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n self.minibatch_index = i\n self.fit_minibatch()\n\n # At the end of each minibatch,aggregate the models\n self.model_weights = self.aggregator.aggregate_model_weights()\n self.minibatch_index = 0\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a federated averaging approach\"\"\"\n\n logger.debug(\"Start new fedavg collaborative round ...\")\n\n # Starting model for each partner is the aggregated model from the previous mini-batch iteration\n logger.info(f\"(fedavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init each partner's models with a copy of the global model\")\n\n for partner in self.partners_list:\n partner.model_weights = self.model_weights\n\n # Evaluate and store accuracy of mini-batch start model\n self.eval_and_log_model_val_perf()\n\n # Iterate over partners for training each individual model\n for partner_index, partner in enumerate(self.partners_list):\n # Reference the partner's model\n partner_model = partner.build_model()\n\n # Train on partner local data set\n if self.val_set == 'global':\n history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index],\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=self.val_data)\n elif self.val_set == 'local':\n history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index],\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=(partner.x_val, partner.y_val))\n else:\n raise ValueError(\"validation set should be 'local' or 'global', not {self.val_set}\")\n\n # Log results of the round\n self.log_partner_perf(partner.id, partner_index, history.history)\n\n # Update the partner's model in the models' list\n partner.model_weights = partner_model.get_weights()\n\n logger.debug(\"End of fedavg collaborative round.\")\n\n\nclass DistributionallyRobustFederatedAveragingLearning(MultiPartnerLearning):\n \"\"\"\n - This class implements the Distributionally Robust Federated Averaging (DRFA) Algorithm,\n only a subset of partners are chosen to participate in a given collaborative\n learning round. based on a global mixing parameter called lambda\n - Lambda is updated at the end of each collaborative learning round using its own update rule\n - DRFA is considered a framework under which we can implement other FL algorithms such as FedAvg\n - Link to the paper : https://arxiv.org/abs/2102.12660\n \"\"\"\n name = \"Distributionally Robust Federated Averaging\"\n\n def __init__(self, scenario, **kwargs):\n super(DistributionallyRobustFederatedAveragingLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n self.active_partners_count = scenario.active_partners_count\n\n self.lambda_vector = self.init_lambda()\n self.active_partners_list = list()\n self.update_active_partners_list()\n\n self.local_steps = scenario.gradient_updates_per_pass_count\n self.partners_training_data = {}\n self.partners_participation = self.initialize_participation_dict()\n self.lambda_learning_rate = 8e-3\n\n self.local_steps_index = 0\n self.local_steps_index_t = 0\n self.global_model_at_index_t = None\n self.model_weights_at_index_t = list()\n self.loss_for_model_at_index_t = np.zeros(self.partners_count)\n\n self.subset_u_partners = list()\n self.loss_vector_v = list()\n\n def fit_epoch(self):\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # convert partners training data into tf Dataset, reference: fast_mpl\n for partner_id, partner in enumerate(self.partners_list):\n self.partners_training_data[partner.id] = list()\n for minibatch_index in range(self.minibatch_count):\n # convert training data\n data_train = tf.data.Dataset.from_tensor_slices((partner.minibatched_x_train[minibatch_index],\n partner.minibatched_y_train[minibatch_index]))\n data_train = data_train.shuffle(len(partner.minibatched_x_train[minibatch_index]))\n data_train = data_train.batch(partner.batch_size)\n data_train = data_train.prefetch(1)\n self.partners_training_data[partner.id].append(data_train)\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n self.minibatch_index = i\n\n self.local_steps_index = 0\n self.local_steps_index_t = np.random.randint(0, self.local_steps - 1)\n\n logger.info(\n f\"Active partner in this round \"\n f\"{['#'+str(active_partner.id) for active_partner in self.active_partners_list]} \"\n f\"according to lambda vector > {self.lambda_vector}\")\n logger.info(f\"Local step index t > {self.local_steps_index_t}\")\n\n self.fit_minibatch()\n\n # update partner participations\n self.partners_participation[self.epoch_index][self.minibatch_index][[p.id for p\n in self.active_partners_list]] = 1\n\n self.update_lambda()\n self.update_active_partners_list()\n self.log_partners_participation_rate()\n\n self.minibatch_index = 0\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a distributionally robust federated averaging approach\"\"\"\n\n # Starting model for each partner is the aggregated model from the previous mini-batch iteration\n logger.info(f\"(drfa) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init each partner's models with a copy of the global model\")\n\n for partner in self.partners_list:\n partner.model_weights = self.model_weights\n\n # Evaluate and store accuracy of mini-batch start model\n self.eval_and_log_model_val_perf()\n\n # Iterate over partners for training\n for partner_index, partner in enumerate(self.active_partners_list):\n partner_model = partner.build_model()\n # loop through each partner's minibatch\n minibatched_x_y = self.partners_training_data[partner.id][self.minibatch_index]\n for idx, batch_x_y in enumerate(minibatched_x_y):\n with tf.GradientTape() as tape:\n p_pred = partner_model(batch_x_y[0])\n loss = partner_model.compiled_loss(batch_x_y[1], p_pred)\n\n partner_model.optimizer.minimize(loss, partner_model.trainable_weights, tape=tape)\n\n self.local_steps_index += 1\n if self.local_steps_index == self.local_steps_index_t:\n # save model weights for each partner at local step t\n 
self.model_weights_at_index_t.append(partner.model_weights)\n\n partner.model_weights = partner_model.get_weights()\n self.local_steps_index = 0\n\n # aggregate final global model weights\n self.model_weights = self.aggregate_model_weights(self.active_partners_list)\n\n # build the model for each partner using weights gathered at index t\n for active_partner, weights_t in zip(self.active_partners_list, self.model_weights_at_index_t):\n active_partner.model_weights = weights_t\n\n # aggregate global model weights at index t\n self.global_model_at_index_t = self.aggregate_model_weights(self.active_partners_list)\n\n # sample a new subset of partners of size active_partners_count\n subset_index = random.sample(range(self.partners_count), self.active_partners_count)\n self.subset_u_partners = [self.partners_list[index] for index in subset_index]\n logger.info(\n f\"Subset of partners chosen for lambda update \"\n f\"{['#'+ str(partner.id) for partner in self.subset_u_partners]}\")\n\n # compute losses over a random batch using the global model at index t\n for partner, index in zip(self.subset_u_partners, subset_index):\n random_minibatch_index = np.random.randint(0, self.minibatch_count - 1)\n random_minibatch = self.partners_training_data[partner.id][random_minibatch_index]\n random_batch_index = np.random.randint(0, len(random_minibatch) - 1)\n random_batch = list(random_minibatch)[random_batch_index]\n partner_model = self.build_model_from_weights(self.global_model_at_index_t)\n loss = partner_model.compiled_loss(random_batch[1], partner_model(random_batch[0]))\n # compute (n/m)*loss and add it to the loss vector\n # n is the total number of partners, m is the number of active partners\n self.loss_for_model_at_index_t[index] = \\\n ((self.partners_count / self.active_partners_count) * np.mean(loss.numpy()))\n\n def init_lambda(self):\n \"\"\"\n - initialize lambda vector according to each partner's dataset size\n - this is a probability vector of size partners_count\n \"\"\"\n return np.array(self.amounts_per_partner)\n\n def update_lambda(self):\n \"\"\"\n The update rule for lambda is : lambda_vector(i) =\n Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))\n \"\"\"\n self.lambda_vector += (self.local_steps_index_t * self.lambda_learning_rate * self.loss_for_model_at_index_t)\n self.lambda_vector = project_onto_the_simplex(self.lambda_vector)\n\n # The projection can produce zero probabilities for certain partners which prevents them from\n # participating in the training. 
To avoid this, we assign 1e-3 to each probability smaller than this value.\n if any(self.lambda_vector < 1e-3):\n self.lambda_vector[self.lambda_vector < 1e-3] = 1e-3\n # normalize the probability vector\n self.lambda_vector = self.lambda_vector / np.sum(self.lambda_vector)\n\n def update_active_partners_list(self):\n \"\"\"\n Update the active partners list according to lambda vector\n \"\"\"\n active_partners_indices = (-self.lambda_vector).argsort()[:self.active_partners_count]\n self.active_partners_list = [self.partners_list[index] for index in active_partners_indices]\n\n def initialize_participation_dict(self):\n participation = {}\n for epoch_index in range(self.epoch_count):\n participation[epoch_index] = {}\n for minibatch_index in range(self.minibatch_count):\n participation[epoch_index][minibatch_index] = np.zeros(self.partners_count)\n return participation\n\n def log_partners_participation_rate(self):\n epoch_participation_vector = np.zeros(self.partners_count)\n percentages = []\n for minibatch_index, vect in self.partners_participation[self.epoch_index].items():\n epoch_participation_vector += vect\n percentages = [str(np.round(p_v / self.minibatch_count, 2) * 100) + ' %'\n for p_v in list(epoch_participation_vector)]\n logger.info(f\"Partners {['#' + str(p.id) for p in self.partners_list]} \"\n f\"have the following participation rates, respectively : \"\n f\"{percentages} \"\n f\"at the end of Epoch > {self.epoch_index}\")\n\n final_participation_vector = np.zeros(self.partners_count)\n if self.epoch_index == self.epoch_count - 1:\n for epoch_index in range(self.epoch_count):\n for minibatch_index, vect in self.partners_participation[epoch_index].items():\n final_participation_vector += vect\n percentages = [str(np.round(f_p_v / (self.minibatch_count * self.epoch_count), 2) * 100) + '%'\n for f_p_v in list(final_participation_vector)]\n logger.info(f\"Partners {['#' + str(p.id) for p in self.partners_list]} \"\n f\"have the following participation rates : \"\n f\"{percentages} \"\n f\"during the training\")\n\n @staticmethod\n def aggregate_model_weights(partners_list):\n \"\"\" This method is identical to the one in the aggregator class with few modifications.\n I couldn't use the original aggregator method since it operates on the entire list of partners and\n DRFA requires model aggregation over a subset of partners list only\n \"\"\"\n aggregation_weights = np.ones(len(partners_list), dtype='float32')\n weights_per_layer = list(zip(*[partner.model_weights for partner in partners_list]))\n new_weights = list()\n\n for weights_for_layer in weights_per_layer:\n avg_weights_for_layer = np.average(\n np.array(weights_for_layer), axis=0, weights=aggregation_weights\n )\n new_weights.append(avg_weights_for_layer)\n\n return new_weights\n\n\nclass SequentialLearning(MultiPartnerLearning): # seq-pure\n name = 'Sequential learning'\n\n def __init__(self, scenario, **kwargs):\n super(SequentialLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n self.minibatch_index = i\n logger.info(f\"(seq-pure) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}\")\n self.fit_minibatch()\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a sequential averaging approach\"\"\"\n\n logger.debug(\"Start new seq collaborative round ...\")\n\n model_for_round = self.build_model()\n\n # Evaluate and store accuracy of mini-batch start model\n self.eval_and_log_model_val_perf()\n # Iterate over partners for training each individual model\n shuffled_indexes = np.random.permutation(self.partners_count)\n logger.debug(f\"(seq) Shuffled order for this seqavg collaborative round: {shuffled_indexes}\")\n for idx, partner_index in enumerate(shuffled_indexes):\n partner = self.partners_list[partner_index]\n\n # Train on partner local data set\n if self.val_set == 'global':\n history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index],\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=self.val_data)\n elif self.val_set == 'local':\n history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index],\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=(partner.x_val, partner.y_val))\n else:\n raise ValueError(\"validation set should be 'local' or 'global', not {self.val_set}\")\n\n # Log results\n self.log_partner_perf(partner.id, idx, history.history)\n\n # Save the partner's model in the models' list\n partner.model_weights = model_for_round.get_weights()\n self.model_weights = model_for_round.get_weights()\n\n logger.debug(\"End of seq collaborative round.\")\n\n\nclass SequentialWithFinalAggLearning(SequentialLearning):\n name = 'Sequential learning with final aggregation'\n\n def __init__(self, scenario, **kwargs):\n super(SequentialWithFinalAggLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n logger.info(f\"(seq-final-agg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init model with a copy of the global model\")\n self.minibatch_index = i\n self.fit_minibatch()\n\n # At the end of each epoch, aggregate the models\n self.model_weights = self.aggregator.aggregate_model_weights()\n\n\nclass SequentialAverageLearning(SequentialLearning):\n name = 'Sequential averaged learning'\n\n def __init__(self, scenario, **kwargs):\n super(SequentialAverageLearning, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n logger.info(f\"(seqavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init model with a copy of the global model\")\n self.minibatch_index = i\n self.fit_minibatch()\n\n # At the end of each minibatch, aggregate the models\n self.model_weights = self.aggregator.aggregate_model_weights()\n\n\nclass FedAvgSmodel(FederatedAverageLearning):\n name = 'Federated learning with label flipping'\n\n def __init__(self, scenario, pretrain_epochs=0, epsilon=0.5, **kwargs):\n super(FedAvgSmodel, self).__init__(scenario, **kwargs)\n self.pretrain_epochs = pretrain_epochs\n self.epsilon = epsilon\n if pretrain_epochs > 0:\n self.pretrain_mpl = FederatedAverageLearning(scenario=scenario,\n epoch_count=self.pretrain_epochs,\n is_save_data=False)\n\n def fit(self):\n if self.pretrain_epochs > 0:\n logger.info('Start pre-train...')\n self.pretrain_mpl.fit()\n pretrain_model = self.pretrain_mpl.build_model()\n for p in self.partners_list:\n confusion = confusion_matrix(np.argmax(p.y_train, axis=1),\n np.argmax(pretrain_model.predict(p.x_train), axis=1),\n normalize='pred')\n p.noise_layer_weights = [np.log(confusion.T + 1e-8)]\n self.model_weights[:-1] = self.pretrain_mpl.model_weights[:-1]\n else:\n for p in self.partners_list:\n confusion = np.identity(10) * (1 - self.epsilon) + (self.epsilon / 10)\n p.noise_layer_weights = [np.log(confusion + 1e-8)]\n super(FedAvgSmodel, self).fit()\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a S-Model federated averaging approach\"\"\"\n\n logger.debug(\"Start new S-Model collaborative round ...\")\n\n # Starting model for each partner is the aggregated model from the previous mini-batch iteration\n logger.info(f\"(S-Model) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init each partner's models with a copy of the global model\")\n\n for partner in self.partners_list:\n partner.model_weights = self.model_weights\n\n # Evaluate and store accuracy of mini-batch start model\n self.eval_and_log_model_val_perf()\n\n # Iterate over partners for training each individual model\n for partner_index, partner in enumerate(self.partners_list):\n # Reference the partner's model\n partner_model = partner.build_model()\n x_batch = partner.minibatched_x_train[self.minibatch_index]\n y_batch = partner.minibatched_y_train[self.minibatch_index]\n\n model_input = Input(shape=self.dataset.input_shape)\n x = partner_model(model_input)\n outputs = NoiseAdaptationChannel(weights=partner.noise_layer_weights, name='s-model')(x)\n full_model = Model(inputs=model_input, outputs=outputs, name=f\"full_model_partner_{partner_index}\")\n\n full_model.compile(\n loss=partner_model.loss,\n optimizer=partner_model.optimizer,\n metrics='accuracy',\n )\n\n # Train on partner local data set\n history = full_model.fit(x_batch,\n y_batch,\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=self.val_data)\n\n # Log results of the round\n self.log_partner_perf(partner.id, partner_index, history.history)\n\n # Update the partner's model in the models' list\n partner.noise_layer_weights = full_model.get_layer('s-model').get_weights()\n partner.model_weights = partner_model.get_weights()\n\n logger.debug(\"End of S-Model collaborative 
round.\")\n\n\nclass FederatedGradients(MultiPartnerLearning):\n def __init__(self, scenario, **kwargs):\n super(FederatedGradients, self).__init__(scenario, **kwargs)\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')\n self.model = self.build_model()\n\n def fit_epoch(self):\n # Split the train dataset in mini-batches\n self.split_in_minibatches()\n # Iterate over mini-batches and train\n for i in range(self.minibatch_count):\n self.minibatch_index = i\n self.fit_minibatch()\n\n self.minibatch_index = 0\n\n def fit_minibatch(self):\n \"\"\"Proceed to a collaborative round with a federated averaging approach\"\"\"\n\n logger.debug(\"Start new gradients fusion collaborative round ...\")\n\n # Starting model for each partner is the aggregated model from the previous mini-batch iteration\n logger.info(f\"(gradient fusion) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, \"\n f\"init each partner's models with a copy of the global model\")\n\n for partner in self.partners_list:\n # Evaluate and store accuracy of mini-batch start model\n partner.model_weights = self.model_weights\n self.eval_and_log_model_val_perf()\n\n # Iterate over partners for training each individual model\n for partner_index, partner in enumerate(self.partners_list):\n with tf.GradientTape() as tape:\n loss = self.model.loss(partner.minibatched_y_train[self.minibatch_index],\n self.model(partner.minibatched_x_train[self.minibatch_index]))\n partner.grads = tape.gradient(loss, self.model.trainable_weights)\n\n global_grad = self.aggregator.aggregate_gradients()\n self.model.optimizer.apply_gradients(zip(global_grad, self.model.trainable_weights))\n self.model_weights = self.model.get_weights()\n\n for partner_index, partner in enumerate(self.partners_list):\n val_history = self.model.evaluate(self.val_data[0], self.val_data[1], verbose=False)\n history = self.model.evaluate(partner.minibatched_x_train[self.minibatch_index],\n partner.minibatched_y_train[self.minibatch_index], verbose=False)\n history = {\n \"loss\": [history[0]],\n 'accuracy': [history[1]],\n 'val_loss': [val_history[0]],\n 'val_accuracy': [val_history[1]]\n }\n\n # Log results of the round\n self.log_partner_perf(partner.id, partner_index, history)\n\n logger.debug(\"End of grads-fusion collaborative round.\")\n\n\nclass EnsemblePredictions(MultiPartnerLearning):\n \"\"\"\n Ensemble (average) prediction of several input models\n This approach can only be used with the EnsemblePredictionsModel\n \"\"\"\n\n def __init__(self, scenario, **kwargs):\n super(EnsemblePredictions, self).__init__(scenario, **kwargs)\n\n # First, if only one partner, fall back to dedicated single partner function\n if self.partners_count == 1:\n raise ValueError('Only one partner is provided. 
Please use the dedicated SinglePartnerLearning class')\n\n partner_model_list = [self.dataset.generate_new_model() for _ in range(self.partners_count)]\n self.model = EnsemblePredictionsModel(partner_model_list)\n\n for partner in self.partners_list:\n partner.model_weights = deepcopy(self.model_weights)\n print(id(partner.model_weights))\n\n logger.info(\"Init EnsemblePredictionsModel model\")\n\n def build_model(self):\n partner_model_list = [partner.build_model() for partner in self.partners_list]\n return EnsemblePredictionsModel(partner_model_list)\n\n def fit_epoch(self):\n # Clear Keras' old models\n clear_session()\n\n self.eval_and_log_model_val_perf()\n\n for partner_index, partner in enumerate(self.partners_list):\n\n partner_model = partner.build_model()\n\n # Train on partner local data set\n history = partner_model.fit(partner.x_train,\n partner.y_train,\n batch_size=partner.batch_size,\n verbose=0,\n validation_data=self.val_data)\n\n # Log results of the round\n self.log_partner_perf(partner.id, partner_index, history.history)\n\n # Update the partner's model in the models' list\n partner.model_weights = partner_model.get_weights()\n\n def fit_minibatch(self):\n pass\n"
] | [
[
"numpy.sum",
"numpy.zeros",
"numpy.random.permutation",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.Model",
"numpy.argmax",
"tensorflow.GradientTape",
"numpy.log",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.identity",
"numpy.array",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.random.randint",
"numpy.round",
"tensorflow.keras.Input"
]
] |
aws-samples/amazon-sagemaker-predict-training-resource-usage | [
"a2926c7b5727197e2123679ddc8a6993425df2ec"
] | [
"Canary_Training/quick_start_example_notebooks/3_bert_fine_tuning_canary_train_example/code/.ipynb_checkpoints/train_deploy-checkpoint.py"
] | [
"import argparse\nimport json\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.distributed as dist\nimport torch.utils.data\nimport torch.utils.data.distributed\nfrom torch.utils.data import DataLoader, RandomSampler, TensorDataset\nfrom transformers import AdamW, BertForSequenceClassification, BertTokenizer\nimport glob\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\nMAX_LEN = 64 # this is the max length of the sentence\n\nprint(\"Loading BERT tokenizer...\")\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\n\ndef flat_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\n\ndef _get_train_data_loader(batch_size, training_dir, is_distributed):\n logger.info(\"Get train data loader\")\n\n dataset = pd.concat(map(pd.read_csv, glob.glob(os.path.join(training_dir, \"*.csv\")))) #current dir and all csvs\n sentences = dataset.sentence.values\n labels = dataset.label.values\n\n input_ids = []\n for sent in sentences:\n encoded_sent = tokenizer.encode(sent, add_special_tokens=True)\n input_ids.append(encoded_sent)\n\n # pad shorter sentences\n input_ids_padded = []\n for i in input_ids:\n while len(i) < MAX_LEN:\n i.append(0)\n input_ids_padded.append(i)\n input_ids = input_ids_padded\n\n # mask; 0: added, 1: otherwise\n attention_masks = []\n # For each sentence...\n for sent in input_ids:\n att_mask = [int(token_id > 0) for token_id in sent]\n attention_masks.append(att_mask)\n\n # convert to PyTorch data types.\n train_inputs = torch.tensor(input_ids)\n train_labels = torch.tensor(labels)\n train_masks = torch.tensor(attention_masks)\n\n train_data = TensorDataset(train_inputs, train_masks, train_labels)\n if is_distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n else:\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)\n\n return train_dataloader\n\n\ndef _get_test_data_loader(test_batch_size, training_dir):\n dataset = pd.concat(map(pd.read_csv, glob.glob(os.path.join(training_dir, \"*.csv\")))) #current dir and all csvs\n sentences = dataset.sentence.values\n labels = dataset.label.values\n\n input_ids = []\n for sent in sentences:\n encoded_sent = tokenizer.encode(sent, add_special_tokens=True)\n input_ids.append(encoded_sent)\n\n # pad shorter sentences\n input_ids_padded = []\n for i in input_ids:\n while len(i) < MAX_LEN:\n i.append(0)\n input_ids_padded.append(i)\n input_ids = input_ids_padded\n\n # mask; 0: added, 1: otherwise\n attention_masks = []\n # For each sentence...\n for sent in input_ids:\n att_mask = [int(token_id > 0) for token_id in sent]\n attention_masks.append(att_mask)\n\n # convert to PyTorch data types.\n train_inputs = torch.tensor(input_ids)\n train_labels = torch.tensor(labels)\n train_masks = torch.tensor(attention_masks)\n\n train_data = TensorDataset(train_inputs, train_masks, train_labels)\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=test_batch_size)\n\n return train_dataloader\n\n\ndef train(args):\n is_distributed = len(args.hosts) > 1 and args.backend is not None\n logger.debug(\"Distributed training - %s\", is_distributed)\n use_cuda = args.num_gpus > 0\n 
logger.debug(\"Number of gpus available - %d\", args.num_gpus)\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n if is_distributed:\n # Initialize the distributed environment.\n world_size = len(args.hosts)\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n host_rank = args.hosts.index(args.current_host)\n os.environ[\"RANK\"] = str(host_rank)\n dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)\n logger.info(\n \"Initialized the distributed environment: '%s' backend on %d nodes. \"\n \"Current host rank is %d. Number of gpus: %d\",\n args.backend, dist.get_world_size(),\n dist.get_rank(), args.num_gpus\n )\n\n # set the seed for generating random numbers\n torch.manual_seed(args.seed)\n if use_cuda:\n torch.cuda.manual_seed(args.seed)\n\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir, is_distributed)\n test_loader = _get_test_data_loader(args.test_batch_size, args.test)\n\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of train data\".format(\n len(train_loader.sampler),\n len(train_loader.dataset),\n 100.0 * len(train_loader.sampler) / len(train_loader.dataset),\n )\n )\n\n logger.debug(\n \"Processes {}/{} ({:.0f}%) of test data\".format(\n len(test_loader.sampler),\n len(test_loader.dataset),\n 100.0 * len(test_loader.sampler) / len(test_loader.dataset),\n )\n )\n\n logger.info(\"Starting BertForSequenceClassification\\n\")\n model = BertForSequenceClassification.from_pretrained(\n \"bert-base-uncased\", # Use the 12-layer BERT model, with an uncased vocab.\n num_labels=args.num_labels, # The number of output labels--2 for binary classification.\n output_attentions=False, # Whether the model returns attentions weights.\n output_hidden_states=False, # Whether the model returns all hidden-states.\n )\n\n model = model.to(device)\n if is_distributed and use_cuda:\n # multi-machine multi-gpu case\n model = torch.nn.parallel.DistributedDataParallel(model)\n else:\n # single-machine multi-gpu case or single-machine or multi-machine cpu case\n model = torch.nn.DataParallel(model)\n optimizer = AdamW(\n model.parameters(),\n lr=2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5\n eps=1e-8, # args.adam_epsilon - default is 1e-8.\n )\n\n logger.info(\"End of defining BertForSequenceClassification\\n\")\n for epoch in range(1, args.epochs + 1):\n total_loss = 0\n model.train()\n for step, batch in enumerate(train_loader):\n b_input_ids = batch[0].to(device)\n b_input_mask = batch[1].to(device)\n b_labels = batch[2].to(device)\n model.zero_grad()\n\n outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)\n loss = outputs[0]\n\n total_loss += loss.item()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n # modified based on their gradients, the learning rate, etc.\n optimizer.step()\n if step % args.log_interval == 0:\n logger.info(\n \"Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\".format(\n epoch,\n step * len(batch[0]),\n len(train_loader.sampler),\n 100.0 * step / len(train_loader),\n loss.item(),\n )\n )\n\n logger.info(\"Average training loss: %f\\n\", total_loss / len(train_loader))\n\n test(model, test_loader, device)\n\n logger.info(\"Saving tuned model.\")\n model_2_save = model.module if hasattr(model, \"module\") else model\n model_2_save.save_pretrained(save_directory=args.model_dir)\n\n\ndef test(model, test_loader, device):\n model.eval()\n _, eval_accuracy = 0, 0\n\n with torch.no_grad():\n for batch in test_loader:\n b_input_ids = 
batch[0].to(device)\n b_input_mask = batch[1].to(device)\n b_labels = batch[2].to(device)\n\n outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)\n logits = outputs[0]\n logits = logits.detach().cpu().numpy()\n label_ids = b_labels.to(\"cpu\").numpy()\n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n eval_accuracy += tmp_eval_accuracy\n\n logger.info(\"Test set: Accuracy: %f\\n\", tmp_eval_accuracy)\n\n\ndef model_fn(model_dir):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"================ objects in model_dir ===================\")\n print(os.listdir(model_dir))\n model = BertForSequenceClassification.from_pretrained(model_dir)\n print(\"================ model loaded ===========================\")\n return model.to(device)\n\n\n\n\ndef input_fn(request_body, request_content_type):\n \"\"\"An input_fn that loads a pickled tensor\"\"\"\n if request_content_type == \"application/json\":\n data = json.loads(request_body)\n print(\"================ input sentences ===============\")\n print(data)\n \n if isinstance(data, str):\n data = [data]\n elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], str):\n pass\n else:\n raise ValueError(\"Unsupported input type. Input type can be a string or an non-empty list. \\\n I got {}\".format(data))\n \n #encoded = [tokenizer.encode(x, add_special_tokens=True) for x in data]\n #encoded = tokenizer(data, add_special_tokens=True) \n \n # for backward compatibility use the following way to encode \n # https://github.com/huggingface/transformers/issues/5580\n input_ids = [tokenizer.encode(x, add_special_tokens=True) for x in data]\n \n print(\"================ encoded sentences ==============\")\n print(input_ids)\n\n # pad shorter sentence\n padded = torch.zeros(len(input_ids), MAX_LEN) \n for i, p in enumerate(input_ids):\n padded[i, :len(p)] = torch.tensor(p)\n \n # create mask\n mask = (padded != 0)\n \n print(\"================= padded input and attention mask ================\")\n print(padded, '\\n', mask)\n\n return padded.long(), mask.long()\n raise ValueError(\"Unsupported content type: {}\".format(request_content_type))\n \n\ndef predict_fn(input_data, model):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n model.eval()\n\n input_id, input_mask = input_data\n input_id = input_id.to(device)\n input_mask = input_mask.to(device)\n print(\"============== encoded data =================\")\n print(input_id, input_mask)\n with torch.no_grad():\n y = model(input_id, attention_mask=input_mask)[0]\n print(\"=============== inference result =================\")\n print(y)\n return y\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Data and model checkpoints directories\n parser.add_argument(\n \"--num_labels\", type=int, default=2, metavar=\"N\", help=\"input batch size for training (default: 64)\"\n )\n\n parser.add_argument(\n \"--batch-size\", type=int, default=64, metavar=\"N\", help=\"input batch size for training (default: 64)\"\n )\n parser.add_argument(\n \"--test-batch-size\", type=int, default=1000, metavar=\"N\", help=\"input batch size for testing (default: 1000)\"\n )\n parser.add_argument(\"--epochs\", type=int, default=2, metavar=\"N\", help=\"number of epochs to train (default: 10)\")\n parser.add_argument(\"--lr\", type=float, default=0.01, metavar=\"LR\", help=\"learning rate (default: 0.01)\")\n parser.add_argument(\"--momentum\", type=float, default=0.5, metavar=\"M\", 
help=\"SGD momentum (default: 0.5)\")\n parser.add_argument(\"--seed\", type=int, default=1, metavar=\"S\", help=\"random seed (default: 1)\")\n parser.add_argument(\n \"--log-interval\",\n type=int,\n default=50,\n metavar=\"N\",\n help=\"how many batches to wait before logging training status\",\n )\n parser.add_argument(\n \"--backend\",\n type=str,\n default=None,\n help=\"backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)\",\n )\n\n # Container environment\n parser.add_argument(\"--hosts\", type=list, default=json.loads(os.environ[\"SM_HOSTS\"]))\n parser.add_argument(\"--current-host\", type=str, default=os.environ[\"SM_CURRENT_HOST\"])\n parser.add_argument(\"--model-dir\", type=str, default=os.environ[\"SM_MODEL_DIR\"])\n parser.add_argument(\"--data-dir\", type=str, default=os.environ[\"SM_CHANNEL_TRAIN\"])\n parser.add_argument(\"--test\", type=str, default=os.environ[\"SM_CHANNEL_TESTING\"])\n parser.add_argument(\"--num-gpus\", type=int, default=os.environ[\"SM_NUM_GPUS\"])\n\n train(parser.parse_args())\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.sum",
"torch.distributed.get_world_size",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.get_rank",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.distributed.init_process_group",
"torch.tensor",
"torch.nn.DataParallel",
"torch.no_grad",
"numpy.argmax",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.is_available",
"torch.utils.data.TensorDataset",
"torch.utils.data.RandomSampler",
"torch.device"
]
] |
dabreegster/RAMP-UA | [
"04b7473aed441080ee10b6f68eb8b9135dac6879"
] | [
"tests/opencl/test_summary.py"
] | [
"import numpy as np\r\n\r\nfrom microsim.opencl.ramp.summary import Summary\r\nfrom microsim.opencl.ramp.snapshot import Snapshot\r\n\r\n\r\ndef test_summary_update():\r\n npeople = 50 + 34 + 101 + 551\r\n summary = Summary(snapshot=Snapshot.random(nplaces=10, npeople=npeople, nslots=10), max_time=20)\r\n\r\n time = 10\r\n\r\n statuses = np.concatenate((\r\n np.full(50, 0),\r\n np.full(34, 1),\r\n np.full(101, 4),\r\n np.full(551, 6),\r\n ))\r\n np.random.shuffle(statuses)\r\n\r\n summary.update(time, statuses)\r\n\r\n assert summary.total_counts[0][time] == 50\r\n assert summary.total_counts[1][time] == 34\r\n assert summary.total_counts[2][time] == 0\r\n assert summary.total_counts[3][time] == 0\r\n assert summary.total_counts[4][time] == 101\r\n assert summary.total_counts[5][time] == 0\r\n assert summary.total_counts[6][time] == 551\r\n"
] | [
[
"numpy.random.shuffle",
"numpy.full"
]
] |
vcfgv/mars | [
"ef9e2282208798a5a82e9f9a19538ac92bafee8d"
] | [
"mars/dataframe/datasource/tests/test_datasource_execution.py"
] | [
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\nimport time\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom string import printable\n\nimport numpy as np\nimport pandas as pd\nimport pytest\ntry:\n import pyarrow as pa\nexcept ImportError: # pragma: no cover\n pa = None\ntry:\n import fastparquet\nexcept ImportError: # pragma: no cover\n fastparquet = None\ntry:\n import sqlalchemy\nexcept ImportError: # pragma: no cover\n sqlalchemy = None\n\nfrom .... import tensor as mt\nfrom .... import dataframe as md\nfrom ....config import option_context\nfrom ....tests.core import require_cudf\nfrom ....utils import arrow_array_to_objects\nfrom ..dataframe import from_pandas as from_pandas_df\nfrom ..series import from_pandas as from_pandas_series\nfrom ..index import from_pandas as from_pandas_index, from_tileable\nfrom ..from_tensor import dataframe_from_tensor, dataframe_from_1d_tileables\nfrom ..from_records import from_records\n\n\ndef test_from_pandas_dataframe_execution(setup):\n # test empty DataFrame\n pdf = pd.DataFrame()\n df = from_pandas_df(pdf)\n\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n pdf = pd.DataFrame(columns=list('ab'))\n df = from_pandas_df(pdf)\n\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n pdf = pd.DataFrame(np.random.rand(20, 30), index=[np.arange(20), np.arange(20, 0, -1)])\n df = from_pandas_df(pdf, chunk_size=(13, 21))\n\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n\ndef test_from_pandas_series_execution(setup):\n # test empty Series\n ps = pd.Series(name='a')\n series = from_pandas_series(ps, chunk_size=13)\n\n result = series.execute().fetch()\n pd.testing.assert_series_equal(ps, result)\n\n series = from_pandas_series(ps)\n\n result = series.execute().fetch()\n pd.testing.assert_series_equal(ps, result)\n\n ps = pd.Series(np.random.rand(20), index=[np.arange(20), np.arange(20, 0, -1)], name='a')\n series = from_pandas_series(ps, chunk_size=13)\n\n result = series.execute().fetch()\n pd.testing.assert_series_equal(ps, result)\n\n\ndef test_from_pandas_index_execution(setup):\n pd_index = pd.timedelta_range('1 days', periods=10)\n index = from_pandas_index(pd_index, chunk_size=7)\n\n result = index.execute().fetch()\n pd.testing.assert_index_equal(pd_index, result)\n\n\ndef test_index_execution(setup):\n rs = np.random.RandomState(0)\n pdf = pd.DataFrame(rs.rand(20, 10), index=np.arange(20, 0, -1),\n columns=['a' + str(i) for i in range(10)])\n df = from_pandas_df(pdf, chunk_size=13)\n\n # test df.index\n result = df.index.execute().fetch()\n pd.testing.assert_index_equal(result, pdf.index)\n\n result = df.columns.execute().fetch()\n pd.testing.assert_index_equal(result, pdf.columns)\n\n # df has unknown chunk shape on axis 0\n df = df[df.a1 < 0.5]\n\n # test df.index\n result = df.index.execute().fetch()\n pd.testing.assert_index_equal(result, pdf[pdf.a1 < 
0.5].index)\n\n s = pd.Series(pdf['a1'], index=pd.RangeIndex(20))\n series = from_pandas_series(s, chunk_size=13)\n\n # test series.index which has value\n result = series.index.execute().fetch()\n pd.testing.assert_index_equal(result, s.index)\n\n s = pdf['a2']\n series = from_pandas_series(s, chunk_size=13)\n\n # test series.index\n result = series.index.execute().fetch()\n pd.testing.assert_index_equal(result, s.index)\n\n # test tensor\n raw = rs.random(20)\n t = mt.tensor(raw, chunk_size=13)\n\n result = from_tileable(t).execute().fetch()\n pd.testing.assert_index_equal(result, pd.Index(raw))\n\n\ndef test_initializer_execution(setup):\n arr = np.random.rand(20, 30)\n\n pdf = pd.DataFrame(arr, index=[np.arange(20), np.arange(20, 0, -1)])\n df = md.DataFrame(pdf, chunk_size=(15, 10))\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n df = md.DataFrame(arr, index=md.date_range('2020-1-1', periods=20))\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(\n result, pd.DataFrame(arr, index=pd.date_range('2020-1-1', periods=20)))\n\n df = md.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n index=md.date_range('1/1/2010', periods=6, freq='D'))\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(\n result, pd.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n index=pd.date_range('1/1/2010', periods=6, freq='D')))\n\n s = np.random.rand(20)\n\n ps = pd.Series(s, index=[np.arange(20), np.arange(20, 0, -1)], name='a')\n series = md.Series(ps, chunk_size=7)\n result = series.execute().fetch()\n pd.testing.assert_series_equal(ps, result)\n\n series = md.Series(s, index=md.date_range('2020-1-1', periods=20))\n result = series.execute().fetch()\n pd.testing.assert_series_equal(\n result, pd.Series(s, index=pd.date_range('2020-1-1', periods=20)))\n\n pi = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])\n index = md.Index(md.Index(pi))\n result = index.execute().fetch()\n pd.testing.assert_index_equal(pi, result)\n\n\ndef test_index_only(setup):\n df = md.DataFrame(index=[1, 2, 3])\n pd.testing.assert_frame_equal(df.execute().fetch(),\n pd.DataFrame(index=[1, 2, 3]))\n\n s = md.Series(index=[1, 2, 3])\n pd.testing.assert_series_equal(s.execute().fetch(),\n pd.Series(index=[1, 2, 3]))\n\n df = md.DataFrame(index=md.Index([1, 2, 3]))\n pd.testing.assert_frame_equal(df.execute().fetch(),\n pd.DataFrame(index=[1, 2, 3]))\n\n s = md.Series(index=md.Index([1, 2, 3]), dtype=object)\n pd.testing.assert_series_equal(s.execute().fetch(),\n pd.Series(index=[1, 2, 3], dtype=object))\n\n\ndef test_series_from_tensor(setup):\n data = np.random.rand(10)\n series = md.Series(mt.tensor(data), name='a')\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(data, name='a'))\n\n series = md.Series(mt.tensor(data, chunk_size=3))\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(data))\n\n series = md.Series(mt.ones((10,), chunk_size=4))\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(np.ones(10,)))\n\n index_data = np.random.rand(10)\n series = md.Series(mt.tensor(data, chunk_size=3), name='a',\n index=mt.tensor(index_data, chunk_size=4))\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(data, name='a', index=index_data))\n\n series = md.Series(mt.tensor(data, chunk_size=3), name='a',\n index=md.date_range('2020-1-1', periods=10))\n pd.testing.assert_series_equal(series.execute().fetch(),\n pd.Series(data, name='a', index=pd.date_range('2020-1-1', 
periods=10)))\n\n\ndef test_from_tensor_execution(setup):\n tensor = mt.random.rand(10, 10, chunk_size=5)\n df = dataframe_from_tensor(tensor)\n tensor_res = tensor.execute().fetch()\n pdf_expected = pd.DataFrame(tensor_res)\n df_result = df.execute().fetch()\n pd.testing.assert_index_equal(df_result.index, pd.RangeIndex(0, 10))\n pd.testing.assert_index_equal(df_result.columns, pd.RangeIndex(0, 10))\n pd.testing.assert_frame_equal(df_result, pdf_expected)\n\n # test from tensor with unknown shape\n tensor2 = tensor[tensor[:, 0] < 0.9]\n df = dataframe_from_tensor(tensor2)\n df_result = df.execute().fetch()\n tensor_res = tensor2.execute().fetch()\n pdf_expected = pd.DataFrame(tensor_res)\n pd.testing.assert_frame_equal(df_result.reset_index(drop=True), pdf_expected)\n\n # test converted with specified index_value and columns\n tensor2 = mt.random.rand(2, 2, chunk_size=1)\n df2 = dataframe_from_tensor(tensor2, index=pd.Index(['a', 'b']), columns=pd.Index([3, 4]))\n df_result = df2.execute().fetch()\n pd.testing.assert_index_equal(df_result.index, pd.Index(['a', 'b']))\n pd.testing.assert_index_equal(df_result.columns, pd.Index([3, 4]))\n\n # test converted from 1-d tensor\n tensor3 = mt.array([1, 2, 3])\n df3 = dataframe_from_tensor(tensor3)\n result3 = df3.execute().fetch()\n pdf_expected = pd.DataFrame(np.array([1, 2, 3]))\n pd.testing.assert_frame_equal(pdf_expected, result3)\n\n # test converted from identical chunks\n tensor4 = mt.ones((10, 10), chunk_size=3)\n df4 = dataframe_from_tensor(tensor4)\n result4 = df4.execute().fetch()\n pdf_expected = pd.DataFrame(tensor4.execute().fetch())\n pd.testing.assert_frame_equal(pdf_expected, result4)\n\n # from tensor with given index\n tensor5 = mt.ones((10, 10), chunk_size=3)\n df5 = dataframe_from_tensor(tensor5, index=np.arange(0, 20, 2))\n result5 = df5.execute().fetch()\n pdf_expected = pd.DataFrame(tensor5.execute().fetch(),\n index=np.arange(0, 20, 2))\n pd.testing.assert_frame_equal(pdf_expected, result5)\n\n # from tensor with given index that is a tensor\n raw7 = np.random.rand(10, 10)\n tensor7 = mt.tensor(raw7, chunk_size=3)\n index_raw7 = np.random.rand(10)\n index7 = mt.tensor(index_raw7, chunk_size=4)\n df7 = dataframe_from_tensor(tensor7, index=index7)\n result7 = df7.execute().fetch()\n pdf_expected = pd.DataFrame(raw7, index=index_raw7)\n pd.testing.assert_frame_equal(pdf_expected, result7)\n\n # from tensor with given index is a md.Index\n raw10 = np.random.rand(10, 10)\n tensor10 = mt.tensor(raw10, chunk_size=3)\n index10 = md.date_range('2020-1-1', periods=10, chunk_size=3)\n df10 = dataframe_from_tensor(tensor10, index=index10)\n result10 = df10.execute().fetch()\n pdf_expected = pd.DataFrame(raw10, index=pd.date_range('2020-1-1', periods=10))\n pd.testing.assert_frame_equal(pdf_expected, result10)\n\n # from tensor with given columns\n tensor6 = mt.ones((10, 10), chunk_size=3)\n df6 = dataframe_from_tensor(tensor6, columns=list('abcdefghij'))\n result6 = df6.execute().fetch()\n pdf_expected = pd.DataFrame(tensor6.execute().fetch(),\n columns=list('abcdefghij'))\n pd.testing.assert_frame_equal(pdf_expected, result6)\n\n # from 1d tensors\n raws8 = [('a', np.random.rand(8)), ('b', np.random.randint(10, size=8)),\n ('c', [''.join(np.random.choice(list(printable), size=6)) for _ in range(8)])]\n tensors8 = OrderedDict((r[0], mt.tensor(r[1], chunk_size=3)) for r in raws8)\n raws8.append(('d', 1))\n raws8.append(('e', pd.date_range('2020-1-1', periods=8)))\n tensors8['d'] = 1\n tensors8['e'] = raws8[-1][1]\n df8 = 
dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8])\n result = df8.execute().fetch()\n pdf_expected = pd.DataFrame(OrderedDict(raws8))\n pd.testing.assert_frame_equal(result, pdf_expected)\n\n # from 1d tensors and specify index with a tensor\n index_raw9 = np.random.rand(8)\n index9 = mt.tensor(index_raw9, chunk_size=4)\n df9 = dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8],\n index=index9)\n result = df9.execute().fetch()\n pdf_expected = pd.DataFrame(OrderedDict(raws8), index=index_raw9)\n pd.testing.assert_frame_equal(result, pdf_expected)\n\n # from 1d tensors and specify index\n df11 = dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8],\n index=md.date_range('2020-1-1', periods=8))\n result = df11.execute().fetch()\n pdf_expected = pd.DataFrame(OrderedDict(raws8),\n index=pd.date_range('2020-1-1', periods=8))\n pd.testing.assert_frame_equal(result, pdf_expected)\n\n\ndef test_from_records_execution(setup):\n dtype = np.dtype([('x', 'int'), ('y', 'double'), ('z', '<U16')])\n\n ndarr = np.ones((10,), dtype=dtype)\n pdf_expected = pd.DataFrame.from_records(ndarr, index=pd.RangeIndex(10))\n\n # from structured array of mars\n tensor = mt.ones((10,), dtype=dtype, chunk_size=3)\n df1 = from_records(tensor)\n df1_result = df1.execute().fetch()\n pd.testing.assert_frame_equal(df1_result, pdf_expected)\n\n # from structured array of numpy\n df2 = from_records(ndarr)\n df2_result = df2.execute().fetch()\n pd.testing.assert_frame_equal(df2_result, pdf_expected)\n\n\ndef test_read_csv_execution(setup):\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64), columns=['a', 'b', 'c'])\n df.to_csv(file_path)\n\n pdf = pd.read_csv(file_path, index_col=0)\n r = md.read_csv(file_path, index_col=0)\n mdf = r.execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n # size_res = self.executor.execute_dataframe(r, mock=True)\n # assert sum(s[0] for s in size_res) == os.stat(file_path).st_size\n\n mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=10).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n mdf = md.read_csv(file_path, index_col=0, nrows=1).execute().fetch()\n pd.testing.assert_frame_equal(df[:1], mdf)\n\n # test names and usecols\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64),\n columns=['a', 'b', 'c'])\n df.to_csv(file_path, index=False)\n\n mdf = md.read_csv(file_path, usecols=['c', 'b']).execute().fetch()\n pd.testing.assert_frame_equal(\n pd.read_csv(file_path, usecols=['c', 'b']), mdf)\n\n mdf = md.read_csv(file_path, names=['a', 'b', 'c'],\n usecols=['c', 'b']).execute().fetch()\n pd.testing.assert_frame_equal(\n pd.read_csv(file_path, names=['a', 'b', 'c'], usecols=['c', 'b']), mdf)\n\n mdf = md.read_csv(file_path, names=['a', 'b', 'c'],\n usecols=['a', 'c']).execute().fetch()\n pd.testing.assert_frame_equal(\n pd.read_csv(file_path, names=['a', 'b', 'c'], usecols=['a', 'c']), mdf)\n\n mdf = md.read_csv(file_path, usecols=['a', 'c']).execute().fetch()\n pd.testing.assert_frame_equal(\n pd.read_csv(file_path, usecols=['a', 'c']), mdf)\n\n # test sep\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['a', 'b', 'c'])\n df.to_csv(file_path, 
sep=';')\n\n pdf = pd.read_csv(file_path, sep=';', index_col=0)\n mdf = md.read_csv(file_path, sep=';', index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, sep=';', index_col=0, chunk_bytes=10).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n # test missing value\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame({'c1': [np.nan, 'a', 'b', 'c'], 'c2': [1, 2, 3, np.nan],\n 'c3': [np.nan, np.nan, 3.4, 2.2]})\n df.to_csv(file_path)\n\n pdf = pd.read_csv(file_path, index_col=0)\n mdf = md.read_csv(file_path, index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=12).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n index = pd.date_range(start='1/1/2018', periods=100)\n df = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n }, index=index)\n df.to_csv(file_path)\n\n pdf = pd.read_csv(file_path, index_col=0)\n mdf = md.read_csv(file_path, index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=100).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n # test nan\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame({\n 'col1': np.random.rand(100, ),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n })\n df.iloc[20:, :] = pd.NA\n df.to_csv(file_path)\n\n pdf = pd.read_csv(file_path, index_col=0)\n mdf = md.read_csv(file_path, index_col=0, head_lines=10, chunk_bytes=200)\n result = mdf.execute().fetch()\n pd.testing.assert_frame_equal(pdf, result)\n\n # dtypes is inferred as expected\n pd.testing.assert_series_equal(mdf.dtypes, pd.Series(['float64', 'object', 'int64'],\n index=df.columns))\n\n # test compression\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.gzip')\n\n index = pd.date_range(start='1/1/2018', periods=100)\n df = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n }, index=index)\n df.to_csv(file_path, compression='gzip')\n\n pdf = pd.read_csv(file_path, compression='gzip', index_col=0)\n mdf = md.read_csv(file_path, compression='gzip', index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, compression='gzip', index_col=0,\n chunk_bytes='1k').execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n # test multiple files\n with tempfile.TemporaryDirectory() as tempdir:\n df = pd.DataFrame(np.random.rand(300, 3), columns=['a', 'b', 'c'])\n\n file_paths = [os.path.join(tempdir, f'test{i}.csv') for i in range(3)]\n df[:100].to_csv(file_paths[0])\n df[100:200].to_csv(file_paths[1])\n df[200:].to_csv(file_paths[2])\n\n mdf = md.read_csv(file_paths, index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf)\n\n mdf2 = md.read_csv(file_paths, index_col=0, chunk_bytes=50).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf2)\n\n # test wildcards in path\n with tempfile.TemporaryDirectory() as tempdir:\n df = pd.DataFrame(np.random.rand(300, 3), columns=['a', 'b', 'c'])\n\n file_paths = [os.path.join(tempdir, 
f'test{i}.csv') for i in range(3)]\n df[:100].to_csv(file_paths[0])\n df[100:200].to_csv(file_paths[1])\n df[200:].to_csv(file_paths[2])\n\n # As we can not guarantee the order in which these files are processed,\n # the result may not keep the original order.\n mdf = md.read_csv(f'{tempdir}/*.csv', index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf.sort_index())\n\n mdf2 = md.read_csv(f'{tempdir}/*.csv', index_col=0, chunk_bytes=50).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf2.sort_index())\n\n # test read directory\n with tempfile.TemporaryDirectory() as tempdir:\n testdir = os.path.join(tempdir, 'test_dir')\n os.makedirs(testdir, exist_ok=True)\n\n df = pd.DataFrame(np.random.rand(300, 3), columns=['a', 'b', 'c'])\n\n file_paths = [os.path.join(testdir, f'test{i}.csv') for i in range(3)]\n df[:100].to_csv(file_paths[0])\n df[100:200].to_csv(file_paths[1])\n df[200:].to_csv(file_paths[2])\n\n # As we can not guarantee the order in which these files are processed,\n # the result may not keep the original order.\n mdf = md.read_csv(testdir, index_col=0).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf.sort_index())\n\n mdf2 = md.read_csv(testdir, index_col=0, chunk_bytes=50).execute().fetch()\n pd.testing.assert_frame_equal(df, mdf2.sort_index())\n\n\[email protected](pa is None, reason='pyarrow not installed')\ndef test_read_csv_use_arrow_dtype(setup):\n rs = np.random.RandomState(0)\n df = pd.DataFrame({\n 'col1': rs.rand(100),\n 'col2': rs.choice(['a' * 2, 'b' * 3, 'c' * 4], (100,)),\n 'col3': np.arange(100)\n })\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n df.to_csv(file_path, index=False)\n\n pdf = pd.read_csv(file_path)\n mdf = md.read_csv(file_path, use_arrow_dtype=True)\n result = mdf.execute().fetch()\n assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)\n\n with tempfile.TemporaryDirectory() as tempdir:\n with option_context({'dataframe.use_arrow_dtype': True}):\n file_path = os.path.join(tempdir, 'test.csv')\n df.to_csv(file_path, index=False)\n\n pdf = pd.read_csv(file_path)\n mdf = md.read_csv(file_path)\n result = mdf.execute().fetch()\n assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)\n\n # test compression\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.gzip')\n df.to_csv(file_path, compression='gzip', index=False)\n\n pdf = pd.read_csv(file_path, compression='gzip')\n mdf = md.read_csv(file_path, compression='gzip', use_arrow_dtype=True)\n result = mdf.execute().fetch()\n assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)\n\n\n@require_cudf\ndef test_read_csv_gpu_execution(setup_gpu):\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n })\n df.to_csv(file_path, index=False)\n\n pdf = pd.read_csv(file_path)\n mdf = md.read_csv(file_path, gpu=True).execute().fetch()\n pd.testing.assert_frame_equal(pdf.reset_index(drop=True), 
mdf.to_pandas().reset_index(drop=True))\n\n mdf2 = md.read_csv(file_path, gpu=True, chunk_bytes=200).execute().fetch()\n pd.testing.assert_frame_equal(pdf.reset_index(drop=True), mdf2.to_pandas().reset_index(drop=True))\n\n\ndef test_read_csv_without_index(setup):\n # test csv file without storing index\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['a', 'b', 'c'])\n df.to_csv(file_path, index=False)\n\n pdf = pd.read_csv(file_path)\n mdf = md.read_csv(file_path).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf)\n\n mdf2 = md.read_csv(file_path, chunk_bytes=10).execute().fetch()\n pd.testing.assert_frame_equal(pdf, mdf2)\n\n file_path2 = os.path.join(tempdir, 'test.csv')\n df = pd.DataFrame(np.random.RandomState(0).rand(100, 10),\n columns=[f'col{i}' for i in range(10)])\n df.to_csv(file_path2, index=False)\n\n mdf3 = md.read_csv(file_path2, chunk_bytes=os.stat(file_path2).st_size / 5)\n result = mdf3.execute().fetch()\n expected = pd.read_csv(file_path2)\n pd.testing.assert_frame_equal(result, expected)\n\n # test incremental_index = False\n mdf4 = md.read_csv(file_path2, chunk_bytes=os.stat(file_path2).st_size / 5,\n incremental_index=False)\n result = mdf4.execute().fetch()\n assert not result.index.is_monotonic_increasing\n expected = pd.read_csv(file_path2)\n pd.testing.assert_frame_equal(result.reset_index(drop=True), expected)\n\n\[email protected](sqlalchemy is None, reason='sqlalchemy not installed')\ndef test_read_sql_execution(setup):\n import sqlalchemy as sa\n\n rs = np.random.RandomState(0)\n test_df = pd.DataFrame({'a': np.arange(10).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(10)],\n 'c': rs.rand(10),\n 'd': [datetime.fromtimestamp(time.time() + 3600 * (i - 5))\n for i in range(10)]})\n\n with tempfile.TemporaryDirectory() as d:\n table_name = 'test'\n table_name2 = 'test2'\n uri = 'sqlite:///' + os.path.join(d, 'test.db')\n\n test_df.to_sql(table_name, uri, index=False)\n\n # test read with table name\n r = md.read_sql_table('test', uri, chunk_size=4)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df)\n\n # test read with sql string and offset method\n r = md.read_sql_query('select * from test where c > 0.5', uri,\n parse_dates=['d'], chunk_size=4)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df[test_df.c > 0.5].reset_index(drop=True))\n\n # test read with sql string and partition method with integer cols\n r = md.read_sql('select * from test where b > \\'s5\\'', uri,\n parse_dates=['d'], partition_col='a', num_partitions=3)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df[test_df.b > 's5'].reset_index(drop=True))\n\n # test read with sql string and partition method with datetime cols\n r = md.read_sql_query('select * from test where b > \\'s5\\'', uri,\n parse_dates={'d': '%Y-%m-%d %H:%M:%S'},\n partition_col='d', num_partitions=3)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df[test_df.b > 's5'].reset_index(drop=True))\n\n # test read with sql string and partition method with datetime cols\n r = md.read_sql_query('select * from test where b > \\'s5\\'', uri,\n parse_dates=['d'], partition_col='d', num_partitions=3,\n index_col='d')\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df[test_df.b > 's5'].set_index('d'))\n\n # test SQL that return no result\n r = 
md.read_sql_query('select * from test where a > 1000', uri)\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, pd.DataFrame(columns=test_df.columns))\n\n engine = sa.create_engine(uri)\n m = sa.MetaData()\n try:\n # test index_col and columns\n r = md.read_sql_table('test', engine.connect(), chunk_size=4,\n index_col='a', columns=['b', 'd'])\n result = r.execute().fetch()\n expected = test_df.copy(deep=True)\n expected.set_index('a', inplace=True)\n del expected['c']\n pd.testing.assert_frame_equal(result, expected)\n\n # do not specify chunk_size\n r = md.read_sql_table('test', engine.connect(),\n index_col='a', columns=['b', 'd'])\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, expected)\n\n table = sa.Table(table_name, m, autoload=True,\n autoload_with=engine)\n r = md.read_sql_table(table, engine, chunk_size=4,\n index_col=[table.columns['a'], table.columns['b']],\n columns=[table.columns['c'], 'd'])\n result = r.execute().fetch()\n expected = test_df.copy(deep=True)\n expected.set_index(['a', 'b'], inplace=True)\n pd.testing.assert_frame_equal(result, expected)\n\n # test table with primary key\n sa.Table(table_name2, m,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('a', sa.Integer),\n sa.Column('b', sa.String),\n sa.Column('c', sa.Float),\n sa.Column('d', sa.DateTime))\n m.create_all(engine)\n test_df = test_df.copy(deep=True)\n test_df.index.name = 'id'\n test_df.to_sql(table_name2, uri, if_exists='append')\n\n r = md.read_sql_table(table_name2, engine, chunk_size=4, index_col='id')\n result = r.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df)\n finally:\n engine.dispose()\n\n\[email protected](pa is None, reason='pyarrow not installed')\ndef test_read_sql_use_arrow_dtype(setup):\n rs = np.random.RandomState(0)\n test_df = pd.DataFrame({'a': np.arange(10).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(10)],\n 'c': rs.rand(10),\n 'd': [datetime.fromtimestamp(time.time() + 3600 * (i - 5))\n for i in range(10)]})\n\n with tempfile.TemporaryDirectory() as d:\n table_name = 'test'\n uri = 'sqlite:///' + os.path.join(d, 'test.db')\n\n test_df.to_sql(table_name, uri, index=False)\n\n r = md.read_sql_table('test', uri, chunk_size=4, use_arrow_dtype=True)\n result = r.execute().fetch()\n assert isinstance(r.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), test_df)\n\n # test read with sql string and offset method\n r = md.read_sql_query('select * from test where c > 0.5', uri,\n parse_dates=['d'], chunk_size=4,\n use_arrow_dtype=True)\n result = r.execute().fetch()\n assert isinstance(r.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result),\n test_df[test_df.c > 0.5].reset_index(drop=True))\n\n\ndef test_date_range_execution(setup):\n for closed in [None, 'left', 'right']:\n # start, periods, freq\n dr = md.date_range('2020-1-1', periods=10, chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', periods=10, closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # end, periods, freq\n dr = md.date_range(end='2020-1-10', periods=10, chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range(end='2020-1-10', periods=10, closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # 
start, end, freq\n dr = md.date_range('2020-1-1', '2020-1-10', chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', '2020-1-10', closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # start, end and periods\n dr = md.date_range('2020-1-1', '2020-1-10', periods=19,\n chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', '2020-1-10', periods=19,\n closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # start, end and freq\n dr = md.date_range('2020-1-1', '2020-1-10', freq='12H',\n chunk_size=3, closed=closed)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', '2020-1-10', freq='12H',\n closed=closed)\n pd.testing.assert_index_equal(result, expected)\n\n # test timezone\n dr = md.date_range('2020-1-1', periods=10, tz='Asia/Shanghai', chunk_size=7)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', periods=10, tz='Asia/Shanghai')\n pd.testing.assert_index_equal(result, expected)\n\n # test periods=0\n dr = md.date_range('2020-1-1', periods=0)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', periods=0)\n pd.testing.assert_index_equal(result, expected)\n\n # test start == end\n dr = md.date_range('2020-1-1', '2020-1-1', periods=1)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', '2020-1-1', periods=1)\n pd.testing.assert_index_equal(result, expected)\n\n # test normalize=True\n dr = md.date_range('2020-1-1', periods=10, normalize=True, chunk_size=4)\n\n result = dr.execute().fetch()\n expected = pd.date_range('2020-1-1', periods=10, normalize=True)\n pd.testing.assert_index_equal(result, expected)\n\n # test freq\n dr = md.date_range(start='1/1/2018', periods=5, freq='M', chunk_size=3)\n\n result = dr.execute().fetch()\n expected = pd.date_range(start='1/1/2018', periods=5, freq='M')\n pd.testing.assert_index_equal(result, expected)\n\n\[email protected](pa is None, reason='pyarrow not installed')\ndef test_read_parquet_arrow(setup):\n test_df = pd.DataFrame({'a': np.arange(10).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(10)],\n 'c': np.random.rand(10), })\n\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n test_df.to_parquet(file_path)\n\n df = md.read_parquet(file_path)\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df)\n # size_res = self.executor.execute_dataframe(df, mock=True)\n # assert sum(s[0] for s in size_res) > test_df.memory_usage(deep=True).sum()\n\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.parquet')\n test_df.to_parquet(file_path, row_group_size=3)\n\n df = md.read_parquet(file_path, groups_as_chunks=True, columns=['a', 'b'])\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(result.reset_index(drop=True), test_df[['a', 'b']])\n\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.parquet')\n test_df.to_parquet(file_path, row_group_size=5)\n\n df = md.read_parquet(file_path, groups_as_chunks=True,\n use_arrow_dtype=True,\n incremental_index=True)\n result = df.execute().fetch()\n assert isinstance(df.dtypes.iloc[1], md.ArrowStringDtype)\n assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)\n pd.testing.assert_frame_equal(arrow_array_to_objects(result), test_df)\n\n # test wildcards in path\n with tempfile.TemporaryDirectory() as tempdir:\n df = pd.DataFrame({'a': 
np.arange(300).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(300)],\n 'c': np.random.rand(300), })\n\n file_paths = [os.path.join(tempdir, f'test{i}.parquet') for i in range(3)]\n df[:100].to_parquet(file_paths[0], row_group_size=50)\n df[100:200].to_parquet(file_paths[1], row_group_size=30)\n df[200:].to_parquet(file_paths[2])\n\n mdf = md.read_parquet(f'{tempdir}/*.parquet')\n r = mdf.execute().fetch()\n pd.testing.assert_frame_equal(df, r.sort_values('a').reset_index(drop=True))\n\n mdf = md.read_parquet(f'{tempdir}/*.parquet', groups_as_chunks=True)\n r = mdf.execute().fetch()\n pd.testing.assert_frame_equal(df, r.sort_values('a').reset_index(drop=True))\n\n\[email protected](fastparquet is None, reason='fastparquet not installed')\ndef test_read_parquet_fast_parquet(setup):\n test_df = pd.DataFrame({'a': np.arange(10).astype(np.int64, copy=False),\n 'b': [f's{i}' for i in range(10)],\n 'c': np.random.rand(10), })\n\n # test fastparquet engine\n with tempfile.TemporaryDirectory() as tempdir:\n file_path = os.path.join(tempdir, 'test.csv')\n test_df.to_parquet(file_path, compression=None)\n\n df = md.read_parquet(file_path, engine='fastparquet')\n result = df.execute().fetch()\n pd.testing.assert_frame_equal(result, test_df)\n # size_res = self.executor.execute_dataframe(df, mock=True)\n # assert sum(s[0] for s in size_res) > test_df.memory_usage(deep=True).sum()\n"
] | [
[
"pandas.timedelta_range",
"numpy.ones",
"pandas.Series",
"numpy.dtype",
"numpy.random.RandomState",
"pandas.RangeIndex",
"numpy.random.choice",
"numpy.random.rand",
"pandas.testing.assert_frame_equal",
"pandas.date_range",
"pandas.read_csv",
"numpy.arange",
"pandas.testing.assert_series_equal",
"pandas.Index",
"pandas.DataFrame",
"pandas.IntervalIndex.from_tuples",
"pandas.testing.assert_index_equal",
"numpy.array",
"numpy.random.randint"
]
] |
less-lab-uva/CS4501-Website | [
"7583e2d800c4450192ea5c22e8e815f6d2ab7edb"
] | [
"labs/images/lab5/train_model.py"
] | [
"# Thanks: https://machinelearningmastery.com/how-to-develop-a-cnn-from-scratch-for-fashion-mnist-clothing-classification/\n\n# model with double the filters for the fashion mnist dataset\nimport cv2\nimport glob\nimport argparse\nimport numpy as np\n\nfrom numpy import mean\nfrom numpy import std\nfrom numpy import argmax\nfrom matplotlib import pyplot\nfrom sklearn.model_selection import KFold\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.layers import Conv2D, Dropout, MaxPooling2D, Dense, Flatten\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=50)\nparser.add_argument('--batch_size', type=int, default=64)\nparser.add_argument('--h', type=int, default=48)\nparser.add_argument('--w', type=int, default=48)\nargs = parser.parse_args()\n\n# define dnn model (simple)\ndef define_model(number_classes):\n model = Sequential()\n # model.add(Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='he_uniform', input_shape=(args.h, args.w, 1)))\n # model.add(MaxPooling2D((2, 2)))\n model.add(Flatten(input_shape=(args.h, args.w, 1)))\n model.add(Dense(500, activation='relu', kernel_initializer='he_uniform'))\n # model.add(Dropout(0.2))\n model.add(Dense(500, activation='relu', kernel_initializer='he_uniform'))\n # model.add(Dropout(0.2))\n model.add(Dense(number_classes, activation='softmax'))\n opt = Adam(lr=0.0001)\n # compile model\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\n# get the classes\ndef get_classes(dataset):\n # Get the class names from the folder names\n classes = glob.glob(dataset)\n classes.sort()\n for i in range(len(classes)):\n classes[i] = classes[i][:-1]\n pos = classes[i].rfind('/')\n classes[i] = classes[i][pos+1:]\n return classes\n\n# load and prepare the image\ndef load_image(filename):\n # load the image\n img = load_img(filename, grayscale=True, target_size=(args.h, args.w))\n # convert to array\n img = img_to_array(img)\n # reshape into a single sample with 1 channel\n img = img.reshape(1, args.h, args.w, 1)\n # prepare pixel data\n img = img.astype('float32')\n img = img / 255.0\n return img\n\n# convert a folder to an array\ndef folder_to_array(file_names, classes):\n x = []\n y = []\n for f in file_names:\n # Create data\n image = load_image(f)\n x.append(image)\n # Create label\n label = []\n # Get the subfolder\n folder_name = f\n pos = folder_name.rfind('/')\n folder_name = folder_name[:pos]\n pos = folder_name.rfind('/')\n folder_name = folder_name[pos+1:]\n # Check if the name is in the subfolder\n for c in classes:\n if c in folder_name:\n label.append(1)\n else:\n label.append(0)\n y.append(label)\n\n x = np.array(x, dtype='float64')\n y = np.array(y, dtype='int64')\n\n return x, y\n\n# load the dataset from the folders\ndef load_dataset():\n\n # Get the classes\n classes = get_classes(\"./training_data/*/\")\n print(\"Classes: \" + str(classes))\n\n # Create the training data\n training_files = glob.glob (\"./training_data/*/*.jp*\") # your image path\n trainX, trainY = folder_to_array(training_files, classes)\n\n # Create the testing data\n testing_files = glob.glob (\"./testing_data/*/*.jp*\") # your image path\n testX, testY = 
folder_to_array(testing_files, classes)\n\n # Shuffle the data\n idx = np.random.permutation(len(trainX))\n trainX, trainY = trainX[idx], trainY[idx]\n\n trainX = trainX.reshape((trainX.shape[0], args.h, args.w, 1))\n testX = testX.reshape((testX.shape[0], args.h, args.w, 1))\n\n print(\"Training data shape: \" + str(trainX.shape))\n print(\"Training label shape: \" + str(trainY.shape))\n\n print(\"Test data shape: \" + str(testX.shape))\n print(\"Test label shape: \" + str(testY.shape))\n\n\n return trainX, trainY, testX, testY\n\n# plot diagnostic learning curves\ndef summarize_diagnostics(history):\n # plot loss\n pyplot.subplot(111)\n pyplot.title('Classification Accuracy')\n pyplot.plot(history.history['acc'], color='blue', label='training accuracy')\n pyplot.plot(history.history['val_acc'], color='orange', label='validation accuracy')\n pyplot.legend()\n pyplot.show()\n\n# summarize model performance\ndef summarize_performance(scores):\n # print summary\n print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))\n # box and whisker plots of results\n pyplot.boxplot(scores)\n pyplot.show()\n\n# run the training and save a model\ndef run_training():\n # load dataset\n trainX, trainY, testX, testY = load_dataset()\n # define model\n model = define_model(number_classes=len(testY[0]))\n # Define early stopping\n callback = EarlyStopping(monitor=\"val_acc\", patience=250)\n # fit model\n history = model.fit(trainX, trainY, epochs=args.epochs, batch_size=args.batch_size, verbose=1, validation_split=0.1, shuffle=True, callbacks=[callback])\n # save model\n print(model.summary())\n model.save('marine_model.h5')\n # Display the training data\n summarize_diagnostics(history)\n\n# run for evaluating a model\ndef run_testing():\n # load dataset\n trainX, trainY, testX, testY = load_dataset()\n # load model\n model = load_model('marine_model.h5')\n # evaluate model on test dataset\n _, acc = model.evaluate(testX, testY, verbose=1)\n print('Test Accuracy: ' + str(acc * 100.0))\n\n# load an image and predict the class\ndef run_single_image():\n classes = get_classes(\"./training_data/*/\")\n # load model\n model = load_model('marine_model.h5')\n # For all images in single_prediction\n sample_images = glob.glob(\"./testing_data/*.jp*\")\n for img_name in sample_images:\n # Load the image\n image = load_image(img_name)\n # predict the class\n prediction = model.predict(image)\n result = argmax(prediction, axis=-1)\n print('Single image class (' + img_name + '): ' + str(classes[result[0]]))\n\n# Running the code\nrun_training()\nrun_testing()\nrun_single_image()\n\n\n"
] | [
[
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.legend",
"numpy.std",
"numpy.argmax",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.mean"
]
] |
TescaF/point_cloud_io | [
"a5848d48f341b88b43f6b28b88d8b048eeefcf8a"
] | [
"src/pub_pose.py"
] | [
"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion\nimport numpy as np\nimport math\n\ndef publish():\n pub = rospy.Publisher('pose_truth', PoseStamped, queue_size=10)\n rospy.init_node('talker', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n #pt = [0.21,-0.011,0.4,0.3,-0.6,-0.01]\n # Sciossors_01_28 pt = [0.21,-0.011,0.4,0.3,-0.6,-0.01]\n #Shears_02_01 pt = [0.189,-0.015,0.4,-0.4,-0.6,-0.01]\n pt = [0.188,-0.015,0.4,-0.45,-0.6,-0.01]\n # Scissors_08_01 pt = [0.2,-0.012,0.4,0,-1,0]\n\n ests = [['scissors_01_00000027', [0.024235617160797116,-0.011359463453292846,0.019534289836883545]], \n['scissors_01_00000060', [0.0011834951639175398,-0.013148486614227295,-0.005846852660179138]], \n['scissors_01_00000003', [0.024251672744750975,-0.011589790105819703,0.0003066921234130859]], \n['shears_01_00000009', [-0.009251792550086976,-0.017923964738845825,0.010005302429199218]], \n['shears_01_00000033', [-0.027354883074760434,-0.012586298942565919,0.031511585712432864]], \n['shears_01_00000090', [-0.03358910477161407,-0.013879684925079346,-0.014482853412628173]]] \n pt = ests[0][1] + [0,0,1]\n #pt[2] += 0.05\n\n pos = pose_from_vec(pt)\n pose = PoseStamped()\n pose.pose = pos\n pose.header.frame_id = \"base_link\"\n\n while not rospy.is_shutdown():\n pub.publish(pose)\n rate.sleep()\n\ndef pose_from_vec(waypoint):\n pose = Pose()\n pose.position.x = waypoint[0]\n pose.position.y = waypoint[1]\n pose.position.z = waypoint[2] \n\n u = [1,0,0]\n norm = np.linalg.norm(np.array(waypoint[3:]))\n v = np.array(waypoint[3:])/norm \n if (np.array_equal(u, v)):\n pose.orientation.w = 1\n pose.orientation.x = 0\n pose.orientation.y = 0\n pose.orientation.z = 0\n elif (np.array_equal(u, np.negative(v))):\n pose.orientation.w = 0\n pose.orientation.x = 0\n pose.orientation.y = 0\n pose.orientation.z = 1\n else:\n half = [u[0]+v[0], u[1]+v[1], u[2]+v[2]]\n pose.orientation.w = np.dot(u, half)\n temp = np.cross(u, half)\n pose.orientation.x = temp[0]\n pose.orientation.y = temp[1]\n pose.orientation.z = temp[2]\n norm = math.sqrt(pose.orientation.x*pose.orientation.x + pose.orientation.y*pose.orientation.y + \n pose.orientation.z*pose.orientation.z + pose.orientation.w*pose.orientation.w)\n if norm == 0:\n norm = 1\n pose.orientation.x /= norm\n pose.orientation.y /= norm\n pose.orientation.z /= norm\n pose.orientation.w /= norm\n return pose\n\nif __name__ == '__main__':\n try:\n publish()\n except rospy.ROSInterruptException:\n pass\n"
] | [
[
"numpy.cross",
"numpy.array_equal",
"numpy.negative",
"numpy.array",
"numpy.dot"
]
] |
d3netxer/peartree | [
"577b077c169c7f102d5947b5f9f273fc965eb41f"
] | [
"peartree/paths.py"
] | [
"from typing import Any, Dict, List\n\nimport networkx as nx\nimport numpy as np\nimport partridge as ptg\n\nfrom .graph import (generate_empty_md_graph, generate_summary_graph_elements,\n make_synthetic_system_network, populate_graph)\nfrom .synthetic import SyntheticTransitNetwork\nfrom .toolkit import generate_random_name\nfrom .utilities import generate_nodes_gdf_from_graph, log\n\nFALLBACK_STOP_COST_DEFAULT = (30 * 60) # 30 minutes, converted to seconds\n\n\nclass InvalidGTFS(Exception):\n # Let's have a custom exception for when we read in GTFS files\n pass\n\n\nclass InvalidTimeBracket(Exception):\n pass\n\n\ndef _calculate_means_default(\n target_time_start: float,\n target_time_end: float,\n arrival_times: List) -> float:\n # This is the default method that is provided to the load feed operation\n # and applied to the observed arrival times at a given stop. From this\n # array of arrival times, the average delay between stops is calcualted\n if len(arrival_times) < 2:\n return np.nan\n\n # Make sure that values are in ascending order (also converts to list)\n arrival_times = np.array(arrival_times)\n arrival_times.sort()\n\n # Recast as numpy array\n first = arrival_times[1:]\n second = arrival_times[:-1]\n wait_seconds = list(first - second)\n\n # Recast arrival times as just a python list\n arrival_times = list(arrival_times)\n\n # Also ensure that both the first and last trip include context\n # framed by the evaluation time period\n from_start_time_to_first_arrival = arrival_times[0] - target_time_start\n wait_seconds.append(from_start_time_to_first_arrival)\n\n from_last_arrival_to_end_time = target_time_end - arrival_times[-1]\n wait_seconds.append(from_last_arrival_to_end_time)\n\n # Note: Can implement something more substantial here that takes into\n # account divergent/erratic performance or intentional timing\n # clusters that are not evenly dispersed in a custom method that\n # would replace this default method\n na = np.array(wait_seconds)\n\n # Prune 0-second delays as these excessively reduce wait-time estimates\n na_no_zeroes = na[na > 0]\n\n # Naive implementation: halve the headway to get average wait time\n average_wait = na_no_zeroes.mean() / 2\n return average_wait\n\n\ndef get_representative_feed(file_loc: str,\n day_type: str='busiest') -> ptg.gtfs.Feed:\n \"\"\"\n Given a filepath, extract a partridge feed object, holding a \\\n representative set of schedule patterns, extracted from the GTFS zip \\\n file, as a set of pandas DataFrames.\n\n Parameters\n ----------\n file_loc : str\n The location (filepath) of the GTFS zip file.\n day_type : str\n The name of the type of representative feed desired. Currently, only \\\n one type is supported, busiest. This extracts the schedule pattern \\\n for a day that has the most service on it. 
This is determined by the \\\n        day with the most trips on it.\n\n    Returns\n    -------\n    feed : ptg.gtfs.Feed\n        A partridge feed object, holding related schedule information as \\\n        pandas DataFrames for the busiest day in the available schedule.\n    \"\"\"\n\n    # Extract service ids and then trip counts by those dates\n    try:\n        service_ids_by_date = ptg.read_service_ids_by_date(file_loc)\n        trip_counts_by_date = ptg.read_trip_counts_by_date(file_loc)\n\n    # Raised by partridge if no valid dates returned\n    except AssertionError:\n        # Make sure we have some valid values returned in trips\n        raise InvalidGTFS('No valid trip counts by date '\n                          'were identified in GTFS.')\n\n    # TODO: Due to partridge's assertion error being raised, this\n    #       check may no longer be needed.\n    if not len(trip_counts_by_date.items()):\n        # Otherwise, error out\n        raise InvalidGTFS('No valid trip counts by date '\n                          'were identified in GTFS.')\n\n    # At this point, different methods can be implemented to help select how\n    # to pick which date/schedule id to use\n    if day_type == 'busiest':\n        # Choose the service id that has the most trips associated with it\n        (selected_date,\n         trip_count) = max(trip_counts_by_date.items(), key=lambda p: p[1])\n    else:\n        raise NotImplementedError('Unsupported day type string supplied.')\n\n    log('Selected_date: {}'.format(selected_date))\n    log('Number of trips on that date: {}'.format(trip_count))\n\n    all_service_ids = '\\n\\t'.join(service_ids_by_date[selected_date])\n    log('\\nAll related service IDs: \\n\\t{}'.format(all_service_ids))\n\n    sub = service_ids_by_date[selected_date]\n    feed_query = {'trips.txt': {'service_id': sub}}\n    return ptg.load_feed(file_loc, view=feed_query)\n\n\ndef load_feed_as_graph(feed: ptg.gtfs.Feed,\n                       start_time: int,\n                       end_time: int,\n                       name: str=None,\n                       existing_graph: nx.MultiDiGraph=None,\n                       connection_threshold: float=50.0,\n                       walk_speed_kmph: float=4.5,\n                       stop_cost_method: Any=_calculate_means_default,\n                       fallback_stop_cost: bool=FALLBACK_STOP_COST_DEFAULT,\n                       interpolate_times: bool=True,\n                       impute_walk_transfers: bool=False,\n                       use_multiprocessing: bool=False,\n                       add_trips_per_edge: bool=False):\n    \"\"\"\n    Convert a feed object into a NetworkX Graph, or connect to an existing \\\n    NetworkX graph if one is supplied.\n\n    Parameters\n    ----------\n    feed : ptg.gtfs.Feed\n        A feed object from Partridge holding a representation of the \\\n        desired schedule ids and their related schedule data from an \\\n        operator GTFS\n    start_time : int\n        Represented in seconds after midnight; indicates the start time \\\n        with which to take the subset of the target feed schedule \\\n        to be used to measure impedance between stops along \\\n        the route, as well as cost (wait time) to board at each stop\n    end_time : int\n        Represented in seconds after midnight; indicates the end time \\\n        with which to take the subset of the target feed schedule \\\n        to be used to measure impedance between stops along \\\n        the route, as well as cost (wait time) to board at each stop\n    name : str\n        Name of the operator, which is used to create a unique ID for each \\\n        of the stops, routes, etc. 
in the feed being supplied\n    existing_graph : networkx.Graph\n        An existing graph containing other operator or schedule data\n    connection_threshold : float\n        Threshold by which to create a connection with an existing stop \\\n        in the existing_graph graph, measured in meters\n    walk_speed_kmph : float\n        Walk speed in km/h that is used to determine the cost in time when \\\n        walking between two nodes that get an internal connection created\n    stop_cost_method : Any\n        A method is passed in here that handles an arrival time numpy array \\\n        and, from that array, calculates a representative average wait time \\\n        value, in seconds, for that stop.\n    fallback_stop_cost: bool\n        Cost in seconds to board a line at a stop if no other data is able \\\n        to be calculated from schedule data for that stop to determine \\\n        what the wait time is. Example of this situation would be when \\\n        there is only one scheduled stop time found for the stop id.\n    interpolate_times : bool\n        A boolean flag to indicate whether or not to infill intermediary \\\n        stops that do not have all intermediary stop arrival times specified \\\n        in the GTFS schedule.\n    impute_walk_transfers : bool\n        A flag to indicate whether to add in walk connections between nodes \\\n        that are close enough, as measured using connection_threshold\n    use_multiprocessing: bool\n        A flag to indicate whether or not to leverage multiprocessing where \\\n        available to attempt to speed up trivially parallelizable operations\n\n    Returns\n    -------\n    G : nx.MultiDiGraph\n        networkx.Graph, the loaded, combined representation of the schedule \\\n        data from the feed subset by the time parameters provided\n    \"\"\"\n    # Generate a random name for name if it is None\n    if not name:\n        name = generate_random_name()\n\n    # Some sanity checking, to make sure only positive values are provided\n    if (start_time < 0) or (end_time < 0):\n        raise InvalidTimeBracket('Invalid start or end target times provided.')\n\n    if end_time <= start_time:\n        raise InvalidTimeBracket('Invalid ordering: Start time '\n                                 'is greater than end time.')\n\n    (summary_edge_costs,\n     wait_times_by_stop) = generate_summary_graph_elements(feed,\n                                                           start_time,\n                                                           end_time,\n                                                           fallback_stop_cost,\n                                                           interpolate_times,\n                                                           stop_cost_method,\n                                                           use_multiprocessing)\n\n    #print(\"print summary_edge_costs\")\n    #print(summary_edge_costs)\n\n    # This is a flag used to check if we need to run any additional steps\n    # after the feed is returned to ensure that new nodes and edges can connect\n    # with existing ones (if they exist/a graph is passed in)\n    existing_graph_supplied = bool(existing_graph)\n\n    # G is either a new MultiDiGraph or one passed in from before\n    if existing_graph_supplied:\n        # TODO: If passed from before we should run some checks to ensure\n        #       it is valid as well as set a flag to create join points with\n        #       other feeds so that they can be linked when the next is added.\n        G = existing_graph\n    else:\n        G = generate_empty_md_graph(name)\n\n    return populate_graph(G,\n                          name,\n                          feed,\n                          wait_times_by_stop,\n                          summary_edge_costs,\n                          connection_threshold,\n                          walk_speed_kmph,\n                          impute_walk_transfers,\n                          add_trips_per_edge)\n\n\ndef load_synthetic_network_as_graph(\n        reference_geojson: Dict,\n        name: str=None,\n        existing_graph: nx.MultiDiGraph=None,\n        connection_threshold: float=50.0,\n        walk_speed_kmph: float=4.5,\n        impute_walk_transfers: bool=True,\n        wait_time_cost_method: Any=lambda x: x / 2) -> nx.MultiDiGraph:\n    \"\"\"\n    Convert formatted transit FeatureCollection into a directed network graph.\n\n    Utilizing a correctly formatted 
transit FeatureCollection, generate a \\\n    directed network graph (or add to an existing one), based off of features \\\n    included in the reference_geojson parameter.\n\n    Parameters\n    ----------\n    reference_geojson : dict\n        The TransitJSON; a specifically formatted GeoJSON\n    name : str\n        The name of the graph\n    existing_graph : nx.MultiDiGraph\n        An existing, populated transit NetworkX graph generated from peartree\n    connection_threshold : float\n        Distance in meters within which a nearby transit stop should be \\\n        deemed acceptably close for a walk transfer to be also added\n    walk_speed_kmph : float\n        Speed in kilometers per hour to be used as the reference walk speed \\\n        for calculating cost (impedance in time) of walk transfers\n    impute_walk_transfers : bool\n        A flag to indicate whether or not walk transfers should be calculated\n    wait_time_cost_method: Any\n        Function that, given a headway float value, produces a wait time value\n\n    Returns\n    -------\n    G : nx.MultiDiGraph\n        The multi-directed graph\n    \"\"\"\n\n    # Generate a random name for name if it is None\n    if not name:\n        name = generate_random_name()\n\n    # This is a flag used to check if we need to run any additional steps\n    # after the feed is returned to ensure that new nodes and edges can connect\n    # with existing ones (if they exist/a graph is passed in)\n    existing_graph_supplied = bool(existing_graph)\n\n    # G is either a new MultiDiGraph or one passed in from before\n    if existing_graph_supplied:\n        # TODO: If passed from before we should run some checks to ensure\n        #       it is valid as well as set a flag to create join points with\n        #       other feeds so that they can be linked when the next is added.\n        G = existing_graph\n        existing_graph_nodes = generate_nodes_gdf_from_graph(\n            G, to_epsg_crs=2163)\n    else:\n        G = generate_empty_md_graph(name)\n        existing_graph_nodes = None\n\n    # First, instantiate whole TransitJSON as a SyntheticTransitNetwork object;\n    # will provide necessary validation prior to synthetic network construction\n    as_synthetic_network = SyntheticTransitNetwork(\n        reference_geojson,\n        wait_time_cost_method,\n        existing_graph_nodes)\n\n    return make_synthetic_system_network(\n        G,\n        name,\n        as_synthetic_network,\n        connection_threshold,\n        walk_speed_kmph,\n        impute_walk_transfers)\n"
] | [
[
"numpy.array"
]
] |
plertvilai/birdCam_jetson | [
"8e74bbc81c289b3e0158edbd471fda0f3ed2b9fb"
] | [
"python/birdVid_ML/JetsonYolo.py"
] | [
"import cv2\nimport numpy as np\nfrom elements.yolo import OBJ_DETECTION\n\nObject_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',\n 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',\n 'hair drier', 'toothbrush' ]\n\nObject_colors = list(np.random.rand(80,3)*255)\nObject_detector = OBJ_DETECTION('weights/yolov5s.pt', Object_classes)\n\ndef gstreamer_pipeline(\n capture_width=1280,\n capture_height=720,\n display_width=1280,\n display_height=720,\n framerate=60,\n flip_method=0,\n):\n return (\n \"nvarguscamerasrc ! \"\n \"video/x-raw(memory:NVMM), \"\n \"width=(int)%d, height=(int)%d, \"\n \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\n \"nvvidconv flip-method=%d ! \"\n \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n \"videoconvert ! \"\n \"video/x-raw, format=(string)BGR ! appsink\"\n % (\n capture_width,\n capture_height,\n framerate,\n flip_method,\n display_width,\n display_height,\n )\n )\n\n\n# To flip the image, modify the flip_method parameter (0 and 2 are the most common)\nprint(gstreamer_pipeline(flip_method=0))\n\n# cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)\ncap = cv2.VideoCapture(\"1627775013.mp4\")\nif cap.isOpened():\n window_handle = cv2.namedWindow(\"CSI Camera\", cv2.WINDOW_AUTOSIZE)\n # Window\n while cv2.getWindowProperty(\"CSI Camera\", 0) >= 0:\n ret, frame = cap.read()\n if ret and not(frame is None):\n # detection process\n objs = Object_detector.detect(frame)\n\n # plotting\n for obj in objs:\n # print(obj)\n label = obj['label']\n score = obj['score']\n [(xmin,ymin),(xmax,ymax)] = obj['bbox']\n color = Object_colors[Object_classes.index(label)]\n frame = cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), color, 2) \n frame = cv2.putText(frame, f'{label} ({str(score)})', (xmin,ymin), cv2.FONT_HERSHEY_SIMPLEX , 0.75, color, 1, cv2.LINE_AA)\n else:\n break\n cv2.imshow(\"CSI Camera\", frame)\n keyCode = cv2.waitKey(30)\n if keyCode == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\nelse:\n print(\"Unable to open camera\")\n"
] | [
[
"numpy.random.rand"
]
] |
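The JetsonYolo.py entry above interleaves detection and drawing inside the capture loop. As an illustrative sketch only — assuming detections arrive as dicts with 'label', 'score', and 'bbox' keys, as they do in that file; draw_detections and the sample data below are hypothetical — the plotting step can be isolated like this:

import cv2
import numpy as np

def draw_detections(frame, objs, classes, colors):
    # Draw one bounding box and a "label (score)" caption per detection.
    for obj in objs:
        label = obj['label']
        score = obj['score']
        (xmin, ymin), (xmax, ymax) = obj['bbox']
        # cv2 expects a plain tuple of numbers for the color argument.
        color = tuple(int(c) for c in colors[classes.index(label)])
        frame = cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
        frame = cv2.putText(frame, '{} ({:.2f})'.format(label, score),
                            (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, color, 1, cv2.LINE_AA)
    return frame

# Example with a single hypothetical detection on a blank BGR frame:
classes = ['person', 'bird']
colors = list(np.random.rand(len(classes), 3) * 255)
frame = np.zeros((720, 1280, 3), dtype=np.uint8)
objs = [{'label': 'bird', 'score': 0.91, 'bbox': [(100, 100), (300, 260)]}]
frame = draw_detections(frame, objs, classes, colors)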
andyljones/ray | [
"52dfde1cbb7131fd62ebcb00f5a2b22ced7321ad"
] | [
"test/runtest.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport logging\nimport os\nimport random\nimport re\nimport setproctitle\nimport shutil\nimport socket\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\nfrom collections import defaultdict, namedtuple, OrderedDict\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nimport ray.test.cluster_utils\nimport ray.test.test_utils\nfrom ray.utils import _random_string\n\nlogger = logging.getLogger(__name__)\n\n\ndef assert_equal(obj1, obj2):\n module_numpy = (type(obj1).__module__ == np.__name__\n or type(obj2).__module__ == np.__name__)\n if module_numpy:\n empty_shape = ((hasattr(obj1, \"shape\") and obj1.shape == ())\n or (hasattr(obj2, \"shape\") and obj2.shape == ()))\n if empty_shape:\n # This is a special case because currently np.testing.assert_equal\n # fails because we do not properly handle different numerical\n # types.\n assert obj1 == obj2, (\"Objects {} and {} are \"\n \"different.\".format(obj1, obj2))\n else:\n np.testing.assert_equal(obj1, obj2)\n elif hasattr(obj1, \"__dict__\") and hasattr(obj2, \"__dict__\"):\n special_keys = [\"_pytype_\"]\n assert (set(list(obj1.__dict__.keys()) + special_keys) == set(\n list(obj2.__dict__.keys()) + special_keys)), (\"Objects {} \"\n \"and {} are \"\n \"different.\".format(\n obj1, obj2))\n for key in obj1.__dict__.keys():\n if key not in special_keys:\n assert_equal(obj1.__dict__[key], obj2.__dict__[key])\n elif type(obj1) is dict or type(obj2) is dict:\n assert_equal(obj1.keys(), obj2.keys())\n for key in obj1.keys():\n assert_equal(obj1[key], obj2[key])\n elif type(obj1) is list or type(obj2) is list:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are lists with \"\n \"different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif type(obj1) is tuple or type(obj2) is tuple:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are tuples with \"\n \"different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif (ray.serialization.is_named_tuple(type(obj1))\n or ray.serialization.is_named_tuple(type(obj2))):\n assert len(obj1) == len(obj2), (\"Objects {} and {} are named tuples \"\n \"with different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n else:\n assert obj1 == obj2, \"Objects {} and {} are different.\".format(\n obj1, obj2)\n\n\nif sys.version_info >= (3, 0):\n long_extras = [0, np.array([[\"hi\", u\"hi\"], [1.3, 1]])]\nelse:\n\n long_extras = [\n long(0), # noqa: E501,F821\n np.array([\n [\"hi\", u\"hi\"],\n [1.3, long(1)] # noqa: E501,F821\n ])\n ]\n\nPRIMITIVE_OBJECTS = [\n 0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], \"a\",\n string.printable, \"\\u262F\", u\"hello world\", u\"\\xff\\xfe\\x9c\\x001\\x000\\x00\",\n None, True, False, [], (), {},\n np.int8(3),\n np.int32(4),\n np.int64(5),\n np.uint8(3),\n np.uint32(4),\n np.uint64(5),\n np.float32(1.9),\n np.float64(1.9),\n np.zeros([100, 100]),\n np.random.normal(size=[100, 100]),\n np.array([\"hi\", 3]),\n np.array([\"hi\", 3], dtype=object)\n] + long_extras\n\nCOMPLEX_OBJECTS = [\n [[[[[[[[[[[[]]]]]]]]]]]],\n {\"obj{}\".format(i): np.random.normal(size=[100, 100])\n for i in range(10)},\n # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {\n # (): {(): 
{}}}}}}}}}}}}},\n (\n (((((((((), ), ), ), ), ), ), ), ), ),\n {\n \"a\": {\n \"b\": {\n \"c\": {\n \"d\": {}\n }\n }\n }\n }\n]\n\n\nclass Foo(object):\n def __init__(self, value=0):\n self.value = value\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return other.value == self.value\n\n\nclass Bar(object):\n def __init__(self):\n for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):\n setattr(self, \"field{}\".format(i), val)\n\n\nclass Baz(object):\n def __init__(self):\n self.foo = Foo()\n self.bar = Bar()\n\n def method(self, arg):\n pass\n\n\nclass Qux(object):\n def __init__(self):\n self.objs = [Foo(), Bar(), Baz()]\n\n\nclass SubQux(Qux):\n def __init__(self):\n Qux.__init__(self)\n\n\nclass CustomError(Exception):\n pass\n\n\nPoint = namedtuple(\"Point\", [\"x\", \"y\"])\nNamedTupleExample = namedtuple(\"Example\",\n \"field1, field2, field3, field4, field5\")\n\nCUSTOM_OBJECTS = [\n Exception(\"Test object.\"),\n CustomError(),\n Point(11, y=22),\n Foo(),\n Bar(),\n Baz(), # Qux(), SubQux(),\n NamedTupleExample(1, 1.0, \"hi\", np.zeros([3, 5]), [1, 2, 3])\n]\n\nBASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS\n\nLIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]\nTUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]\n# The check that type(obj).__module__ != \"numpy\" should be unnecessary, but\n# otherwise this seems to fail on Mac OS X on Travis.\nDICT_OBJECTS = (\n [{\n obj: obj\n } for obj in PRIMITIVE_OBJECTS\n if (obj.__hash__ is not None and type(obj).__module__ != \"numpy\")] + [{\n 0: obj\n } for obj in BASE_OBJECTS] + [{\n Foo(123): Foo(456)\n }])\n\nRAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS\n\n\[email protected]\ndef ray_start():\n # Start the Ray processes.\n ray.init(num_cpus=1)\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\[email protected]\ndef shutdown_only():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\ndef test_passing_arguments_by_value(ray_start):\n @ray.remote\n def f(x):\n return x\n\n # Check that we can pass arguments by value to remote functions and\n # that they are uncorrupted.\n for obj in RAY_TEST_OBJECTS:\n assert_equal(obj, ray.get(f.remote(obj)))\n\n\ndef test_ray_recursive_objects(ray_start):\n class ClassA(object):\n pass\n\n # Make a list that contains itself.\n lst = []\n lst.append(lst)\n # Make an object that contains itself as a field.\n a1 = ClassA()\n a1.field = a1\n # Make two objects that contain each other as fields.\n a2 = ClassA()\n a3 = ClassA()\n a2.field = a3\n a3.field = a2\n # Make a dictionary that contains itself.\n d1 = {}\n d1[\"key\"] = d1\n # Create a list of recursive objects.\n recursive_objects = [lst, a1, a2, a3, d1]\n\n # Check that exceptions are thrown when we serialize the recursive\n # objects.\n for obj in recursive_objects:\n with pytest.raises(Exception):\n ray.put(obj)\n\n\ndef test_passing_arguments_by_value_out_of_the_box(ray_start):\n @ray.remote\n def f(x):\n return x\n\n # Test passing lambdas.\n\n def temp():\n return 1\n\n assert ray.get(f.remote(temp))() == 1\n assert ray.get(f.remote(lambda x: x + 1))(3) == 4\n\n # Test sets.\n assert ray.get(f.remote(set())) == set()\n s = {1, (1, 2, \"hi\")}\n assert ray.get(f.remote(s)) == s\n\n # Test types.\n assert ray.get(f.remote(int)) == int\n assert ray.get(f.remote(float)) == float\n assert ray.get(f.remote(str)) == str\n\n class Foo(object):\n def __init__(self):\n pass\n\n # Make 
sure that we can put and get a custom type. Note that the result\n # won't be \"equal\" to Foo.\n ray.get(ray.put(Foo))\n\n\ndef test_putting_object_that_closes_over_object_id(ray_start):\n # This test is here to prevent a regression of\n # https://github.com/ray-project/ray/issues/1317.\n\n class Foo(object):\n def __init__(self):\n self.val = ray.put(0)\n\n def method(self):\n f\n\n f = Foo()\n ray.put(f)\n\n\ndef test_put_get(shutdown_only):\n ray.init(num_cpus=0)\n\n for i in range(100):\n value_before = i * 10**6\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = i * 10**6 * 1.0\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = \"h\" * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = [1] * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n\ndef test_custom_serializers(shutdown_only):\n ray.init(num_cpus=1)\n\n class Foo(object):\n def __init__(self):\n self.x = 3\n\n def custom_serializer(obj):\n return 3, \"string1\", type(obj).__name__\n\n def custom_deserializer(serialized_obj):\n return serialized_obj, \"string2\"\n\n ray.register_custom_serializer(\n Foo, serializer=custom_serializer, deserializer=custom_deserializer)\n\n assert ray.get(ray.put(Foo())) == ((3, \"string1\", Foo.__name__), \"string2\")\n\n class Bar(object):\n def __init__(self):\n self.x = 3\n\n ray.register_custom_serializer(\n Bar, serializer=custom_serializer, deserializer=custom_deserializer)\n\n @ray.remote\n def f():\n return Bar()\n\n assert ray.get(f.remote()) == ((3, \"string1\", Bar.__name__), \"string2\")\n\n\ndef test_serialization_final_fallback(ray_start):\n pytest.importorskip(\"catboost\")\n # This test will only run when \"catboost\" is installed.\n from catboost import CatBoostClassifier\n\n model = CatBoostClassifier(\n iterations=2,\n depth=2,\n learning_rate=1,\n loss_function=\"Logloss\",\n logging_level=\"Verbose\")\n\n reconstructed_model = ray.get(ray.put(model))\n assert set(model.get_params().items()) == set(\n reconstructed_model.get_params().items())\n\n\ndef test_register_class(shutdown_only):\n ray.init(num_cpus=2)\n\n # Check that putting an object of a class that has not been registered\n # throws an exception.\n class TempClass(object):\n pass\n\n ray.get(ray.put(TempClass()))\n\n # Test subtypes of dictionaries.\n value_before = OrderedDict([(\"hello\", 1), (\"world\", 2)])\n object_id = ray.put(value_before)\n assert value_before == ray.get(object_id)\n\n value_before = defaultdict(lambda: 0, [(\"hello\", 1), (\"world\", 2)])\n object_id = ray.put(value_before)\n assert value_before == ray.get(object_id)\n\n value_before = defaultdict(lambda: [], [(\"hello\", 1), (\"world\", 2)])\n object_id = ray.put(value_before)\n assert value_before == ray.get(object_id)\n\n # Test passing custom classes into remote functions from the driver.\n @ray.remote\n def f(x):\n return x\n\n foo = ray.get(f.remote(Foo(7)))\n assert foo == Foo(7)\n\n regex = re.compile(r\"\\d+\\.\\d*\")\n new_regex = ray.get(f.remote(regex))\n # This seems to fail on the system Python 3 that comes with\n # Ubuntu, so it is commented out for now:\n # assert regex == new_regex\n # Instead, we do this:\n assert regex.pattern == new_regex.pattern\n\n # Test returning 
custom classes created on workers.\n @ray.remote\n def g():\n return SubQux(), Qux()\n\n subqux, qux = ray.get(g.remote())\n assert subqux.objs[2].foo.value == 0\n\n # Test exporting custom class definitions from one worker to another\n # when the worker is blocked in a get.\n class NewTempClass(object):\n def __init__(self, value):\n self.value = value\n\n @ray.remote\n def h1(x):\n return NewTempClass(x)\n\n @ray.remote\n def h2(x):\n return ray.get(h1.remote(x))\n\n assert ray.get(h2.remote(10)).value == 10\n\n # Test registering multiple classes with the same name.\n @ray.remote(num_return_vals=3)\n def j():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = []\n for _ in range(5):\n results += j.remote()\n for i in range(len(results) // 3):\n c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])\n\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n @ray.remote\n def k():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = ray.get([k.remote() for _ in range(5)])\n for c0, c1, c2 in results:\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n\ndef test_keyword_args(shutdown_only):\n @ray.remote\n def keyword_fct1(a, b=\"hello\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct2(a=\"hello\", b=\"world\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct3(a, b, c=\"hello\", d=\"world\"):\n return \"{} {} {} {}\".format(a, b, c, d)\n\n ray.init(num_cpus=1)\n\n x = keyword_fct1.remote(1)\n assert ray.get(x) == \"1 hello\"\n x = keyword_fct1.remote(1, \"hi\")\n assert ray.get(x) == \"1 hi\"\n x = keyword_fct1.remote(1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n x = keyword_fct1.remote(a=1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n\n x = keyword_fct2.remote(a=\"w\", b=\"hi\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(b=\"hi\", a=\"w\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(a=\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(b=\"hi\")\n assert ray.get(x) == \"hello hi\"\n x = keyword_fct2.remote(\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(\"w\", \"hi\")\n assert ray.get(x) == \"w hi\"\n\n x = keyword_fct3.remote(0, 1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(a=0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, d=\"hi\", c=\"w\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, c=\"w\")\n assert ray.get(x) == \"0 1 w world\"\n x = keyword_fct3.remote(0, 1, d=\"hi\")\n assert ray.get(x) == \"0 1 hello hi\"\n x = keyword_fct3.remote(0, 1)\n assert 
ray.get(x) == \"0 1 hello world\"\n x = keyword_fct3.remote(a=0, b=1)\n assert ray.get(x) == \"0 1 hello world\"\n\n # Check that we cannot pass invalid keyword arguments to functions.\n @ray.remote\n def f1():\n return\n\n @ray.remote\n def f2(x, y=0, z=0):\n return\n\n # Make sure we get an exception if too many arguments are passed in.\n with pytest.raises(Exception):\n f1.remote(3)\n\n with pytest.raises(Exception):\n f1.remote(x=3)\n\n with pytest.raises(Exception):\n f2.remote(0, w=0)\n\n with pytest.raises(Exception):\n f2.remote(3, x=3)\n\n # Make sure we get an exception if too many arguments are passed in.\n with pytest.raises(Exception):\n f2.remote(1, 2, 3, 4)\n\n @ray.remote\n def f3(x):\n return x\n\n assert ray.get(f3.remote(4)) == 4\n\n\ndef test_variable_number_of_args(shutdown_only):\n @ray.remote\n def varargs_fct1(*a):\n return \" \".join(map(str, a))\n\n @ray.remote\n def varargs_fct2(a, *b):\n return \" \".join(map(str, b))\n\n try:\n\n @ray.remote\n def kwargs_throw_exception(**c):\n return ()\n\n kwargs_exception_thrown = False\n except Exception:\n kwargs_exception_thrown = True\n\n ray.init(num_cpus=1)\n\n x = varargs_fct1.remote(0, 1, 2)\n assert ray.get(x) == \"0 1 2\"\n x = varargs_fct2.remote(0, 1, 2)\n assert ray.get(x) == \"1 2\"\n\n assert kwargs_exception_thrown\n\n @ray.remote\n def f1(*args):\n return args\n\n @ray.remote\n def f2(x, y, *args):\n return x, y, args\n\n assert ray.get(f1.remote()) == ()\n assert ray.get(f1.remote(1)) == (1, )\n assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)\n with pytest.raises(Exception):\n f2.remote()\n with pytest.raises(Exception):\n f2.remote(1)\n assert ray.get(f2.remote(1, 2)) == (1, 2, ())\n assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))\n assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))\n\n def testNoArgs(self):\n @ray.remote\n def no_op():\n pass\n\n self.init_ray()\n\n ray.get(no_op.remote())\n\n\ndef test_defining_remote_functions(shutdown_only):\n ray.init(num_cpus=3)\n\n # Test that we can define a remote function in the shell.\n @ray.remote\n def f(x):\n return x + 1\n\n assert ray.get(f.remote(0)) == 1\n\n # Test that we can redefine the remote function.\n @ray.remote\n def f(x):\n return x + 10\n\n while True:\n val = ray.get(f.remote(0))\n assert val in [1, 10]\n if val == 10:\n break\n else:\n logger.info(\"Still using old definition of f, trying again.\")\n\n # Test that we can close over plain old data.\n data = [\n np.zeros([3, 5]), (1, 2, \"a\"), [0.0, 1.0, 1 << 62], 1 << 60, {\n \"a\": np.zeros(3)\n }\n ]\n\n @ray.remote\n def g():\n return data\n\n ray.get(g.remote())\n\n # Test that we can close over modules.\n @ray.remote\n def h():\n return np.zeros([3, 5])\n\n assert_equal(ray.get(h.remote()), np.zeros([3, 5]))\n\n @ray.remote\n def j():\n return time.time()\n\n ray.get(j.remote())\n\n # Test that we can define remote functions that call other remote\n # functions.\n @ray.remote\n def k(x):\n return x + 1\n\n @ray.remote\n def k2(x):\n return ray.get(k.remote(x))\n\n @ray.remote\n def m(x):\n return ray.get(k2.remote(x))\n\n assert ray.get(k.remote(1)) == 2\n assert ray.get(k2.remote(1)) == 2\n assert ray.get(m.remote(1)) == 2\n\n def test_submit_api(shutdown_only):\n ray.init(num_cpus=1, num_gpus=1, resources={\"Custom\": 1})\n\n @ray.remote\n def f(n):\n return list(range(n))\n\n @ray.remote\n def g():\n return ray.get_gpu_ids()\n\n assert f._remote([0], num_return_vals=0) is None\n id1 = f._remote(args=[1], num_return_vals=1)\n assert ray.get(id1) == [0]\n id1, id2 = 
f._remote(args=[2], num_return_vals=2)\n assert ray.get([id1, id2]) == [0, 1]\n id1, id2, id3 = f._remote(args=[3], num_return_vals=3)\n assert ray.get([id1, id2, id3]) == [0, 1, 2]\n assert ray.get(\n g._remote(\n args=[], num_cpus=1, num_gpus=1,\n resources={\"Custom\": 1})) == [0]\n infeasible_id = g._remote(args=[], resources={\"NonexistentCustom\": 1})\n ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)\n assert len(ready_ids) == 0\n assert len(remaining_ids) == 1\n\n @ray.remote\n class Actor(object):\n def __init__(self, x, y=0):\n self.x = x\n self.y = y\n\n def method(self, a, b=0):\n return self.x, self.y, a, b\n\n def gpu_ids(self):\n return ray.get_gpu_ids()\n\n a = Actor._remote(\n args=[0], kwargs={\"y\": 1}, num_gpus=1, resources={\"Custom\": 1})\n\n id1, id2, id3, id4 = a.method._remote(\n args=[\"test\"], kwargs={\"b\": 2}, num_return_vals=4)\n assert ray.get([id1, id2, id3, id4]) == [0, 1, \"test\", 2]\n\n\ndef test_get_multiple(shutdown_only):\n ray.init(num_cpus=1)\n object_ids = [ray.put(i) for i in range(10)]\n assert ray.get(object_ids) == list(range(10))\n\n # Get a random choice of object IDs with duplicates.\n indices = list(np.random.choice(range(10), 5))\n indices += indices\n results = ray.get([object_ids[i] for i in indices])\n assert results == indices\n\n\ndef test_get_multiple_experimental(shutdown_only):\n ray.init(num_cpus=1)\n object_ids = [ray.put(i) for i in range(10)]\n\n object_ids_tuple = tuple(object_ids)\n assert ray.experimental.get(object_ids_tuple) == list(range(10))\n\n object_ids_nparray = np.array(object_ids)\n assert ray.experimental.get(object_ids_nparray) == list(range(10))\n\n\ndef test_get_dict(shutdown_only):\n ray.init(num_cpus=1)\n d = {str(i): ray.put(i) for i in range(5)}\n for i in range(5, 10):\n d[str(i)] = i\n result = ray.experimental.get(d)\n expected = {str(i): i for i in range(10)}\n assert result == expected\n\n\ndef test_wait(shutdown_only):\n ray.init(num_cpus=1)\n\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n ready_ids, remaining_ids = ray.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)\n assert set(ready_ids) == set(objectids)\n assert remaining_ids == []\n\n objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n start_time = time.time()\n ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)\n assert time.time() - start_time < 2\n assert len(ready_ids) == 3\n assert len(remaining_ids) == 1\n ray.wait(objectids)\n objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n start_time = time.time()\n ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)\n assert time.time() - start_time < 5\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n # Verify that calling wait with duplicate object IDs throws an\n # exception.\n x = ray.put(1)\n with pytest.raises(Exception):\n ray.wait([x, x])\n\n # Make sure it is possible to call wait with an empty list.\n ready_ids, remaining_ids = ray.wait([])\n assert ready_ids == []\n assert remaining_ids == []\n\n # Test semantics of num_returns with no timeout.\n oids = [ray.put(i) for i in range(10)]\n (found, rest) = ray.wait(oids, num_returns=2)\n assert len(found) == 2\n assert len(rest) == 8\n\n # Verify that incorrect usage raises a TypeError.\n x = ray.put(1)\n with pytest.raises(TypeError):\n 
ray.wait(x)\n with pytest.raises(TypeError):\n ray.wait(1)\n with pytest.raises(TypeError):\n ray.wait([1])\n\n\ndef test_wait_iterables(shutdown_only):\n ray.init(num_cpus=1)\n\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n objectids = np.array(\n [f.remote(1.0),\n f.remote(0.5),\n f.remote(0.5),\n f.remote(0.5)])\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n\ndef test_multiple_waits_and_gets(shutdown_only):\n # It is important to use three workers here, so that the three tasks\n # launched in this experiment can run at the same time.\n ray.init(num_cpus=3)\n\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n @ray.remote\n def g(l):\n # The argument l should be a list containing one object ID.\n ray.wait([l[0]])\n\n @ray.remote\n def h(l):\n # The argument l should be a list containing one object ID.\n ray.get(l[0])\n\n # Make sure that multiple wait requests involving the same object ID\n # all return.\n x = f.remote(1)\n ray.get([g.remote([x]), g.remote([x])])\n\n # Make sure that multiple get requests involving the same object ID all\n # return.\n x = f.remote(1)\n ray.get([h.remote([x]), h.remote([x])])\n\n\ndef test_caching_functions_to_run(shutdown_only):\n # Test that we export functions to run on all workers before the driver\n # is connected.\n def f(worker_info):\n sys.path.append(1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def f(worker_info):\n sys.path.append(2)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def g(worker_info):\n sys.path.append(3)\n\n ray.worker.global_worker.run_function_on_all_workers(g)\n\n def f(worker_info):\n sys.path.append(4)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n ray.init(num_cpus=1)\n\n @ray.remote\n def get_state():\n time.sleep(1)\n return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]\n\n res1 = get_state.remote()\n res2 = get_state.remote()\n assert ray.get(res1) == (1, 2, 3, 4)\n assert ray.get(res2) == (1, 2, 3, 4)\n\n # Clean up the path on the workers.\n def f(worker_info):\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n\ndef test_running_function_on_all_workers(shutdown_only):\n ray.init(num_cpus=1)\n\n def f(worker_info):\n sys.path.append(\"fake_directory\")\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n @ray.remote\n def get_path1():\n return sys.path\n\n assert \"fake_directory\" == ray.get(get_path1.remote())[-1]\n\n def f(worker_info):\n sys.path.pop(-1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n # Create a second remote function to guarantee that when we call\n # get_path2.remote(), the second function to run will have been run on\n # the worker.\n @ray.remote\n def get_path2():\n return sys.path\n\n assert \"fake_directory\" not in ray.get(get_path2.remote())\n\n\ndef test_profiling_api(shutdown_only):\n ray.init(num_cpus=2)\n\n @ray.remote\n def f():\n with ray.profile(\n \"custom_event\",\n extra_data={\"name\": \"custom name\"}) as ray_prof:\n ray_prof.set_attribute(\"key\", \"value\")\n\n ray.put(1)\n object_id = f.remote()\n ray.wait([object_id])\n ray.get(object_id)\n\n # Wait until all of the profiling information appears in 
the profile\n # table.\n timeout_seconds = 20\n start_time = time.time()\n while True:\n if time.time() - start_time > timeout_seconds:\n raise Exception(\"Timed out while waiting for information in \"\n \"profile table.\")\n profile_data = ray.global_state.chrome_tracing_dump()\n event_types = {event[\"cat\"] for event in profile_data}\n expected_types = [\n \"worker_idle\",\n \"task\",\n \"task:deserialize_arguments\",\n \"task:execute\",\n \"task:store_outputs\",\n \"wait_for_function\",\n \"ray.get\",\n \"ray.put\",\n \"ray.wait\",\n \"submit_task\",\n \"fetch_and_run_function\",\n \"register_remote_function\",\n \"custom_event\", # This is the custom one from ray.profile.\n ]\n\n if all(expected_type in event_types\n for expected_type in expected_types):\n break\n\n\[email protected]()\ndef ray_start_cluster():\n cluster = ray.test.cluster_utils.Cluster()\n yield cluster\n\n # The code after the yield will run as teardown code.\n ray.shutdown()\n cluster.shutdown()\n\n\ndef test_object_transfer_dump(ray_start_cluster):\n cluster = ray_start_cluster\n\n num_nodes = 3\n # Set the inline object size to 0 to force all objects to be written to\n # plasma.\n config = json.dumps({\"inline_object_max_size_bytes\": 0})\n for i in range(num_nodes):\n cluster.add_node(\n resources={str(i): 1},\n object_store_memory=10**9,\n _internal_config=config)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f(x):\n return\n\n # These objects will live on different nodes.\n object_ids = [\n f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)\n ]\n\n # Broadcast each object from each machine to each other machine.\n for object_id in object_ids:\n ray.get([\n f._remote(args=[object_id], resources={str(i): 1})\n for i in range(num_nodes)\n ])\n\n # The profiling information only flushes once every second.\n time.sleep(1.1)\n\n transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()\n # Make sure the transfer dump can be serialized with JSON.\n json.loads(json.dumps(transfer_dump))\n assert len(transfer_dump) >= num_nodes**2\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_receive\"\n }) == num_nodes\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_send\"\n }) == num_nodes\n\n\ndef test_identical_function_names(shutdown_only):\n # Define a bunch of remote functions and make sure that we don't\n # accidentally call an older version.\n ray.init(num_cpus=1)\n\n num_calls = 200\n\n @ray.remote\n def f():\n return 1\n\n results1 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 2\n\n results2 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 3\n\n results3 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 4\n\n results4 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 5\n\n results5 = [f.remote() for _ in range(num_calls)]\n\n assert ray.get(results1) == num_calls * [1]\n assert ray.get(results2) == num_calls * [2]\n assert ray.get(results3) == num_calls * [3]\n assert ray.get(results4) == num_calls * [4]\n assert ray.get(results5) == num_calls * [5]\n\n @ray.remote\n def g():\n return 1\n\n @ray.remote # noqa: F811\n def g():\n return 2\n\n @ray.remote # noqa: F811\n def g():\n return 3\n\n @ray.remote # noqa: F811\n def g():\n return 4\n\n @ray.remote # noqa: F811\n def g():\n return 5\n\n result_values = ray.get([g.remote() for _ in range(num_calls)])\n assert 
result_values == num_calls * [5]\n\n\ndef test_illegal_api_calls(shutdown_only):\n    ray.init(num_cpus=1)\n\n    # Verify that we cannot call put on an ObjectID.\n    x = ray.put(1)\n    with pytest.raises(Exception):\n        ray.put(x)\n    # Verify that we cannot call get on a regular value.\n    with pytest.raises(Exception):\n        ray.get(3)\n\n\ndef test_multithreading(shutdown_only):\n    # This test requires at least 2 CPUs to finish since the worker does not\n    # release resources when joining the threads.\n    ray.init(num_cpus=2)\n\n    def run_test_in_multi_threads(test_case, num_threads=20, num_repeats=50):\n        \"\"\"A helper function that runs test cases in multiple threads.\"\"\"\n\n        def wrapper():\n            for _ in range(num_repeats):\n                test_case()\n                time.sleep(random.randint(0, 10) / 1000.0)\n            return \"ok\"\n\n        executor = ThreadPoolExecutor(max_workers=num_threads)\n        futures = [executor.submit(wrapper) for _ in range(num_threads)]\n        for future in futures:\n            assert future.result() == \"ok\"\n\n    @ray.remote\n    def echo(value, delay_ms=0):\n        if delay_ms > 0:\n            time.sleep(delay_ms / 1000.0)\n        return value\n\n    @ray.remote\n    class Echo(object):\n        def echo(self, value):\n            return value\n\n    def test_api_in_multi_threads():\n        \"\"\"Test using the Ray API in multiple threads.\"\"\"\n\n        # Test calling remote functions in multiple threads.\n        def test_remote_call():\n            value = random.randint(0, 1000000)\n            result = ray.get(echo.remote(value))\n            assert value == result\n\n        run_test_in_multi_threads(test_remote_call)\n\n        # Test multiple threads calling one actor.\n        actor = Echo.remote()\n\n        def test_call_actor():\n            value = random.randint(0, 1000000)\n            result = ray.get(actor.echo.remote(value))\n            assert value == result\n\n        run_test_in_multi_threads(test_call_actor)\n\n        # Test put and get.\n        def test_put_and_get():\n            value = random.randint(0, 1000000)\n            result = ray.get(ray.put(value))\n            assert value == result\n\n        run_test_in_multi_threads(test_put_and_get)\n\n        # Test multiple threads waiting for objects.\n        num_wait_objects = 10\n        objects = [\n            echo.remote(i, delay_ms=10) for i in range(num_wait_objects)\n        ]\n\n        def test_wait():\n            ready, _ = ray.wait(\n                objects,\n                num_returns=len(objects),\n                timeout=1000.0,\n            )\n            assert len(ready) == num_wait_objects\n            assert ray.get(ready) == list(range(num_wait_objects))\n\n        run_test_in_multi_threads(test_wait, num_repeats=1)\n\n    # Run tests in a driver.\n    test_api_in_multi_threads()\n\n    # Run tests in a worker.\n    @ray.remote\n    def run_tests_in_worker():\n        test_api_in_multi_threads()\n        return \"ok\"\n\n    assert ray.get(run_tests_in_worker.remote()) == \"ok\"\n\n    # Test actor that runs background threads.\n    @ray.remote\n    class MultithreadedActor(object):\n        def __init__(self):\n            self.lock = threading.Lock()\n            self.thread_results = []\n\n        def background_thread(self, wait_objects):\n            try:\n                # Test wait\n                ready, _ = ray.wait(\n                    wait_objects,\n                    num_returns=len(wait_objects),\n                    timeout=1000.0,\n                )\n                assert len(ready) == len(wait_objects)\n                for _ in range(50):\n                    num = 20\n                    # Test remote call\n                    results = [echo.remote(i) for i in range(num)]\n                    assert ray.get(results) == list(range(num))\n                    # Test put and get\n                    objects = [ray.put(i) for i in range(num)]\n                    assert ray.get(objects) == list(range(num))\n                    time.sleep(random.randint(0, 10) / 1000.0)\n            except Exception as e:\n                with self.lock:\n                    self.thread_results.append(e)\n            else:\n                with self.lock:\n                    self.thread_results.append(\"ok\")\n\n        def spawn(self):\n            wait_objects = [echo.remote(i, delay_ms=10) for i in range(20)]\n            self.threads = [\n                threading.Thread(\n                    target=self.background_thread, args=(wait_objects, ))\n                for _ in range(20)\n            ]\n            [thread.start() for thread in self.threads]\n\n        def join(self):\n            [thread.join() for thread in self.threads]\n            assert self.thread_results == [\"ok\"] * len(self.threads)\n            return \"ok\"\n\n    actor = MultithreadedActor.remote()\n    actor.spawn.remote()\n    assert ray.get(actor.join.remote()) == \"ok\"\n\n\ndef test_free_objects_multi_node(ray_start_cluster):\n    # This test will do the following:\n    # 1. Create 3 raylets that each hold an actor.\n    # 2. Each actor creates an object which is the deletion target.\n    # 3. Invoke 64 methods on each actor to flush plasma client.\n    # 4. After flushing, the plasma client releases the targets.\n    # 5. Check that the deletion targets have been deleted.\n    # Caution: if remote functions are used instead of actor methods,\n    # one raylet may create more than one worker to execute the\n    # tasks, so the flushing operations may be executed in different\n    # workers and the plasma client holding the deletion target\n    # may not be flushed.\n    cluster = ray_start_cluster\n    config = json.dumps({\"object_manager_repeated_push_delay_ms\": 1000})\n    for i in range(3):\n        cluster.add_node(\n            num_cpus=1,\n            resources={\"Custom{}\".format(i): 1},\n            _internal_config=config)\n    ray.init(redis_address=cluster.redis_address)\n\n    @ray.remote(resources={\"Custom0\": 1})\n    class ActorOnNode0(object):\n        def get(self):\n            return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"Custom1\": 1})\n    class ActorOnNode1(object):\n        def get(self):\n            return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"Custom2\": 1})\n    class ActorOnNode2(object):\n        def get(self):\n            return ray.worker.global_worker.plasma_client.store_socket_name\n\n    def create(actors):\n        a = actors[0].get.remote()\n        b = actors[1].get.remote()\n        c = actors[2].get.remote()\n        (l1, l2) = ray.wait([a, b, c], num_returns=3)\n        assert len(l1) == 3\n        assert len(l2) == 0\n        return (a, b, c)\n\n    def flush(actors):\n        # Flush the Release History.\n        # The current Plasma Client Cache will maintain a 64-item list.\n        # If the number changes, this will fail.\n        logger.info(\"Start Flush!\")\n        for i in range(64):\n            ray.get([actor.get.remote() for actor in actors])\n        logger.info(\"Flush finished!\")\n\n    def run_one_test(actors, local_only):\n        (a, b, c) = create(actors)\n        # The three objects should be generated on different object stores.\n        assert ray.get(a) != ray.get(b)\n        assert ray.get(a) != ray.get(c)\n        assert ray.get(c) != ray.get(b)\n        ray.internal.free([a, b, c], local_only=local_only)\n        flush(actors)\n        return (a, b, c)\n\n    actors = [\n        ActorOnNode0.remote(),\n        ActorOnNode1.remote(),\n        ActorOnNode2.remote()\n    ]\n    # Case 1: run this local_only=False. All 3 objects will be deleted.\n    (a, b, c) = run_one_test(actors, False)\n    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)\n    # All the objects are deleted.\n    assert len(l1) == 0\n    assert len(l2) == 3\n    # Case 2: run this local_only=True. 
Only 1 object will be deleted.\n (a, b, c) = run_one_test(actors, True)\n (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)\n # One object is deleted and 2 objects are not.\n assert len(l1) == 2\n assert len(l2) == 1\n # The deleted object will have the same store with the driver.\n local_return = ray.worker.global_worker.plasma_client.store_socket_name\n for object_id in l1:\n assert ray.get(object_id) != local_return\n\n\ndef test_local_mode(shutdown_only):\n @ray.remote\n def local_mode_f():\n return np.array([0, 0])\n\n @ray.remote\n def local_mode_g(x):\n x[0] = 1\n return x\n\n ray.init(local_mode=True)\n\n @ray.remote\n def f():\n return np.ones([3, 4, 5])\n\n xref = f.remote()\n # Remote functions should return by value.\n assert_equal(xref, np.ones([3, 4, 5]))\n # Check that ray.get is the identity.\n assert_equal(xref, ray.get(xref))\n y = np.random.normal(size=[11, 12])\n # Check that ray.put is the identity.\n assert_equal(y, ray.put(y))\n\n # Make sure objects are immutable, this example is why we need to copy\n # arguments before passing them into remote functions in python mode\n aref = local_mode_f.remote()\n assert_equal(aref, np.array([0, 0]))\n bref = local_mode_g.remote(aref)\n # Make sure local_mode_g does not mutate aref.\n assert_equal(aref, np.array([0, 0]))\n assert_equal(bref, np.array([1, 0]))\n\n # wait should return the first num_returns values passed in as the\n # first list and the remaining values as the second list\n num_returns = 5\n object_ids = [ray.put(i) for i in range(20)]\n ready, remaining = ray.wait(\n object_ids, num_returns=num_returns, timeout=None)\n assert_equal(ready, object_ids[:num_returns])\n assert_equal(remaining, object_ids[num_returns:])\n\n # Test actors in LOCAL_MODE.\n\n @ray.remote\n class LocalModeTestClass(object):\n def __init__(self, array):\n self.array = array\n\n def set_array(self, array):\n self.array = array\n\n def get_array(self):\n return self.array\n\n def modify_and_set_array(self, array):\n array[0] = -1\n self.array = array\n\n test_actor = LocalModeTestClass.remote(np.arange(10))\n # Remote actor functions should return by value\n assert_equal(test_actor.get_array.remote(), np.arange(10))\n\n test_array = np.arange(10)\n # Remote actor functions should not mutate arguments\n test_actor.modify_and_set_array.remote(test_array)\n assert_equal(test_array, np.arange(10))\n # Remote actor functions should keep state\n test_array[0] = -1\n assert_equal(test_array, test_actor.get_array.remote())\n\n # Check that actor handles work in Python mode.\n\n @ray.remote\n def use_actor_handle(handle):\n array = np.ones(10)\n handle.set_array.remote(array)\n assert np.alltrue(array == ray.get(handle.get_array.remote()))\n\n ray.get(use_actor_handle.remote(test_actor))\n\n\ndef test_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=2)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n time_buffer = 0.3\n\n # At most 10 copies of this can run at once.\n @ray.remote(num_cpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(10)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(11)])\n duration 
= time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_cpus=3)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_gpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(2)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_multi_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=10)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n @ray.remote(num_cpus=1, num_gpus=9)\n def f(n):\n time.sleep(n)\n\n @ray.remote(num_cpus=9, num_gpus=1)\n def g(n):\n time.sleep(n)\n\n time_buffer = 0.3\n\n start_time = time.time()\n ray.get([f.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_gpu_ids(shutdown_only):\n num_gpus = 10\n ray.init(num_cpus=10, num_gpus=num_gpus)\n\n @ray.remote(num_gpus=0)\n def f0():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n @ray.remote(num_gpus=1)\n def f1():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n @ray.remote(num_gpus=2)\n def f2():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 2\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n @ray.remote(num_gpus=3)\n def f3():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 3\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n 
@ray.remote(num_gpus=4)\n def f4():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 4\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n @ray.remote(num_gpus=5)\n def f5():\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 5\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n # Wait for all workers to start up.\n @ray.remote\n def f():\n time.sleep(0.1)\n return os.getpid()\n\n start_time = time.time()\n while True:\n if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:\n break\n if time.time() > start_time + 10:\n raise Exception(\"Timed out while waiting for workers to start \"\n \"up.\")\n\n list_of_ids = ray.get([f0.remote() for _ in range(10)])\n assert list_of_ids == 10 * [[]]\n\n list_of_ids = ray.get([f1.remote() for _ in range(10)])\n set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}\n assert set_of_ids == {(i, ) for i in range(10)}\n\n list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])\n all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]\n assert set(all_ids) == set(range(10))\n\n remaining = [f5.remote() for _ in range(20)]\n for _ in range(10):\n t1 = time.time()\n ready, remaining = ray.wait(remaining, num_returns=2)\n t2 = time.time()\n # There are only 10 GPUs, and each task uses 2 GPUs, so there\n # should only be 2 tasks scheduled at a given time, so if we wait\n # for 2 tasks to finish, then it should take at least 0.1 seconds\n # for each pair of tasks to finish.\n assert t2 - t1 > 0.09\n list_of_ids = ray.get(ready)\n all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]\n # Commenting out the below assert because it seems to fail a lot.\n # assert set(all_ids) == set(range(10))\n\n # Test that actors have CUDA_VISIBLE_DEVICES set properly.\n\n @ray.remote\n class Actor0(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n @ray.remote(num_gpus=1)\n class Actor1(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n a0 = Actor0.remote()\n ray.get(a0.test.remote())\n\n a1 = Actor1.remote()\n ray.get(a1.test.remote())\n\n\ndef test_zero_cpus(shutdown_only):\n ray.init(num_cpus=0)\n\n @ray.remote(num_cpus=0)\n def f():\n return 1\n\n # The task should be able to execute.\n ray.get(f.remote())\n\n\ndef test_zero_cpus_actor(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=0)\n cluster.add_node(num_cpus=2)\n ray.init(redis_address=cluster.redis_address)\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote\n class Foo(object):\n def 
method(self):\n            return ray.worker.global_worker.plasma_client.store_socket_name\n\n    # Make sure tasks and actors run on the remote local scheduler.\n    a = Foo.remote()\n    assert ray.get(a.method.remote()) != local_plasma\n\n\ndef test_fractional_resources(shutdown_only):\n    ray.init(num_cpus=6, num_gpus=3, resources={\"Custom\": 1})\n\n    @ray.remote(num_gpus=0.5)\n    class Foo1(object):\n        def method(self):\n            gpu_ids = ray.get_gpu_ids()\n            assert len(gpu_ids) == 1\n            return gpu_ids[0]\n\n    foos = [Foo1.remote() for _ in range(6)]\n    gpu_ids = ray.get([f.method.remote() for f in foos])\n    for i in range(3):\n        assert gpu_ids.count(i) == 2\n    del foos\n\n    @ray.remote\n    class Foo2(object):\n        def method(self):\n            pass\n\n    # Create an actor that requires 0.7 of the custom resource.\n    f1 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n    ray.get(f1.method.remote())\n    # Make sure that we cannot create another actor that requires 0.7 of the\n    # custom resource. TODO(rkn): Re-enable this once ray.wait is\n    # implemented.\n    f2 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n    ready, _ = ray.wait([f2.method.remote()], timeout=0.5)\n    assert len(ready) == 0\n    # Make sure we can start an actor that requires only 0.3 of the custom\n    # resource.\n    f3 = Foo2._remote([], {}, resources={\"Custom\": 0.3})\n    ray.get(f3.method.remote())\n\n    del f1, f3\n\n    # Make sure that we get exceptions if we submit tasks that require a\n    # fractional number of resources greater than 1.\n\n    @ray.remote(num_cpus=1.5)\n    def test():\n        pass\n\n    with pytest.raises(ValueError):\n        test.remote()\n\n    with pytest.raises(ValueError):\n        Foo2._remote([], {}, resources={\"Custom\": 1.5})\n\n\ndef test_multiple_local_schedulers(ray_start_cluster):\n    # This test will define a bunch of tasks that can only be assigned to\n    # specific local schedulers, and we will check that they are assigned\n    # to the correct local schedulers.\n    cluster = ray_start_cluster\n    cluster.add_node(num_cpus=11, num_gpus=0)\n    cluster.add_node(num_cpus=5, num_gpus=5)\n    cluster.add_node(num_cpus=10, num_gpus=1)\n    ray.init(redis_address=cluster.redis_address)\n    cluster.wait_for_nodes()\n\n    # Define a bunch of remote functions that all return the socket name of\n    # the plasma store. 
Since there is a one-to-one correspondence between\n # plasma stores and local schedulers (at least right now), this can be\n # used to identify which local scheduler the task was assigned to.\n\n # This must be run on the zeroth local scheduler.\n @ray.remote(num_cpus=11)\n def run_on_0():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the first local scheduler.\n @ray.remote(num_gpus=2)\n def run_on_1():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the second local scheduler.\n @ray.remote(num_cpus=6, num_gpus=1)\n def run_on_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This can be run anywhere.\n @ray.remote(num_cpus=0, num_gpus=0)\n def run_on_0_1_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the first or second local scheduler.\n @ray.remote(num_gpus=1)\n def run_on_1_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the zeroth or second local scheduler.\n @ray.remote(num_cpus=8)\n def run_on_0_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n def run_lots_of_tasks():\n names = []\n results = []\n for i in range(100):\n index = np.random.randint(6)\n if index == 0:\n names.append(\"run_on_0\")\n results.append(run_on_0.remote())\n elif index == 1:\n names.append(\"run_on_1\")\n results.append(run_on_1.remote())\n elif index == 2:\n names.append(\"run_on_2\")\n results.append(run_on_2.remote())\n elif index == 3:\n names.append(\"run_on_0_1_2\")\n results.append(run_on_0_1_2.remote())\n elif index == 4:\n names.append(\"run_on_1_2\")\n results.append(run_on_1_2.remote())\n elif index == 5:\n names.append(\"run_on_0_2\")\n results.append(run_on_0_2.remote())\n return names, results\n\n client_table = ray.global_state.client_table()\n store_names = []\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"][\"GPU\"] == 0\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"][\"GPU\"] == 5\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"][\"GPU\"] == 1\n ]\n assert len(store_names) == 3\n\n def validate_names_and_results(names, results):\n for name, result in zip(names, ray.get(results)):\n if name == \"run_on_0\":\n assert result in [store_names[0]]\n elif name == \"run_on_1\":\n assert result in [store_names[1]]\n elif name == \"run_on_2\":\n assert result in [store_names[2]]\n elif name == \"run_on_0_1_2\":\n assert (result in [\n store_names[0], store_names[1], store_names[2]\n ])\n elif name == \"run_on_1_2\":\n assert result in [store_names[1], store_names[2]]\n elif name == \"run_on_0_2\":\n assert result in [store_names[0], store_names[2]]\n else:\n raise Exception(\"This should be unreachable.\")\n assert set(ray.get(results)) == set(store_names)\n\n names, results = run_lots_of_tasks()\n validate_names_and_results(names, results)\n\n # Make sure the same thing works when this is nested inside of a task.\n\n @ray.remote\n def run_nested1():\n names, results = run_lots_of_tasks()\n return names, results\n\n @ray.remote\n def run_nested2():\n names, results = ray.get(run_nested1.remote())\n return names, results\n\n names, results = ray.get(run_nested2.remote())\n validate_names_and_results(names, results)\n\n\ndef test_custom_resources(ray_start_cluster):\n cluster = 
ray_start_cluster\n    cluster.add_node(num_cpus=3, resources={\"CustomResource\": 0})\n    cluster.add_node(num_cpus=3, resources={\"CustomResource\": 1})\n    ray.init(redis_address=cluster.redis_address)\n\n    @ray.remote\n    def f():\n        time.sleep(0.001)\n        return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"CustomResource\": 1})\n    def g():\n        time.sleep(0.001)\n        return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"CustomResource\": 1})\n    def h():\n        ray.get([f.remote() for _ in range(5)])\n        return ray.worker.global_worker.plasma_client.store_socket_name\n\n    # The f tasks should be scheduled on both local schedulers.\n    assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n\n    local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n    # The g tasks should be scheduled only on the second local scheduler.\n    local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))\n    assert len(local_scheduler_ids) == 1\n    assert list(local_scheduler_ids)[0] != local_plasma\n\n    # Make sure that resource bookkeeping works when a task that uses a\n    # custom resource gets blocked.\n    ray.get([h.remote() for _ in range(5)])\n\n\ndef test_two_custom_resources(ray_start_cluster):\n    cluster = ray_start_cluster\n    cluster.add_node(\n        num_cpus=3, resources={\n            \"CustomResource1\": 1,\n            \"CustomResource2\": 2\n        })\n    cluster.add_node(\n        num_cpus=3, resources={\n            \"CustomResource1\": 3,\n            \"CustomResource2\": 4\n        })\n    ray.init(redis_address=cluster.redis_address)\n\n    @ray.remote(resources={\"CustomResource1\": 1})\n    def f():\n        time.sleep(0.001)\n        return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"CustomResource2\": 1})\n    def g():\n        time.sleep(0.001)\n        return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"CustomResource1\": 1, \"CustomResource2\": 3})\n    def h():\n        time.sleep(0.001)\n        return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"CustomResource1\": 4})\n    def j():\n        time.sleep(0.001)\n        return ray.worker.global_worker.plasma_client.store_socket_name\n\n    @ray.remote(resources={\"CustomResource3\": 1})\n    def k():\n        time.sleep(0.001)\n        return ray.worker.global_worker.plasma_client.store_socket_name\n\n    # The f and g tasks should be scheduled on both local schedulers.\n    assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n    assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2\n\n    local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n    # The h tasks should be scheduled only on the second local scheduler.\n    local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))\n    assert len(local_scheduler_ids) == 1\n    assert list(local_scheduler_ids)[0] != local_plasma\n\n    # Make sure that tasks with unsatisfied custom resource requirements do\n    # not get scheduled.\n    ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)\n    assert ready_ids == []\n\n\ndef test_many_custom_resources(shutdown_only):\n    num_custom_resources = 10000\n    total_resources = {\n        str(i): np.random.randint(1, 7)\n        for i in range(num_custom_resources)\n    }\n    ray.init(num_cpus=5, resources=total_resources)\n\n    def f():\n        return 1\n\n    remote_functions = []\n    for _ in range(20):\n        num_resources = np.random.randint(0, num_custom_resources + 1)\n        permuted_resources = np.random.permutation(\n            num_custom_resources)[:num_resources]\n        random_resources = {\n            str(i): total_resources[str(i)]\n            for i in permuted_resources\n        }\n        remote_function = ray.remote(resources=random_resources)(f)\n        remote_functions.append(remote_function)\n\n    remote_functions.append(ray.remote(f))\n    remote_functions.append(ray.remote(resources=total_resources)(f))\n\n    results = []\n    for remote_function in remote_functions:\n        results.append(remote_function.remote())\n        results.append(remote_function.remote())\n        results.append(remote_function.remote())\n\n    ray.get(results)\n\n\[email protected]\ndef save_gpu_ids_shutdown_only():\n    # Record the current value of this environment variable so that we can\n    # reset it after the test.\n    original_gpu_ids = os.environ.get(\"CUDA_VISIBLE_DEVICES\", None)\n\n    yield None\n\n    # The code after the yield will run as teardown code.\n    ray.shutdown()\n    # Reset the environment variable.\n    if original_gpu_ids is not None:\n        os.environ[\"CUDA_VISIBLE_DEVICES\"] = original_gpu_ids\n    else:\n        del os.environ[\"CUDA_VISIBLE_DEVICES\"]\n\n\ndef test_specific_gpus(save_gpu_ids_shutdown_only):\n    allowed_gpu_ids = [4, 5, 6]\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(\n        [str(i) for i in allowed_gpu_ids])\n    ray.init(num_gpus=3)\n\n    @ray.remote(num_gpus=1)\n    def f():\n        gpu_ids = ray.get_gpu_ids()\n        assert len(gpu_ids) == 1\n        assert gpu_ids[0] in allowed_gpu_ids\n\n    @ray.remote(num_gpus=2)\n    def g():\n        gpu_ids = ray.get_gpu_ids()\n        assert len(gpu_ids) == 2\n        assert gpu_ids[0] in allowed_gpu_ids\n        assert gpu_ids[1] in allowed_gpu_ids\n\n    ray.get([f.remote() for _ in range(100)])\n    ray.get([g.remote() for _ in range(100)])\n\n\ndef test_blocking_tasks(shutdown_only):\n    ray.init(num_cpus=1)\n\n    @ray.remote\n    def f(i, j):\n        return (i, j)\n\n    @ray.remote\n    def g(i):\n        # Each instance of g submits and blocks on the result of another\n        # remote task.\n        object_ids = [f.remote(i, j) for j in range(2)]\n        return ray.get(object_ids)\n\n    @ray.remote\n    def h(i):\n        # Each instance of h submits and blocks on the result of another\n        # remote task using ray.wait.\n        object_ids = [f.remote(i, j) for j in range(2)]\n        return ray.wait(object_ids, num_returns=len(object_ids))\n\n    ray.get([h.remote(i) for i in range(4)])\n\n    @ray.remote\n    def _sleep(i):\n        time.sleep(0.01)\n        return (i)\n\n    @ray.remote\n    def sleep():\n        # Each instance of sleep submits and blocks on the result of\n        # another remote task, which takes some time to execute.\n        ray.get([_sleep.remote(i) for i in range(10)])\n\n    ray.get(sleep.remote())\n\n\ndef test_max_call_tasks(shutdown_only):\n    ray.init(num_cpus=1)\n\n    @ray.remote(max_calls=1)\n    def f():\n        return os.getpid()\n\n    pid = ray.get(f.remote())\n    ray.test.test_utils.wait_for_pid_to_exit(pid)\n\n    @ray.remote(max_calls=2)\n    def f():\n        return os.getpid()\n\n    pid1 = ray.get(f.remote())\n    pid2 = ray.get(f.remote())\n    assert pid1 == pid2\n    ray.test.test_utils.wait_for_pid_to_exit(pid1)\n\n\ndef attempt_to_load_balance(remote_function,\n                            args,\n                            total_tasks,\n                            num_nodes,\n                            minimum_count,\n                            num_attempts=100):\n    attempts = 0\n    while attempts < num_attempts:\n        locations = ray.get(\n            [remote_function.remote(*args) for _ in range(total_tasks)])\n        names = set(locations)\n        counts = [locations.count(name) for name in names]\n        logger.info(\"Counts are {}.\".format(counts))\n        if (len(names) == num_nodes\n                and all(count >= minimum_count for count in counts)):\n            break\n        attempts += 1\n    assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n    # This test ensures that tasks are being assigned to all local\n    # schedulers in a roughly equal manner.\n    cluster = ray_start_cluster\n    
num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all local\n # schedulers in a roughly equal manner even when the tasks have\n # dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This object will be local to one of the local schedulers. Make sure\n # this doesn't prevent tasks from being scheduled on other local\n # schedulers.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_tasks(num_tasks, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.global_state.task_table()) >= num_tasks:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for global state.\")\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.global_state.object_table()) >= num_objects:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for global state.\")\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_global_state_api(shutdown_only):\n with pytest.raises(Exception):\n ray.global_state.object_table()\n\n with pytest.raises(Exception):\n ray.global_state.task_table()\n\n with pytest.raises(Exception):\n ray.global_state.client_table()\n\n with pytest.raises(Exception):\n ray.global_state.function_table()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n resources = {\"CPU\": 5, \"GPU\": 3, \"CustomResource\": 1}\n assert ray.global_state.cluster_resources() == resources\n\n assert ray.global_state.object_table() == {}\n\n driver_id = ray.experimental.state.binary_to_hex(\n ray.worker.global_worker.worker_id)\n driver_task_id = ray.worker.global_worker.current_task_id.hex()\n\n # One task is put in the task table which corresponds to this driver.\n wait_for_num_tasks(1)\n task_table = ray.global_state.task_table()\n assert len(task_table) == 1\n assert driver_task_id == list(task_table.keys())[0]\n task_spec = task_table[driver_task_id][\"TaskSpec\"]\n nil_id_hex = ray.ObjectID.nil().hex()\n\n assert task_spec[\"TaskID\"] == driver_task_id\n assert task_spec[\"ActorID\"] == nil_id_hex\n assert task_spec[\"Args\"] == []\n assert task_spec[\"DriverID\"] == driver_id\n assert task_spec[\"FunctionID\"] == nil_id_hex\n assert task_spec[\"ReturnObjectIDs\"] == []\n\n client_table = ray.global_state.client_table()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n def f(*xs):\n return 1\n\n x_id = ray.put(1)\n result_id = f.remote(1, \"hi\", x_id)\n\n # Wait for one additional task to complete.\n wait_for_num_tasks(1 + 1)\n task_table = ray.global_state.task_table()\n assert len(task_table) == 1 + 
1\n task_id_set = set(task_table.keys())\n task_id_set.remove(driver_task_id)\n task_id = list(task_id_set)[0]\n\n function_table = ray.global_state.function_table()\n task_spec = task_table[task_id][\"TaskSpec\"]\n assert task_spec[\"ActorID\"] == nil_id_hex\n assert task_spec[\"Args\"] == [1, \"hi\", x_id]\n assert task_spec[\"DriverID\"] == driver_id\n assert task_spec[\"ReturnObjectIDs\"] == [result_id]\n function_table_entry = function_table[task_spec[\"FunctionID\"]]\n assert function_table_entry[\"Name\"] == \"runtest.f\"\n assert function_table_entry[\"DriverID\"] == driver_id\n assert function_table_entry[\"Module\"] == \"runtest\"\n\n assert task_table[task_id] == ray.global_state.task_table(task_id)\n\n # Wait for two objects, one for the x_id and one for result_id.\n wait_for_num_objects(2)\n\n def wait_for_object_table():\n timeout = 10\n start_time = time.time()\n while time.time() - start_time < timeout:\n object_table = ray.global_state.object_table()\n tables_ready = (object_table[x_id][\"ManagerIDs\"] is not None and\n object_table[result_id][\"ManagerIDs\"] is not None)\n if tables_ready:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for object table to \"\n \"update.\")\n\n object_table = ray.global_state.object_table()\n assert len(object_table) == 2\n\n assert object_table[x_id][\"IsEviction\"][0] is False\n\n assert object_table[result_id][\"IsEviction\"][0] is False\n\n assert object_table[x_id] == ray.global_state.object_table(x_id)\n object_table_entry = ray.global_state.object_table(result_id)\n assert object_table[result_id] == object_table_entry\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError(object):\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n if sys.version_info >= (3, 0):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n else:\n import cStringIO\n self.output_buffer = cStringIO.StringIO()\n self.error_buffer = cStringIO.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n error_lines = captured[\"err\"]\n for i in range(200):\n assert str(i) in error_lines\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, 
file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n error_lines = captured[\"err\"]\n assert len(error_lines) == 0\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(redirect_worker_output=True, num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() for _ in range(10)]))\n\n worker_info = ray.global_state.workers()\n assert len(worker_info) >= num_workers\n for worker_id, info in worker_info.items():\n assert \"node_ip_address\" in info\n assert \"plasma_store_socket\" in info\n assert \"stderr_file\" in info\n assert \"stdout_file\" in info\n\n\ndef test_specific_driver_id():\n dummy_driver_id = ray.DriverID(b\"00112233445566778899\")\n ray.init(driver_id=dummy_driver_id)\n\n @ray.remote\n def f():\n return ray.worker.global_worker.task_driver_id.binary()\n\n assert_equal(dummy_driver_id.binary(), ray.worker.global_worker.worker_id)\n\n task_driver_id = ray.get(f.remote())\n assert_equal(dummy_driver_id.binary(), task_driver_id)\n\n ray.shutdown()\n\n\ndef test_object_id_properties():\n id_bytes = b\"00112233445566778899\"\n object_id = ray.ObjectID(id_bytes)\n assert object_id.binary() == id_bytes\n object_id = ray.ObjectID.nil()\n assert object_id.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(b\"0123456789\")\n object_id = ray.ObjectID(_random_string())\n assert not object_id.is_nil()\n assert object_id.binary() != id_bytes\n id_dumps = pickle.dumps(object_id)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_id\n\n\[email protected]\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(num_cpus=1, object_store_memory=10**8)\n\n @ray.remote\n def f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.plasma_client.contains(\n ray.pyarrow.plasma.ObjectID(x_id.binary()))\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_inline_objects(shutdown_only):\n config = json.dumps({\"initial_reconstruction_timeout_milliseconds\": 200})\n ray.init(num_cpus=1, object_store_memory=10**7, _internal_config=config)\n\n @ray.remote\n class Actor(object):\n def create_inline_object(self):\n return \"inline\"\n\n def create_non_inline_object(self):\n return 10000 * [1]\n\n def get(self):\n return\n\n a = Actor.remote()\n # Count the number of objects that were successfully inlined.\n inlined = 
0\n for _ in range(100):\n inline_object = a.create_inline_object.remote()\n ray.get(inline_object)\n plasma_id = ray.pyarrow.plasma.ObjectID(inline_object.binary())\n ray.worker.global_worker.plasma_client.delete([plasma_id])\n # Make sure we can still get an inlined object created by an actor even\n # after it has been evicted.\n try:\n value = ray.get(inline_object)\n assert value == \"inline\"\n inlined += 1\n except ray.worker.RayTaskError:\n pass\n # Make sure some objects were inlined. Some of them may not get inlined\n # because we evict the object soon after creating it.\n assert inlined > 0\n\n # Non-inlined objects are not able to be recreated after eviction.\n for _ in range(10):\n non_inline_object = a.create_non_inline_object.remote()\n ray.get(non_inline_object)\n plasma_id = ray.pyarrow.plasma.ObjectID(non_inline_object.binary())\n # This while loop is necessary because sometimes the object is still\n # there immediately after plasma_client.delete.\n while ray.worker.global_worker.plasma_client.contains(plasma_id):\n ray.worker.global_worker.plasma_client.delete([plasma_id])\n # Objects created by an actor that were evicted and larger than the\n # maximum inline object size cannot be retrieved or reconstructed.\n with pytest.raises(ray.worker.RayTaskError):\n ray.get(non_inline_object) == 10000 * [1]\n\n\ndef test_ray_setproctitle(shutdown_only):\n ray.init(num_cpus=2)\n\n @ray.remote\n class UniqueName(object):\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:f()\"\n\n @ray.remote\n def unique_1():\n assert setproctitle.getproctitle() == \"ray_worker:runtest.unique_1()\"\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\ndef test_duplicate_error_messages(shutdown_only):\n ray.init(num_cpus=0)\n\n driver_id = ray.DriverID.nil()\n error_data = ray.gcs_utils.construct_error_message(driver_id, \"test\",\n \"message\", 0)\n\n # Push the same message to the GCS twice (they are the same because we\n # do not include a timestamp).\n\n r = ray.worker.global_worker.redis_client\n\n r.execute_command(\"RAY.TABLE_APPEND\", ray.gcs_utils.TablePrefix.ERROR_INFO,\n ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),\n error_data)\n\n # Before https://github.com/ray-project/ray/pull/3316 this would\n # give an error\n r.execute_command(\"RAY.TABLE_APPEND\", ray.gcs_utils.TablePrefix.ERROR_INFO,\n ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),\n error_data)\n\n\[email protected](\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(shutdown_only):\n ray.init(num_cpus=2)\n\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(subprocess.check_output([\"ray\", \"stack\"]))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_pandas_parquet_serialization():\n # Only test this if pandas is installed\n pytest.importorskip(\"pandas\")\n\n import pandas as pd\n import 
pyarrow as pa\n import pyarrow.parquet as pq\n\n tempdir = tempfile.mkdtemp()\n filename = os.path.join(tempdir, \"parquet-test\")\n pd.DataFrame({\"col1\": [0, 1], \"col2\": [0, 1]}).to_parquet(filename)\n with open(os.path.join(tempdir, \"parquet-compression\"), \"wb\") as f:\n table = pa.Table.from_arrays([pa.array([1, 2, 3])], [\"hello\"])\n pq.write_table(table, f, compression=\"lz4\")\n # Clean up\n shutil.rmtree(tempdir)\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n random_name = ray.ObjectID(_random_string()).hex()\n temp_raylet_socket_dir = \"/tmp/ray/tests/{}\".format(random_name)\n temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)\n\n\ndef test_raylet_is_robust_to_random_messages(shutdown_only):\n\n ray.init(num_cpus=1)\n node_manager_address = None\n node_manager_port = None\n for client in ray.global_state.client_table():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b'asdf')\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n"
] | [
[
"numpy.int8",
"numpy.ones",
"numpy.zeros",
"numpy.testing.assert_equal",
"numpy.random.permutation",
"pandas.DataFrame",
"numpy.random.normal",
"numpy.float32",
"numpy.int64",
"numpy.arange",
"numpy.int32",
"numpy.uint32",
"numpy.uint64",
"numpy.array",
"numpy.random.randint",
"numpy.float64",
"numpy.uint8"
]
] |
SimonMcLain/Project_Repository_Programming_and_Scripting_2018 | [
"c10769973461d4c30b2c0a9d3a1f0e812049ca44"
] | [
"RoughWork/stddev.py"
] | [
"#Simon McLain 2018-04-25\n# Experimenting with numpy\n# https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html reference to standard deviation \n# calculate the standard deviation of each column\n\nimport numpy\n#imports numpy library providing math functions to operate on them \ndata = numpy.genfromtxt('iris.csv', delimiter=',')\n# Reads the data into an array\ncol1 = data[:,0] \nstdcol1 = numpy.std(data[:,0])\ncol2 =data[:, 1]\nstdcol2 = numpy.std(data[:, 1])\ncol3 =data[:, 2]\nstdcol3 = numpy.std(data[:, 2])\ncol4 =data[:, 3]\nstdcol4 = numpy.std(data[:, 3])\n# Individually looks at each column and returns the standard deviation for that column\n\nprint(\"The standard deviation in petal length is: \", numpy.around(stdcol1, decimals = 2))\nprint(\"The standard deviation in petal width is: \", numpy.around(stdcol2, decimals = 2))\nprint(\"The standard deviation in sepal length is: \", numpy.around(stdcol3, decimals = 2))\nprint(\"The standard deviation in sepal width is: \", numpy.around(stdcol4, decimals = 2))\n"
] | [
[
"numpy.std",
"numpy.genfromtxt",
"numpy.around"
]
] |
paulowiz/AiesecBot | [
"ac77cc5426ed6382772603afa8015208020c0fba"
] | [
"Get Retroativo/2017_09.py"
] | [
"import psycopg2.extras\nfrom controller import RobotRotine as rr\nfrom api import graphqlconsume, querygraphql\nimport time\nimport datetime\nimport numpy as np\n\"\"\"\ncurrent = np.datetime64(datetime.datetime.now())\ncurrentab = np.datetime64(current) + np.timedelta64(5, 'h')\nlastdate = np.datetime64(currentab) - np.timedelta64(15, 'm')\nprint(lastdate)\nprint(currentab)\nprint('-')\n\n\"\"\"\nrobo5 = rr.RobotRotine()\ni = 0\ndtinit = '2017-09-01T00:00:00'\nwhile i < 31:\n print(dtinit)\n dtfim = np.datetime64(dtinit) + np.timedelta64(24, 'h')\n robo5.ExecutaRotina('created_at', dtinit,\n dtfim, 1)\n i = i+1\n dtinit = np.datetime64(dtinit) + np.timedelta64(24, 'h')\n\nprint('Periodo Executado com sucesso')\n"
] | [
[
"numpy.timedelta64",
"numpy.datetime64"
]
] |
GuilhermeToso/masters-project | [
"01d5acfddaedb3cbf7fa9247a88108530547e155",
"01d5acfddaedb3cbf7fa9247a88108530547e155"
] | [
"tests/5 - Models segmentation by sync/5.1 - Hodgkin-Huxley/shh_Nneurons_sync_test.py",
"tests/3 - Stochastic Models Synchronization/3.3 - Integrate-and-Fire/sif_couple_var.py"
] | [
"\"\"\" \nStochastic Hodgkin-Huxley Neurons\n=================================\n\nAnalysis of 12 Neurons coupled in 3 different groups\n----------------------------------------------------\n\n**Author**: Guilherme M. Toso\n**Tittle**: shh_Nneurons_sunc_test.py\n**Project**: Semi-Supervised Learning Using Competition for Neurons' Synchronization\n\n\n**Description**:\n\n This script uses the Hodgkin-Huxley Biological Neuron Model with Stochastic terms,\n and synchronizes 12 neurons in 3 different groups, such that the neurons in the same group\n are synchronized, while the neurons in different groups are desynchronized. This script plots \n 12 stochastic trajectories, as much as their differences (|V\\:sub:`i` - V\\:sub:`j`|),\n the growing phases (\\phi\\:sub:`i`, \\phi\\:sub:`j`), and the phases difference(|\\phi\\:sub:`i` - \\phi\\:sub:`j`|)\n\n\"\"\"\n\n\"\"\" Dependencies \"\"\"\nimport sys\nimport os\npath = os.getcwd()\nsys.path.insert(0,path)\nfrom nsc import HodgkinHuxley, SDE, Chemical, Couple\nfrom nsc import unwrap\nfrom nsc import ngplot\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib import colors\nimport sys\nnp.random.seed(0)\n\n\"\"\" Define the total amount of neurons \"\"\"\nneurons = 12\n\n\"\"\" Define the initial parameters \"\"\"\nv_na = np.zeros(neurons) + 115\nv_k = np.zeros(neurons) - 12\nv_l = np.zeros(neurons) + 86\ng_na = np.zeros(neurons) + 120\ng_k = np.zeros(neurons) + 36\ng_l = np.zeros(neurons) + .3\nc = np.ones(neurons)\nsigma = 1.0\nsigma_external = 0.5\n\n\"\"\" Frequency \"\"\" \nfreq = 50.0\n\n\"\"\" Period \"\"\"\nT = 1/freq\n\n\"\"\" Define the step value for every step \"\"\"\nstep = T\n\n\"\"\" Instantiate the Hodgkin-Huxley Class \"\"\"\nhh = HodgkinHuxley(v_na, v_k, v_l, g_na, g_k, g_l, c)\nsde = SDE(step, sigma_external, sigma, v_na, v_k, v_l, g_na, g_k, g_l, c)\n\n\"\"\" Define the Chemical class params an instantiate it \"\"\"\njch = 0.1\nv_reversal = -70\nch = Chemical(jch, v_reversal)\n\n\"\"\" Define the total time \"\"\"\nt_max = 300.0\n\n\"\"\" Define the number of iterations \"\"\"\nn_iter = int(t_max/step)\n\n\n\"\"\" Define the initial time t, and the variables V, M, N, and H \"\"\"\nt = 0.0\nv = np.random.uniform(0.0, 4.0, (neurons))\nm = hh.m0(v)[2]\nn = hh.n0(v)[2]\nh = hh.h0(v)[2]\ny = ch.y0(v)[2]\n\n\n\"\"\" Define the array where will be stored all Variables (V, M, N and H) of all Neurons at all times. 
\"\"\"\ndata = np.zeros((n_iter,5,neurons))\n\n\"\"\" Initialize the matrix init that contains all Variables of all Neurons at time t \"\"\"\ninit = np.array([v,m,n,h,y])\n\n\"\"\" Create the array of time \"\"\"\ntime = np.zeros((n_iter))\n\n\"\"\" Cluster Amount \"\"\"\ncluster = 3\n\n\"\"\" Determine the coupling force \"\"\"\nk = np.zeros(shape=(neurons,neurons)) + 0.8\nprint(k)\n\n\"\"\" Determine the adjacent matrix that represents which oscillators are coupled\n and in this case, none of them \"\"\"\nadjacency = np.array([\\\n [0,0,0,0,1,0,0,0,1,0,0,1],\\\n [0,0,1,0,0,1,0,0,0,0,1,0],\\\n [0,1,0,0,0,1,0,0,0,0,1,0],\\\n [0,0,0,0,0,0,1,1,0,1,0,0],\\\n [1,0,0,0,0,0,0,0,1,0,0,1],\\\n [0,1,1,0,0,0,0,0,0,0,1,0],\\\n [0,0,0,1,0,0,0,1,0,1,0,0],\\\n [0,0,0,1,0,0,1,0,0,1,0,0],\\\n [1,0,0,0,1,0,0,0,0,0,0,1],\\\n [0,0,0,1,0,0,1,1,0,0,0,0],\\\n [0,1,1,0,0,1,0,0,0,0,0,0],\\\n [1,0,0,0,1,0,0,0,1,0,0,0]])\n\nk = k*adjacency\n\n\"\"\" Instantiate the Couple class \"\"\"\ncouple = Couple()\n\n\"\"\" Begin the Iteration Process \"\"\"\nfor i in range(len(time)):\n \n \"\"\" Stores the matrix init at the data array in the time i \"\"\"\n data[i] = init\n \n \"\"\" The array time at iteration i receives the value of t \"\"\"\n time[i] = t\n \n \"\"\" Define the initial Variables \"\"\"\n v = init[0]\n m = init[1]\n n = init[2]\n h = init[3]\n y = init[4]\n \n \"\"\" Set the electrical current I \"\"\"\n current = 20\n\n couple.data = v\n\n next_v = v + sde.membrane_potential(v,m,n,h,current) - ch.synapse(y,v)*step - couple.synapse(k)\n next_m = m + sde.stochastic_sodium_activate(m,v)\n next_h = h + sde.stochastic_sodium_deactivate(h,v)\n next_n = n + sde.stochastic_potassium_activate(n,v)\n next_y = y + sde.stochastic_chemical_transmitter(y,v)\n\n init[0] = next_v\n init[1] = next_m\n init[2] = next_n\n init[3] = next_h\n init[4] = next_y\n\n \n \"\"\" Update Time \"\"\"\n t = t + step\n\n\n\n\"\"\" Transpose the data array \"\"\"\ndata1 = np.transpose(data,(1,2,0))\n\n\n\"\"\" Calculate the Sodium and Potassium Conductances \"\"\"\ngna1 = 120*(data1[1]**3)*data1[3]\ngk1 = 36*(data1[2]**4)\n\nred_colors = [\"orangered\", 'darkred', 'firebrick', 'red']\ngreen_colors = [\"limegreen\", 'forestgreen', 'darkgreen', 'green']\nblue_colors = [\"royalblue\", \"midnightblue\",\"mediumblue\", \"blue\"]\n\n\"\"\" Total colors \"\"\"\ncolors = red_colors + green_colors + blue_colors\n\n\"\"\" Organize the data \"\"\"\ndata2 = np.array([data1[0][0], data1[0][4], data1[0][8], data1[0][11],\\\n data1[0][1], data1[0][2], data1[0][5], data1[0][10], \\\n data1[0][3], data1[0][6], data1[0][7], data1[0][9]])\n\n\n\"\"\" Get the peak indexes, times and the periods between them \"\"\"\ninds, times, pers = unwrap.get_peaks_indexes(data2[:,:].T, 40, T)\n#print(inds)\n\nneurons_array = []\nfor i in range(len(times)):\n\n neurons_array.append(np.zeros(times[i].size)+i)\n\ncols = ['r','g','b']\nlabeled = {'Grupo 1':[0,1,2,3], 'Grupo 2':[4,5,6,7], 'Grupo 3':[8,9,10,11]}\nngplot.neural_activity(times, neurons_array,t_max, colors = cols, labeled=labeled)\n\n\"\"\" Get the phases \"\"\"\nphases = unwrap.unwrap_static_2(data2.shape[1], inds, T, model='HH')\n\n\"\"\" Plot phases \"\"\"\nngplot.phases(phases, colors, T)\n\n\n\"\"\" Plot the trajectories \"\"\"\n\nplt.plot(time, data1[0][0], c = red_colors[0])\nplt.plot(time, data1[0][4], c = red_colors[1])\nplt.plot(time, data1[0][8], c = red_colors[2])\nplt.plot(time, data1[0][11], c = red_colors[3])\nplt.xlabel('t [ms]',fontsize=34, labelpad=10)\nplt.ylabel('V [mV]',fontsize=34, 
labelpad=30)\nplt.yticks(fontsize=34)\nplt.xticks(fontsize=34)\nplt.grid(True)\nplt.title(\"Synchronized neurons belonging to Group 1\", fontsize = 24)\nplt.show()\n\n\nplt.plot(time, data1[0][1], c = green_colors[0])\nplt.plot(time, data1[0][2], c = green_colors[1])\nplt.plot(time, data1[0][5], c = green_colors[2])\nplt.plot(time, data1[0][10], c = green_colors[3])\nplt.xlabel('t [ms]',fontsize=34, labelpad=10)\nplt.ylabel('V [mV]',fontsize=34, labelpad=30)\nplt.yticks(fontsize=34)\nplt.xticks(fontsize=34)\nplt.grid(True)\nplt.title(\"Synchronized neurons belonging to Group 2\", fontsize = 24)\nplt.show()\n\nplt.plot(time, data1[0][3], c = blue_colors[0])\nplt.plot(time, data1[0][6], c = blue_colors[1])\nplt.plot(time, data1[0][7], c = blue_colors[2])\nplt.plot(time, data1[0][9], c = blue_colors[3])\nplt.xlabel('t [ms]',fontsize=34, labelpad=10)\nplt.ylabel('V [mV]',fontsize=34, labelpad=30)\nplt.yticks(fontsize=34)\nplt.xticks(fontsize=34)\nplt.grid(True)\nplt.title(\"Synchronized neurons belonging to Group 3\", fontsize = 24)\nplt.show()\n\nplt.plot(time, data1[0][11], c = red_colors[3])\nplt.plot(time, data1[0][10], c = green_colors[3])\nplt.plot(time, data1[0][3], c = blue_colors[0])\nplt.plot(time, data1[0][6], c = blue_colors[1])\nplt.plot(time, data1[0][7], c = blue_colors[2])\nplt.plot(time, data1[0][9], c = blue_colors[3])\nplt.xlabel('t [ms]',fontsize=34, labelpad=10)\nplt.ylabel('V [mV]',fontsize=34, labelpad=30)\nplt.yticks(fontsize=34)\nplt.xticks(fontsize=34)\nplt.grid(True)\nplt.title(\"Desynchronized neurons from Groups 1, 2 and 3\", fontsize = 24)\nplt.show()\n\n\"\"\" Get the Phases difference with group1 as reference\"\"\"\nngplot.phases_diff_3D(0, phases, T)\n\n\"\"\" Get the Phases difference with group2 as reference\"\"\"\nngplot.phases_diff_3D(4, phases, T)\n\n\"\"\" Get the Phases difference with group3 as reference\"\"\"\nngplot.phases_diff_3D(8, phases, T)\n\n\"\"\" Get the Trajectories difference with group1 as reference\"\"\"\nngplot.trajecs_diff_3D(0, data2, T)\n\n\"\"\" Get the Trajectories difference with group2 as reference\"\"\"\nngplot.trajecs_diff_3D(4, data2, T)\n\n\"\"\" Get the Trajectories difference with group3 as reference\"\"\"\nngplot.trajecs_diff_3D(8, data2, T)",
"\"\"\" \nStochastic Integrate-and-Fire Neurons\n=================================\n\nCoupling Force Variation\n------------------------\n\n**Author**: Guilherme M. Toso\n**Tittle**: sif_couple_var.py\n**Project**: Semi-Supervised Learning Using Competition for Neurons' Synchronization\n\n**Description**:\n\n This script uses the Integrate-and-Fire Biological Neuron Model with Stochastic terms,\n it uses two neurons and then try to synchronize them by varying yhe coupling force k. This script plots \n the differences (|V\\:sub:`i` - V\\:sub:`j`|) and the phases difference(|\\phi\\:sub:`i` - \\phi\\:sub:`j`|) of the two trajectories\n of every k value.\n\n\"\"\"\n\nimport sys\nimport os\npath = os.getcwd()\nsys.path.insert(0,path)\nfrom nsc import IntegrateAndFire, Couple, ngplot, unwrap\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nfrom tqdm import tqdm\n\n#np.random.seed(0)\n\n\"\"\" Integrate and Fire Parameters \"\"\"\nvrest = 0.0\nr = 1.0\ntau = 10.0\nthreshold = 1.0\nI = 2.5\n\n\"\"\" Instantiates the Integrate and Fire Model Class \"\"\"\nIF = IntegrateAndFire(vrest,r,tau,threshold)\n\n\"\"\" Neurons amount \"\"\"\nneurons = 2\n\"\"\" Time Properties \"\"\"\ntotal = 200\n\n\"\"\" Coupling Force Vector \"\"\"\nk = np.linspace(0,1.2,num=100)\ndef force_variation(k, neurons, decimals = 2):\n\n num = k.size\n k = np.repeat(k,repeats=neurons**2)\n k = np.reshape(k,(num,neurons,neurons))\n k[:,np.arange(neurons), np.arange(neurons)] = 0\n k = np.around(k,decimals=decimals)\n\n return k\nforce = force_variation(k,neurons)\n\ncouple = Couple()\n\n\"\"\" Create the data structure to store the trajectories differences and the phases \"\"\"\ndiff_data = np.zeros((k.size, total+1))\nphases_data = np.zeros((k.size, total+1))\n\nsigma = 0.3\n\nfor i in tqdm(range(k.size)):\n\n \n time = np.linspace(0,total,(total+1))\n\n \"\"\" Data array \"\"\"\n data = np.zeros((total+1,neurons))\n\n u = np.random.uniform(0,0.5,size=neurons)\n\n\n for j in tqdm(range(time.size)):\n\n data[j] = u\n\n couple.data = u\n\n next_u = u + IF.lif(u,I) + sigma*u*np.random.normal(0,0.2,size=neurons) - couple.synapse(force[i])\n\n u = IF.reset(data[j],next_u)\n\n \"\"\" Store the trajecs difference data with the ith element of coupling force k \"\"\"\n diff_data[i] = np.abs(data[:,0] - data[:,1])\n\n \"\"\" Get the peak indexes, times and the periods between them \"\"\"\n inds, times, pers = unwrap.get_peaks_indexes(data[:,:], threshold, 1)\n \"\"\" Get the phases \"\"\"\n phases = unwrap.unwrap_static_2(total+1, inds, 1,model='IAF')\n\n \"\"\" Store the phases difference data with the ith element of coupling force k \"\"\"\n phases_data[i] = np.abs(phases[0] - phases[1])\n\nngplot.coupling(diff_data, phases_data, k, time)\n"
] | [
[
"numpy.random.uniform",
"numpy.ones",
"numpy.transpose",
"numpy.zeros",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.grid",
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel"
],
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.reshape",
"numpy.abs",
"numpy.repeat",
"numpy.arange",
"numpy.random.normal",
"numpy.around",
"numpy.linspace"
]
] |
InkToYou/TreeWasserstein | [
"b3f26dd50cc5f06a40e076b2e68f6e5c83786e7b"
] | [
"tests/test_treewasserstein.py"
] | [
"import numpy as np\nimport pytest\n\nimport networkx as nx\nimport ot\nimport tw\n\n\nclass TestBuildValidTreeMetric(object):\n @pytest.mark.parametrize(\n \"num_node, edges\",\n [\n (5, [(i % 5, (i + 1) % 5, i + 1) for i in range(5)]),\n (3, [(i % 3, (i + 1) % 3, i + 1) for i in range(3)]),\n ],\n )\n def test_invalid_tree(self, num_node, edges):\n with pytest.raises(ValueError):\n first_prob = np.zeros(num_node)\n second_prob = np.zeros(num_node)\n first_prob[0] = 1.0\n second_prob[-1] = 1.0\n tw.distance(first_prob, second_prob, edges)\n\n\nclass TestTreeWasserstein(object):\n def test_tree_wasserstein(self):\n for i in range(100):\n num_node = np.random.randint(10, 200)\n G = nx.generators.random_tree(num_node)\n edges = [(fr, to, 1) for (fr, to) in list(G.edges())]\n first_prob = np.random.rand(num_node)\n first_prob = first_prob / first_prob.sum()\n second_prob = np.random.rand(num_node)\n second_prob = second_prob / second_prob.sum()\n twd = tw.distance(first_prob, second_prob, edges)\n\n adj_dict = dict(nx.all_pairs_shortest_path_length(G))\n metric = np.array(\n [[adj_dict[i][j] for i in range(num_node)] for j in range(num_node)]\n )\n ans = ot.lp.emd2(first_prob, second_prob, metric)\n\n assert np.allclose([twd], [ans]), f\"i: {i}, TW : {twd}, WD : {ans}\"\n"
] | [
[
"numpy.allclose",
"numpy.random.randint",
"numpy.random.rand",
"numpy.zeros"
]
] |
reveriel/depconv | [
"4f50d8651655c3a275f15422559eac82879704da"
] | [
"second/pytorch/models/voxelnet.py"
] | [
"import time\nfrom enum import Enum\nfrom functools import reduce\nimport contextlib\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nimport torchplus\nfrom second.pytorch.core import box_torch_ops\nfrom second.pytorch.core.losses import (WeightedSigmoidClassificationLoss,\n WeightedSmoothL1LocalizationLoss,\n WeightedSoftmaxClassificationLoss)\nfrom second.pytorch.models import middle, pointpillars, rpn, voxel_encoder\nfrom torchplus import metrics\nfrom second.pytorch.utils import torch_timer\n\nfrom second.sphere.model import DepConvNet3, ConvNet\n\ndef _get_pos_neg_loss(cls_loss, labels):\n # cls_loss: [N, num_anchors, num_class]\n # labels: [N, num_anchors]\n batch_size = cls_loss.shape[0]\n if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:\n cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_pos_loss = cls_pos_loss.sum() / batch_size\n cls_neg_loss = cls_neg_loss.sum() / batch_size\n else:\n cls_pos_loss = cls_loss[..., 1:].sum() / batch_size\n cls_neg_loss = cls_loss[..., 0].sum() / batch_size\n return cls_pos_loss, cls_neg_loss\n\nREGISTERED_NETWORK_CLASSES = {}\n\ndef register_voxelnet(cls, name=None):\n global REGISTERED_NETWORK_CLASSES\n if name is None:\n name = cls.__name__\n assert name not in REGISTERED_NETWORK_CLASSES, f\"exist class: {REGISTERED_NETWORK_CLASSES}\"\n REGISTERED_NETWORK_CLASSES[name] = cls\n return cls\n\ndef get_voxelnet_class(name):\n global REGISTERED_NETWORK_CLASSES\n assert name in REGISTERED_NETWORK_CLASSES, f\"available class: {REGISTERED_NETWORK_CLASSES}\"\n return REGISTERED_NETWORK_CLASSES[name]\n\nclass LossNormType(Enum):\n NormByNumPositives = \"norm_by_num_positives\"\n NormByNumExamples = \"norm_by_num_examples\"\n NormByNumPosNeg = \"norm_by_num_pos_neg\"\n DontNorm = \"dont_norm\"\n\n@register_voxelnet\nclass VoxelNet(nn.Module):\n def __init__(self,\n output_shape,\n num_class=2,\n num_input_features=4,\n vfe_class_name=\"VoxelFeatureExtractor\",\n vfe_num_filters=[32, 128],\n with_distance=False,\n middle_class_name=\"SparseMiddleExtractor\",\n middle_num_input_features=-1,\n middle_num_filters_d1=[64],\n middle_num_filters_d2=[64, 64],\n rpn_class_name=\"RPN\",\n rpn_num_input_features=-1,\n rpn_layer_nums=[3, 5, 5],\n rpn_layer_strides=[2, 2, 2],\n rpn_num_filters=[128, 128, 256],\n rpn_upsample_strides=[1, 2, 4],\n rpn_num_upsample_filters=[256, 256, 256],\n use_norm=True,\n use_groupnorm=False,\n num_groups=32,\n use_direction_classifier=True,\n use_sigmoid_score=False,\n encode_background_as_zeros=True,\n use_rotate_nms=True,\n multiclass_nms=False,\n nms_score_thresholds=None,\n nms_pre_max_sizes=None,\n nms_post_max_sizes=None,\n nms_iou_thresholds=None,\n target_assigner=None,\n cls_loss_weight=1.0,\n loc_loss_weight=1.0,\n pos_cls_weight=1.0,\n neg_cls_weight=1.0,\n direction_loss_weight=1.0,\n loss_norm_type=LossNormType.NormByNumPositives,\n encode_rad_error_by_sin=False,\n loc_loss_ftor=None,\n cls_loss_ftor=None,\n measure_time=False,\n voxel_generator=None,\n post_center_range=None,\n dir_offset=0.0,\n sin_error_factor=1.0,\n nms_class_agnostic=False,\n num_direction_bins=2,\n direction_limit_offset=0,\n name='voxelnet'):\n super().__init__()\n self.name = name\n self._sin_error_factor = sin_error_factor\n self._num_class = num_class\n self._use_rotate_nms = use_rotate_nms\n self._multiclass_nms = multiclass_nms\n self._nms_score_thresholds = 
nms_score_thresholds\n self._nms_pre_max_sizes = nms_pre_max_sizes\n self._nms_post_max_sizes = nms_post_max_sizes\n self._nms_iou_thresholds = nms_iou_thresholds\n self._use_sigmoid_score = use_sigmoid_score\n self._encode_background_as_zeros = encode_background_as_zeros\n self._use_direction_classifier = use_direction_classifier\n self._num_input_features = num_input_features\n self._box_coder = target_assigner.box_coder\n self.target_assigner = target_assigner\n self.voxel_generator = voxel_generator\n self._pos_cls_weight = pos_cls_weight\n self._neg_cls_weight = neg_cls_weight\n self._encode_rad_error_by_sin = encode_rad_error_by_sin\n self._loss_norm_type = loss_norm_type\n self._dir_loss_ftor = WeightedSoftmaxClassificationLoss()\n self._diff_loc_loss_ftor = WeightedSmoothL1LocalizationLoss()\n self._dir_offset = dir_offset\n self._loc_loss_ftor = loc_loss_ftor\n self._cls_loss_ftor = cls_loss_ftor\n self._direction_loss_weight = direction_loss_weight\n self._cls_loss_weight = cls_loss_weight\n self._loc_loss_weight = loc_loss_weight\n self._post_center_range = post_center_range or []\n self.measure_time = measure_time\n self._nms_class_agnostic = nms_class_agnostic\n self._num_direction_bins = num_direction_bins\n self._dir_limit_offset = direction_limit_offset\n self.voxel_feature_extractor = voxel_encoder.get_vfe_class(vfe_class_name)(\n num_input_features,\n use_norm,\n num_filters=vfe_num_filters,\n with_distance=with_distance,\n voxel_size=self.voxel_generator.voxel_size,\n pc_range=self.voxel_generator.point_cloud_range,\n )\n self.middle_feature_extractor = middle.get_middle_class(middle_class_name)(\n output_shape,\n use_norm,\n num_input_features=middle_num_input_features,\n num_filters_down1=middle_num_filters_d1,\n num_filters_down2=middle_num_filters_d2)\n # self.feature_extractor = DepConvNet3(5)\n # self.feature_extractor = ConvNet(5)\n\n self.rpn = rpn.get_rpn_class(rpn_class_name)(\n use_norm=True,\n num_class=num_class,\n layer_nums=rpn_layer_nums,\n layer_strides=rpn_layer_strides,\n num_filters=rpn_num_filters,\n upsample_strides=rpn_upsample_strides,\n num_upsample_filters=rpn_num_upsample_filters,\n num_input_features=rpn_num_input_features,\n num_anchor_per_loc=target_assigner.num_anchors_per_location,\n encode_background_as_zeros=encode_background_as_zeros,\n use_direction_classifier=use_direction_classifier,\n use_groupnorm=use_groupnorm,\n num_groups=num_groups,\n box_code_size=target_assigner.box_coder.code_size,\n num_direction_bins=self._num_direction_bins)\n self.rpn_acc = metrics.Accuracy(\n dim=-1, encode_background_as_zeros=encode_background_as_zeros)\n self.rpn_precision = metrics.Precision(dim=-1)\n self.rpn_recall = metrics.Recall(dim=-1)\n self.rpn_metrics = metrics.PrecisionRecall(\n dim=-1,\n thresholds=[0.1, 0.3, 0.5, 0.7, 0.8, 0.9, 0.95],\n use_sigmoid_score=use_sigmoid_score,\n encode_background_as_zeros=encode_background_as_zeros)\n\n self.rpn_cls_loss = metrics.Scalar()\n self.rpn_loc_loss = metrics.Scalar()\n self.rpn_total_loss = metrics.Scalar()\n self.register_buffer(\"global_step\", torch.LongTensor(1).zero_())\n\n self._time_dict = {}\n self._time_total_dict = {}\n self._time_count_dict = {}\n\n def start_timer(self, *names):\n if not self.measure_time:\n return\n torch.cuda.synchronize()\n for name in names:\n self._time_dict[name] = time.time()\n\n def end_timer(self, name):\n if not self.measure_time:\n return\n torch.cuda.synchronize()\n time_elapsed = time.time() - self._time_dict[name]\n if name not in self._time_count_dict:\n 
self._time_count_dict[name] = 1\n self._time_total_dict[name] = time_elapsed\n else:\n self._time_count_dict[name] += 1\n self._time_total_dict[name] += time_elapsed\n self._time_dict[name] = 0\n\n def clear_timer(self):\n self._time_count_dict.clear()\n self._time_dict.clear()\n self._time_total_dict.clear()\n\n @contextlib.contextmanager\n def profiler(self):\n old_measure_time = self.measure_time\n self.measure_time = True\n yield\n self.measure_time = old_measure_time\n\n def get_avg_time_dict(self):\n ret = {}\n for name, val in self._time_total_dict.items():\n count = self._time_count_dict[name]\n ret[name] = val / max(1, count)\n return ret\n\n def update_global_step(self):\n self.global_step += 1\n\n def get_global_step(self):\n return int(self.global_step.cpu().numpy()[0])\n\n def clear_global_step(self):\n self.global_step.zero_()\n\n def loss(self, example, preds_dict):\n box_preds = preds_dict[\"box_preds\"]\n cls_preds = preds_dict[\"cls_preds\"]\n batch_size_dev = cls_preds.shape[0]\n self.start_timer(\"loss forward\")\n labels = example['labels']\n reg_targets = example['reg_targets']\n importance = example['importance']\n self.start_timer(\"prepare weight forward\")\n cls_weights, reg_weights, cared = prepare_loss_weights(\n labels,\n pos_cls_weight=self._pos_cls_weight,\n neg_cls_weight=self._neg_cls_weight,\n loss_norm_type=self._loss_norm_type,\n dtype=box_preds.dtype)\n\n cls_targets = labels * cared.type_as(labels)\n cls_targets = cls_targets.unsqueeze(-1)\n self.end_timer(\"prepare weight forward\")\n self.start_timer(\"create_loss forward\")\n loc_loss, cls_loss = create_loss(\n self._loc_loss_ftor,\n self._cls_loss_ftor,\n box_preds=box_preds,\n cls_preds=cls_preds,\n cls_targets=cls_targets,\n cls_weights=cls_weights * importance,\n reg_targets=reg_targets,\n reg_weights=reg_weights * importance,\n num_class=self._num_class,\n encode_rad_error_by_sin=self._encode_rad_error_by_sin,\n encode_background_as_zeros=self._encode_background_as_zeros,\n box_code_size=self._box_coder.code_size,\n sin_error_factor=self._sin_error_factor,\n num_direction_bins=self._num_direction_bins,\n )\n loc_loss_reduced = loc_loss.sum() / batch_size_dev\n loc_loss_reduced *= self._loc_loss_weight\n cls_pos_loss, cls_neg_loss = _get_pos_neg_loss(cls_loss, labels)\n cls_pos_loss /= self._pos_cls_weight\n cls_neg_loss /= self._neg_cls_weight\n cls_loss_reduced = cls_loss.sum() / batch_size_dev\n cls_loss_reduced *= self._cls_loss_weight\n loss = loc_loss_reduced + cls_loss_reduced\n self.end_timer(\"create_loss forward\")\n if self._use_direction_classifier:\n dir_targets = get_direction_target(\n example['anchors'],\n reg_targets,\n dir_offset=self._dir_offset,\n num_bins=self._num_direction_bins)\n dir_logits = preds_dict[\"dir_cls_preds\"].view(\n batch_size_dev, -1, self._num_direction_bins)\n weights = (labels > 0).type_as(dir_logits) * importance\n weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)\n dir_loss = self._dir_loss_ftor(\n dir_logits, dir_targets, weights=weights)\n dir_loss = dir_loss.sum() / batch_size_dev\n loss += dir_loss * self._direction_loss_weight\n self.end_timer(\"loss forward\")\n res = {\n \"loss\": loss,\n \"cls_loss\": cls_loss,\n \"loc_loss\": loc_loss,\n \"cls_pos_loss\": cls_pos_loss,\n \"cls_neg_loss\": cls_neg_loss,\n \"cls_preds\": cls_preds,\n \"cls_loss_reduced\": cls_loss_reduced,\n \"loc_loss_reduced\": loc_loss_reduced,\n \"cared\": cared,\n }\n if self._use_direction_classifier:\n res[\"dir_loss_reduced\"] = dir_loss\n return res\n\n # 
def network_forward(self, feature, batch_size):\n def network_forward(self, voxels, num_points, coors, batch_size):\n \"\"\"this function is used for subclass.\n you can add custom network architecture by subclass VoxelNet class\n and override this function.\n Returns:\n preds_dict: {\n box_preds: ...\n cls_preds: ...\n dir_cls_preds: ...\n }\n \"\"\"\n self.start_timer(\"voxel_feature_extractor\")\n voxel_features = self.voxel_feature_extractor(voxels, num_points,\n coors)\n self.end_timer(\"voxel_feature_extractor\")\n self.start_timer(\"middle forward\")\n spatial_features = self.middle_feature_extractor(\n voxel_features, coors, batch_size)\n # spatial_features = self.feature_extractor(feature)\n self.end_timer(\"middle forward\")\n self.start_timer(\"rpn forward\")\n preds_dict = self.rpn(spatial_features)\n self.end_timer(\"rpn forward\")\n return preds_dict\n\n def forward(self, example):\n \"\"\"module's forward should always accept dict and return loss.\n \"\"\"\n voxels = example[\"voxels\"]\n num_points = example[\"num_points\"]\n coors = example[\"coordinates\"]\n # feature = example[\"feature\"]\n # feature = torch.tensor(feature, device=\"cuda\", dtype=torch.float32)\n if len(num_points.shape) == 2: # multi-gpu\n num_voxel_per_batch = example[\"num_voxels\"].cpu().numpy().reshape(\n -1)\n voxel_list = []\n num_points_list = []\n coors_list = []\n for i, num_voxel in enumerate(num_voxel_per_batch):\n voxel_list.append(voxels[i, :num_voxel])\n num_points_list.append(num_points[i, :num_voxel])\n coors_list.append(coors[i, :num_voxel])\n voxels = torch.cat(voxel_list, dim=0)\n num_points = torch.cat(num_points_list, dim=0)\n coors = torch.cat(coors_list, dim=0)\n coors[:,0] -= coors[:,0].min()\n # print(\"voxels shape = \", voxels.shape)\n # print(\"coors shape = \", coors.shape)\n # print(\"num_points shape = \", num_points.shape)\n batch_anchors = example[\"anchors\"]\n # print(\"batch anchor shpae\", batch_anchors.shape)\n batch_size_dev = batch_anchors.shape[0]\n # features: [num_voxels, max_num_points_per_voxel, 7]\n # num_points: [num_voxels]\n # coors: [num_voxels, 4]\n preds_dict = self.network_forward(voxels, num_points, coors, batch_size_dev)\n # preds_dict = self.network_forward(feature, batch_size_dev)\n\n # need to check size.\n box_preds = preds_dict[\"box_preds\"].view(batch_size_dev, -1, self._box_coder.code_size)\n err_msg = f\"num_anchors={batch_anchors.shape[1]}, but num_output={box_preds.shape[1]}. 
please check size\"\n assert batch_anchors.shape[1] == box_preds.shape[1], err_msg\n if self.training:\n return self.loss(example, preds_dict)\n else:\n self.start_timer(\"predict\")\n with torch.no_grad():\n res = self.predict(example, preds_dict)\n self.end_timer(\"predict\")\n return res\n\n def predict(self, example, preds_dict):\n \"\"\"start with v1.6.0, this function don't contain any kitti-specific code.\n Returns:\n predict: list of pred_dict.\n pred_dict: {\n box3d_lidar: [N, 7] 3d box.\n scores: [N]\n label_preds: [N]\n metadata: meta-data which contains dataset-specific information.\n for kitti, it contains image idx (label idx),\n for nuscenes, sample_token is saved in it.\n }\n \"\"\"\n batch_size = example['anchors'].shape[0]\n if \"metadata\" not in example or len(example[\"metadata\"]) == 0:\n meta_list = [None] * batch_size\n else:\n meta_list = example[\"metadata\"]\n batch_anchors = example[\"anchors\"].view(batch_size, -1,\n example[\"anchors\"].shape[-1])\n if \"anchors_mask\" not in example:\n batch_anchors_mask = [None] * batch_size\n else:\n batch_anchors_mask = example[\"anchors_mask\"].view(batch_size, -1)\n\n t = time.time()\n batch_box_preds = preds_dict[\"box_preds\"]\n batch_cls_preds = preds_dict[\"cls_preds\"]\n batch_box_preds = batch_box_preds.view(batch_size, -1,\n self._box_coder.code_size)\n num_class_with_bg = self._num_class\n if not self._encode_background_as_zeros:\n num_class_with_bg = self._num_class + 1\n\n batch_cls_preds = batch_cls_preds.view(batch_size, -1,\n num_class_with_bg)\n batch_box_preds = self._box_coder.decode_torch(batch_box_preds,\n batch_anchors)\n if self._use_direction_classifier:\n batch_dir_preds = preds_dict[\"dir_cls_preds\"]\n batch_dir_preds = batch_dir_preds.view(batch_size, -1,\n self._num_direction_bins)\n else:\n batch_dir_preds = [None] * batch_size\n\n predictions_dicts = []\n post_center_range = None\n if len(self._post_center_range) > 0:\n post_center_range = torch.tensor(\n self._post_center_range,\n dtype=batch_box_preds.dtype,\n device=batch_box_preds.device).float()\n for box_preds, cls_preds, dir_preds, a_mask, meta in zip(\n batch_box_preds, batch_cls_preds, batch_dir_preds,\n batch_anchors_mask, meta_list):\n if a_mask is not None:\n box_preds = box_preds[a_mask]\n cls_preds = cls_preds[a_mask]\n box_preds = box_preds.float()\n cls_preds = cls_preds.float()\n if self._use_direction_classifier:\n if a_mask is not None:\n dir_preds = dir_preds[a_mask]\n dir_labels = torch.max(dir_preds, dim=-1)[1]\n if self._encode_background_as_zeros:\n # this don't support softmax\n assert self._use_sigmoid_score is True\n total_scores = torch.sigmoid(cls_preds)\n else:\n # encode background as first element in one-hot vector\n if self._use_sigmoid_score:\n total_scores = torch.sigmoid(cls_preds)[..., 1:]\n else:\n total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]\n # Apply NMS in birdeye view\n if self._use_rotate_nms:\n nms_func = box_torch_ops.rotate_nms\n else:\n nms_func = box_torch_ops.nms\n feature_map_size_prod = batch_box_preds.shape[\n 1] // self.target_assigner.num_anchors_per_location\n if self._multiclass_nms:\n assert self._encode_background_as_zeros is True\n boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]\n if not self._use_rotate_nms:\n box_preds_corners = box_torch_ops.center_to_corner_box2d(\n boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],\n boxes_for_nms[:, 4])\n boxes_for_nms = box_torch_ops.corner_to_standup_nd(\n box_preds_corners)\n\n selected_boxes, selected_labels, selected_scores = [], [], []\n 
selected_dir_labels = []\n\n scores = total_scores\n boxes = boxes_for_nms\n selected_per_class = []\n score_threshs = self._nms_score_thresholds\n pre_max_sizes = self._nms_pre_max_sizes\n post_max_sizes = self._nms_post_max_sizes\n iou_thresholds = self._nms_iou_thresholds\n for class_idx, score_thresh, pre_ms, post_ms, iou_th in zip(\n range(self._num_class),\n score_threshs,\n pre_max_sizes, post_max_sizes, iou_thresholds):\n if self._nms_class_agnostic:\n class_scores = total_scores.view(\n feature_map_size_prod, -1,\n self._num_class)[..., class_idx]\n class_scores = class_scores.contiguous().view(-1)\n class_boxes_nms = boxes.view(-1,\n boxes_for_nms.shape[-1])\n class_boxes = box_preds\n class_dir_labels = dir_labels\n else:\n anchors_range = self.target_assigner.anchors_range(class_idx)\n class_scores = total_scores.view(\n -1,\n self._num_class)[anchors_range[0]:anchors_range[1], class_idx]\n class_boxes_nms = boxes.view(-1,\n boxes_for_nms.shape[-1])[anchors_range[0]:anchors_range[1], :]\n class_scores = class_scores.contiguous().view(-1)\n class_boxes_nms = class_boxes_nms.contiguous().view(\n -1, boxes_for_nms.shape[-1])\n class_boxes = box_preds.view(-1,\n box_preds.shape[-1])[anchors_range[0]:anchors_range[1], :]\n class_boxes = class_boxes.contiguous().view(\n -1, box_preds.shape[-1])\n if self._use_direction_classifier:\n class_dir_labels = dir_labels.view(-1)[anchors_range[0]:anchors_range[1]]\n class_dir_labels = class_dir_labels.contiguous(\n ).view(-1)\n if score_thresh > 0.0:\n class_scores_keep = class_scores >= score_thresh\n if class_scores_keep.shape[0] == 0:\n selected_per_class.append(None)\n continue\n class_scores = class_scores[class_scores_keep]\n if class_scores.shape[0] != 0:\n if score_thresh > 0.0:\n class_boxes_nms = class_boxes_nms[\n class_scores_keep]\n class_boxes = class_boxes[class_scores_keep]\n class_dir_labels = class_dir_labels[\n class_scores_keep]\n keep = nms_func(class_boxes_nms, class_scores, pre_ms,\n post_ms, iou_th)\n if keep.shape[0] != 0:\n selected_per_class.append(keep)\n else:\n selected_per_class.append(None)\n else:\n selected_per_class.append(None)\n selected = selected_per_class[-1]\n\n if selected is not None:\n selected_boxes.append(class_boxes[selected])\n selected_labels.append(\n torch.full([class_boxes[selected].shape[0]],\n class_idx,\n dtype=torch.int64,\n device=box_preds.device))\n if self._use_direction_classifier:\n selected_dir_labels.append(\n class_dir_labels[selected])\n selected_scores.append(class_scores[selected])\n selected_boxes = torch.cat(selected_boxes, dim=0)\n selected_labels = torch.cat(selected_labels, dim=0)\n selected_scores = torch.cat(selected_scores, dim=0)\n if self._use_direction_classifier:\n selected_dir_labels = torch.cat(selected_dir_labels, dim=0)\n else:\n # get highest score per prediction, than apply nms\n # to remove overlapped box.\n if num_class_with_bg == 1:\n top_scores = total_scores.squeeze(-1)\n top_labels = torch.zeros(\n total_scores.shape[0],\n device=total_scores.device,\n dtype=torch.long)\n else:\n top_scores, top_labels = torch.max(\n total_scores, dim=-1)\n if self._nms_score_thresholds[0] > 0.0:\n top_scores_keep = top_scores >= self._nms_score_thresholds[0]\n top_scores = top_scores.masked_select(top_scores_keep)\n\n if top_scores.shape[0] != 0:\n if self._nms_score_thresholds[0] > 0.0:\n box_preds = box_preds[top_scores_keep]\n if self._use_direction_classifier:\n dir_labels = dir_labels[top_scores_keep]\n top_labels = top_labels[top_scores_keep]\n boxes_for_nms = 
box_preds[:, [0, 1, 3, 4, 6]]\n if not self._use_rotate_nms:\n box_preds_corners = box_torch_ops.center_to_corner_box2d(\n boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],\n boxes_for_nms[:, 4])\n boxes_for_nms = box_torch_ops.corner_to_standup_nd(\n box_preds_corners)\n # the nms in 3d detection just remove overlap boxes.\n selected = nms_func(\n boxes_for_nms,\n top_scores,\n pre_max_size=self._nms_pre_max_sizes[0],\n post_max_size=self._nms_post_max_sizes[0],\n iou_threshold=self._nms_iou_thresholds[0],\n )\n else:\n selected = []\n # if selected is not None:\n selected_boxes = box_preds[selected]\n if self._use_direction_classifier:\n selected_dir_labels = dir_labels[selected]\n selected_labels = top_labels[selected]\n selected_scores = top_scores[selected]\n # finally generate predictions.\n if selected_boxes.shape[0] != 0:\n box_preds = selected_boxes\n scores = selected_scores\n label_preds = selected_labels\n if self._use_direction_classifier:\n dir_labels = selected_dir_labels\n period = (2 * np.pi / self._num_direction_bins)\n dir_rot = box_torch_ops.limit_period(\n box_preds[..., 6] - self._dir_offset,\n self._dir_limit_offset, period)\n box_preds[\n ...,\n 6] = dir_rot + self._dir_offset + period * dir_labels.to(\n box_preds.dtype)\n final_box_preds = box_preds\n final_scores = scores\n final_labels = label_preds\n if post_center_range is not None:\n mask = (final_box_preds[:, :3] >=\n post_center_range[:3]).all(1)\n mask &= (final_box_preds[:, :3] <=\n post_center_range[3:]).all(1)\n predictions_dict = {\n \"box3d_lidar\": final_box_preds[mask],\n \"scores\": final_scores[mask],\n \"label_preds\": label_preds[mask],\n \"metadata\": meta,\n }\n else:\n predictions_dict = {\n \"box3d_lidar\": final_box_preds,\n \"scores\": final_scores,\n \"label_preds\": label_preds,\n \"metadata\": meta,\n }\n else:\n dtype = batch_box_preds.dtype\n device = batch_box_preds.device\n predictions_dict = {\n \"box3d_lidar\":\n torch.zeros([0, box_preds.shape[-1]],\n dtype=dtype,\n device=device),\n \"scores\":\n torch.zeros([0], dtype=dtype, device=device),\n \"label_preds\":\n torch.zeros([0], dtype=top_labels.dtype, device=device),\n \"metadata\":\n meta,\n }\n predictions_dicts.append(predictions_dict)\n return predictions_dicts\n\n def metrics_to_float(self):\n self.rpn_acc.float()\n self.rpn_metrics.float()\n self.rpn_cls_loss.float()\n self.rpn_loc_loss.float()\n self.rpn_total_loss.float()\n\n def update_metrics(self, cls_loss, loc_loss, cls_preds, labels, sampled):\n batch_size = cls_preds.shape[0]\n num_class = self._num_class\n if not self._encode_background_as_zeros:\n num_class += 1\n cls_preds = cls_preds.view(batch_size, -1, num_class)\n rpn_acc = self.rpn_acc(labels, cls_preds, sampled).numpy()[0]\n prec, recall = self.rpn_metrics(labels, cls_preds, sampled)\n prec = prec.numpy()\n recall = recall.numpy()\n rpn_cls_loss = self.rpn_cls_loss(cls_loss).numpy()[0]\n rpn_loc_loss = self.rpn_loc_loss(loc_loss).numpy()[0]\n ret = {\n \"loss\": {\n \"cls_loss\": float(rpn_cls_loss),\n \"cls_loss_rt\": float(cls_loss.data.cpu().numpy()),\n 'loc_loss': float(rpn_loc_loss),\n \"loc_loss_rt\": float(loc_loss.data.cpu().numpy()),\n },\n \"rpn_acc\": float(rpn_acc),\n \"pr\": {},\n }\n for i, thresh in enumerate(self.rpn_metrics.thresholds):\n ret[\"pr\"][f\"prec@{int(thresh*100)}\"] = float(prec[i])\n ret[\"pr\"][f\"rec@{int(thresh*100)}\"] = float(recall[i])\n return ret\n\n def clear_metrics(self):\n self.rpn_acc.clear()\n self.rpn_metrics.clear()\n self.rpn_cls_loss.clear()\n 
self.rpn_loc_loss.clear()\n self.rpn_total_loss.clear()\n\n @staticmethod\n def convert_norm_to_float(net):\n '''\n BatchNorm layers to have parameters in single precision.\n Find all layers and convert them back to float. This can't\n be done with built in .apply as that function will apply\n fn to all modules, parameters, and buffers. Thus we wouldn't\n be able to guard the float conversion based on the module type.\n '''\n if isinstance(net, torch.nn.modules.batchnorm._BatchNorm):\n net.float()\n for child in net.children():\n VoxelNet.convert_norm_to_float(child)\n return net\n\n\ndef add_sin_difference(boxes1, boxes2, boxes1_rot, boxes2_rot, factor=1.0):\n if factor != 1.0:\n boxes1_rot = factor * boxes1_rot\n boxes2_rot = factor * boxes2_rot\n rad_pred_encoding = torch.sin(boxes1_rot) * torch.cos(boxes2_rot)\n rad_tg_encoding = torch.cos(boxes1_rot) * torch.sin(boxes2_rot)\n boxes1 = torch.cat([boxes1[..., :6], rad_pred_encoding, boxes1[..., 7:]],\n dim=-1)\n boxes2 = torch.cat([boxes2[..., :6], rad_tg_encoding, boxes2[..., 7:]],\n dim=-1)\n return boxes1, boxes2\n\n\ndef create_loss(loc_loss_ftor,\n cls_loss_ftor,\n box_preds,\n cls_preds,\n cls_targets,\n cls_weights,\n reg_targets,\n reg_weights,\n num_class,\n encode_background_as_zeros=True,\n encode_rad_error_by_sin=True,\n sin_error_factor=1.0,\n box_code_size=7,\n num_direction_bins=2):\n batch_size = int(box_preds.shape[0])\n box_preds = box_preds.view(batch_size, -1, box_code_size)\n if encode_background_as_zeros:\n cls_preds = cls_preds.view(batch_size, -1, num_class)\n else:\n cls_preds = cls_preds.view(batch_size, -1, num_class + 1)\n cls_targets = cls_targets.squeeze(-1)\n one_hot_targets = torchplus.nn.one_hot(\n cls_targets, depth=num_class + 1, dtype=box_preds.dtype)\n if encode_background_as_zeros:\n one_hot_targets = one_hot_targets[..., 1:]\n if encode_rad_error_by_sin:\n # sin(a - b) = sinacosb-cosasinb\n # reg_tg_rot = box_torch_ops.limit_period(\n # reg_targets[..., 6:7], 0.5, 2 * np.pi / num_direction_bins)\n box_preds, reg_targets = add_sin_difference(box_preds, reg_targets,\n box_preds[..., 6:7], reg_targets[..., 6:7], sin_error_factor)\n\n loc_losses = loc_loss_ftor(\n box_preds, reg_targets, weights=reg_weights) # [N, M]\n cls_losses = cls_loss_ftor(\n cls_preds, one_hot_targets, weights=cls_weights) # [N, M]\n return loc_losses, cls_losses\n\n\ndef prepare_loss_weights(labels,\n pos_cls_weight=1.0,\n neg_cls_weight=1.0,\n loss_norm_type=LossNormType.NormByNumPositives,\n dtype=torch.float32):\n \"\"\"get cls_weights and reg_weights from labels.\n \"\"\"\n cared = labels >= 0\n # cared: [N, num_anchors]\n positives = labels > 0\n negatives = labels == 0\n negative_cls_weights = negatives.type(dtype) * neg_cls_weight\n cls_weights = negative_cls_weights + pos_cls_weight * positives.type(dtype)\n reg_weights = positives.type(dtype)\n if loss_norm_type == LossNormType.NormByNumExamples:\n num_examples = cared.type(dtype).sum(1, keepdim=True)\n num_examples = torch.clamp(num_examples, min=1.0)\n cls_weights /= num_examples\n bbox_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(bbox_normalizer, min=1.0)\n elif loss_norm_type == LossNormType.NormByNumPositives: # for focal loss\n pos_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(pos_normalizer, min=1.0)\n cls_weights /= torch.clamp(pos_normalizer, min=1.0)\n elif loss_norm_type == LossNormType.NormByNumPosNeg:\n pos_neg = torch.stack([positives, negatives], dim=-1).type(dtype)\n 
normalizer = pos_neg.sum(1, keepdim=True) # [N, 1, 2]\n cls_normalizer = (pos_neg * normalizer).sum(-1) # [N, M]\n cls_normalizer = torch.clamp(cls_normalizer, min=1.0)\n # cls_normalizer will be pos_or_neg_weight/num_pos_or_neg\n normalizer = torch.clamp(normalizer, min=1.0)\n reg_weights /= normalizer[:, 0:1, 0]\n cls_weights /= cls_normalizer\n elif loss_norm_type == LossNormType.DontNorm: # support ghm loss\n pos_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(pos_normalizer, min=1.0)\n else:\n raise ValueError(\n f\"unknown loss norm type. available: {list(LossNormType)}\")\n return cls_weights, reg_weights, cared\n\n\ndef assign_weight_to_each_class(labels,\n weight_per_class,\n norm_by_num=True,\n dtype=torch.float32):\n weights = torch.zeros(labels.shape, dtype=dtype, device=labels.device)\n for label, weight in weight_per_class:\n positives = (labels == label).type(dtype)\n weight_class = weight * positives\n if norm_by_num:\n normalizer = positives.sum()\n normalizer = torch.clamp(normalizer, min=1.0)\n weight_class /= normalizer\n weights += weight_class\n return weights\n\n\ndef get_direction_target(anchors,\n reg_targets,\n one_hot=True,\n dir_offset=0,\n num_bins=2):\n batch_size = reg_targets.shape[0]\n anchors = anchors.view(batch_size, -1, anchors.shape[-1])\n rot_gt = reg_targets[..., 6] + anchors[..., 6]\n offset_rot = box_torch_ops.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)\n dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()\n dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)\n if one_hot:\n dir_cls_targets = torchplus.nn.one_hot(\n dir_cls_targets, num_bins, dtype=anchors.dtype)\n return dir_cls_targets\n"
] | [
[
"torch.stack",
"torch.cos",
"torch.nn.functional.softmax",
"torch.no_grad",
"torch.cuda.synchronize",
"torch.tensor",
"torch.full",
"torch.sin",
"torch.max",
"torch.zeros",
"torch.sigmoid",
"torch.LongTensor",
"torch.cat",
"torch.clamp",
"torch.floor"
]
] |
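
The entry above leans on the identity sin(a − b) = sin(a)cos(b) − cos(a)sin(b) in `add_sin_difference`: each box keeps one factor of the product, so a smooth-L1 loss on the encoded rotation channel behaves like a loss on sin(a − b) and no longer penalizes a prediction that differs from the target by a full turn. A minimal standalone sketch of just that encoding — the function name and sample angles are illustrative, not from the repository:

```python
import math

import torch


def sin_difference_encoding(pred_rot: torch.Tensor, target_rot: torch.Tensor):
    # enc_pred - enc_target == sin(pred_rot - target_rot) by the
    # angle-difference identity, so regressing the encoded pair is
    # free of the 2*pi wraparound discontinuity.
    enc_pred = torch.sin(pred_rot) * torch.cos(target_rot)
    enc_target = torch.cos(pred_rot) * torch.sin(target_rot)
    return enc_pred, enc_target


pred = torch.tensor([0.1])
target = torch.tensor([0.1 + math.tau])  # same heading, wrapped by 2*pi
enc_p, enc_t = sin_difference_encoding(pred, target)
print(torch.allclose(enc_p, enc_t, atol=1e-5))  # True: the wrap costs nothing
```
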
admariner/NeMo | [
"e542d7f9063a40afa4119a3b94de4c2c636a37bb"
] | [
"nemo/collections/asr/parts/utils/vad_utils.py"
] | [
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport json\nimport math\nimport multiprocessing\nimport os\nimport shutil\nfrom itertools import repeat\nfrom typing import Dict, Tuple\n\nimport IPython.display as ipd\nimport librosa\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom pyannote.core import Annotation, Segment\nfrom pyannote.metrics import detection\nfrom sklearn.model_selection import ParameterGrid\n\nfrom nemo.collections.asr.models import EncDecClassificationModel\nfrom nemo.utils import logging\n\ntry:\n from torch.cuda.amp import autocast\nexcept ImportError:\n from contextlib import contextmanager\n\n @contextmanager\n def autocast(enabled=None):\n yield\n\n\n\"\"\"\nThis file contains all the utility functions required for voice activity detection. \n\"\"\"\n\n\ndef prepare_manifest(config: dict) -> str:\n \"\"\"\n Perform VAD on long audio snippet might cause CUDA out of memory issue. \n Automatically split manifest entry by split_duration to avoid the potential memory issue.\n \"\"\"\n if 'prepared_manifest_vad_input' in config and config['prepared_manifest_vad_input']:\n manifest_vad_input = config['prepared_manifest_vad_input']\n else:\n manifest_vad_input = \"manifest_vad_input.json\"\n\n # input_list is a list of variable ['audio_filepath': i, \"offset\": xxx, \"duration\": xxx])\n if type(config['input']) == str:\n input_list = []\n with open(config['input'], 'r', encoding='utf-8') as manifest:\n for line in manifest.readlines():\n input_list.append(json.loads(line.strip()))\n elif type(config['input']) == list:\n input_list = config['input']\n else:\n raise ValueError(\n \"The input for manifest preparation would either be a string of the filepath to manifest or a list of {'audio_filepath': i, 'offset': 0, 'duration': null} \"\n )\n\n args_func = {\n 'label': 'infer',\n 'split_duration': config['split_duration'],\n 'window_length_in_sec': config['window_length_in_sec'],\n }\n\n if config.get('num_workers') is not None and config['num_workers'] > 1:\n p = multiprocessing.Pool(processes=config['num_workers'])\n results = p.starmap(write_vad_infer_manifest, zip(input_list, repeat(args_func)))\n p.close()\n else:\n results = [write_vad_infer_manifest(input_el, args_func) for input_el in input_list]\n\n if os.path.exists(manifest_vad_input):\n logging.info(\"The prepared manifest file exists. 
Overwriting!\")\n os.remove(manifest_vad_input)\n\n with open(manifest_vad_input, 'a', encoding='utf-8') as fout:\n for res in results:\n for r in res:\n json.dump(r, fout)\n fout.write('\\n')\n fout.flush()\n return manifest_vad_input\n\n\ndef write_vad_infer_manifest(file: dict, args_func: dict) -> list:\n \"\"\"\n Used by prepare_manifest.\n Given a list of files, split them with maximum split_duration and write them to the manifest.\n Args:\n files (dict) : file to be processed\n args_func:\n label (str): label for audio snippet.y\n split_duration (float): max duration of each audio clip (each line in json)\n window_length_in_sec (float) : length of window for generating the frame. Used for taking care of joint. \n Returns:\n res (list) : list of generated metadata line of json for file\n \"\"\"\n res = []\n label = args_func['label']\n split_duration = args_func['split_duration']\n window_length_in_sec = args_func['window_length_in_sec']\n filepath = file['audio_filepath']\n in_duration = file.get('duration', None)\n in_offset = file.get('offset', 0)\n\n try:\n sr = 16000\n x, _sr = librosa.load(filepath, sr=sr, offset=in_offset, duration=in_duration)\n duration = librosa.get_duration(y=x, sr=sr)\n left = duration\n current_offset = in_offset\n\n status = 'single'\n while left > 0:\n if left <= split_duration:\n if status == 'single':\n write_duration = left\n current_offset = 0\n else:\n status = 'end'\n write_duration = left + window_length_in_sec\n current_offset -= window_length_in_sec\n offset_inc = left\n left = 0\n else:\n if status == 'start' or status == 'next':\n status = 'next'\n else:\n status = 'start'\n\n if status == 'start':\n write_duration = split_duration\n offset_inc = split_duration\n else:\n write_duration = split_duration + window_length_in_sec\n current_offset -= window_length_in_sec\n offset_inc = split_duration + window_length_in_sec\n\n left -= split_duration\n\n metadata = {\n 'audio_filepath': filepath,\n 'duration': write_duration,\n 'label': label,\n 'text': '_',\n 'offset': current_offset,\n }\n res.append(metadata)\n\n current_offset += offset_inc\n\n except Exception as e:\n err_file = \"error.log\"\n with open(err_file, 'w', encoding='utf-8') as fout:\n fout.write(filepath + \":\" + str(e))\n return res\n\n\ndef get_vad_stream_status(data: list) -> list:\n \"\"\"\n Generate a list of status for each snippet in manifest. A snippet should be in single, start, next or end status. 
\n Used for concatenating to full audio file.\n Args:\n data (list): list of filepath of audio snippet\n Returns:\n status (list): list of status of each snippet.\n \"\"\"\n if len(data) == 1:\n return ['single']\n\n status = [None] * len(data)\n for i in range(len(data)):\n if i == 0:\n status[i] = 'start' if data[i] == data[i + 1] else 'single'\n elif i == len(data) - 1:\n status[i] = 'end' if data[i] == data[i - 1] else 'single'\n else:\n if data[i] != data[i - 1] and data[i] == data[i + 1]:\n status[i] = 'start'\n elif data[i] == data[i - 1] and data[i] == data[i + 1]:\n status[i] = 'next'\n elif data[i] == data[i - 1] and data[i] != data[i + 1]:\n status[i] = 'end'\n else:\n status[i] = 'single'\n return status\n\n\ndef load_tensor_from_file(filepath: str) -> Tuple[torch.Tensor, str]:\n \"\"\"\n Load torch.Tensor and the name from file\n \"\"\"\n frame = []\n with open(filepath, \"r\", encoding='utf-8') as f:\n for line in f.readlines():\n frame.append(float(line))\n\n name = filepath.split(\"/\")[-1].rsplit(\".\", 1)[0]\n return torch.tensor(frame), name\n\n\ndef generate_overlap_vad_seq(\n frame_pred_dir: str,\n smoothing_method: str,\n overlap: float,\n window_length_in_sec: float,\n shift_length_in_sec: float,\n num_workers: int,\n out_dir: str = None,\n) -> str:\n \"\"\"\n Generate predictions with overlapping input windows/segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple windows. \n Two common smoothing filters are supported: majority vote (median) and average (mean).\n This function uses multiprocessing to speed up. \n Args:\n frame_pred_dir (str): Directory of frame prediction file to be processed.\n smoothing_method (str): median or mean smoothing filter.\n overlap (float): amounts of overlap of adjacent windows.\n window_length_in_sec (float): length of window for generating the frame.\n shift_length_in_sec (float): amount of shift of window for generating the frame.\n out_dir (str): directory of generated predictions.\n num_workers(float): number of process for multiprocessing\n Returns:\n overlap_out_dir(str): directory of the generated predictions.\n \"\"\"\n\n frame_filepathlist = glob.glob(frame_pred_dir + \"/*.frame\")\n if out_dir:\n overlap_out_dir = out_dir\n else:\n overlap_out_dir = frame_pred_dir + \"/overlap_smoothing_output\" + \"_\" + smoothing_method + \"_\" + str(overlap)\n\n if not os.path.exists(overlap_out_dir):\n os.mkdir(overlap_out_dir)\n\n per_args = {\n \"overlap\": overlap,\n \"window_length_in_sec\": window_length_in_sec,\n \"shift_length_in_sec\": shift_length_in_sec,\n \"out_dir\": overlap_out_dir,\n \"smoothing_method\": smoothing_method,\n }\n if num_workers is not None and num_workers > 1:\n p = multiprocessing.Pool(processes=num_workers)\n p.starmap(generate_overlap_vad_seq_per_file, zip(frame_filepathlist, repeat(per_args)))\n p.close()\n p.join()\n else:\n for frame_filepath in frame_filepathlist:\n generate_overlap_vad_seq_per_file(frame_filepath, per_args)\n\n return overlap_out_dir\n\n\[email protected]\ndef generate_overlap_vad_seq_per_tensor(\n frame: torch.Tensor, per_args: Dict[str, float], smoothing_method: str\n) -> torch.Tensor:\n \"\"\"\n Use generated frame prediction (generated by shifting window of shift_length_in_sec (10ms)) to generate prediction with overlapping input window/segments\n See description in generate_overlap_vad_seq.\n Use this for single instance pipeline. 
\n \"\"\"\n # This function will be refactor for vectorization but this is okay for now\n\n overlap = per_args['overlap']\n window_length_in_sec = per_args['window_length_in_sec']\n shift_length_in_sec = per_args['shift_length_in_sec']\n frame_len = per_args.get('frame_len', 0.01)\n\n shift = int(shift_length_in_sec / frame_len) # number of units of shift\n seg = int((window_length_in_sec / frame_len + 1)) # number of units of each window/segment\n\n jump_on_target = int(seg * (1 - overlap)) # jump on target generated sequence\n jump_on_frame = int(jump_on_target / shift) # jump on input frame sequence\n\n if jump_on_frame < 1:\n raise ValueError(\n f\"Note we jump over frame sequence to generate overlapping input segments. \\n \\\n Your input makes jump_on_frame={jump_on_frame} < 1 which is invalid because it cannot jump and will stuck.\\n \\\n Please try different window_length_in_sec, shift_length_in_sec and overlap choices. \\n \\\n jump_on_target = int(seg * (1 - overlap)) \\n \\\n jump_on_frame = int(jump_on_frame/shift) \"\n )\n\n target_len = int(len(frame) * shift)\n\n if smoothing_method == 'mean':\n preds = torch.zeros(target_len)\n pred_count = torch.zeros(target_len)\n\n for i, og_pred in enumerate(frame):\n if i % jump_on_frame != 0:\n continue\n start = i * shift\n end = start + seg\n preds[start:end] = preds[start:end] + og_pred\n pred_count[start:end] = pred_count[start:end] + 1\n\n preds = preds / pred_count\n last_non_zero_pred = preds[pred_count != 0][-1]\n preds[pred_count == 0] = last_non_zero_pred\n\n elif smoothing_method == 'median':\n preds = [torch.empty(0) for _ in range(target_len)]\n for i, og_pred in enumerate(frame):\n if i % jump_on_frame != 0:\n continue\n\n start = i * shift\n end = start + seg\n for j in range(start, end):\n if j <= target_len - 1:\n preds[j] = torch.cat((preds[j], og_pred.unsqueeze(0)), 0)\n\n preds = torch.stack([torch.nanquantile(l, q=0.5) for l in preds])\n nan_idx = torch.isnan(preds)\n last_non_nan_pred = preds[~nan_idx][-1]\n preds[nan_idx] = last_non_nan_pred\n\n else:\n raise ValueError(\"smoothing_method should be either mean or median\")\n\n return preds\n\n\ndef generate_overlap_vad_seq_per_file(frame_filepath: str, per_args: dict) -> str:\n \"\"\"\n A wrapper for generate_overlap_vad_seq_per_tensor.\n \"\"\"\n\n out_dir = per_args['out_dir']\n smoothing_method = per_args['smoothing_method']\n frame, name = load_tensor_from_file(frame_filepath)\n\n per_args_float: Dict[str, float] = {}\n for i in per_args:\n if type(per_args[i]) == float or type(per_args[i]) == int:\n per_args_float[i] = per_args[i]\n\n preds = generate_overlap_vad_seq_per_tensor(frame, per_args_float, smoothing_method)\n\n overlap_filepath = os.path.join(out_dir, name + \".\" + smoothing_method)\n with open(overlap_filepath, \"w\", encoding='utf-8') as f:\n for pred in preds:\n f.write(f\"{pred:.4f}\\n\")\n\n return overlap_filepath\n\n\[email protected]\ndef merge_overlap_segment(segments: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Merged the given overlapped segments.\n For example:\n torch.Tensor([[0, 1.5], [1, 3.5]]) -> torch.Tensor([0, 3.5])\n \"\"\"\n if (\n segments.shape == torch.Size([0])\n or segments.shape == torch.Size([0, 2])\n or segments.shape == torch.Size([1, 2])\n ):\n return segments\n\n segments = segments[segments[:, 0].sort()[1]]\n merge_boundary = segments[:-1, 1] >= segments[1:, 0]\n head_padded = torch.nn.functional.pad(merge_boundary, [1, 0], mode='constant', value=0.0)\n head = segments[~head_padded, 0]\n tail_padded = 
torch.nn.functional.pad(merge_boundary, [0, 1], mode='constant', value=0.0)\n tail = segments[~tail_padded, 1]\n merged = torch.stack((head, tail), dim=1)\n return merged\n\n\[email protected]\ndef filter_short_segments(segments: torch.Tensor, threshold: float) -> torch.Tensor:\n \"\"\"\n Remove segments which duration is smaller than a threshold.\n For example,\n torch.Tensor([[0, 1.5], [1, 3.5], [4, 7]]) and threshold = 2.0\n -> \n torch.Tensor([[1, 3.5], [4, 7]])\n \"\"\"\n return segments[segments[:, 1] - segments[:, 0] >= threshold]\n\n\ndef percentile(data: torch.Tensor, perc: int) -> float:\n \"\"\"\n Calculate percentile given data\n \"\"\"\n size = len(data)\n return float(sorted(data)[int(math.ceil((size * perc) / 100)) - 1])\n\n\ndef cal_vad_onset_offset(\n scale: str, onset: float, offset: float, sequence: torch.Tensor = None\n) -> Tuple[float, float]:\n \"\"\"\n Calculate onset and offset threshold given different scale.\n \"\"\"\n if scale == \"absolute\":\n mini = 0\n maxi = 1\n elif scale == \"relative\":\n mini = min(sequence)\n maxi = max(sequence)\n elif scale == \"percentile\":\n mini = percentile(sequence, 1)\n maxi = percentile(sequence, 99)\n\n onset = mini + onset * (maxi - mini)\n offset = mini + offset * (maxi - mini)\n return float(onset), float(offset)\n\n\[email protected]\ndef binarization(sequence: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:\n \"\"\"\n Binarize predictions to speech and non-speech\n\n Reference\n Paper: Gregory Gelly and Jean-Luc Gauvain. \"Minimum Word Error Training of RNN-based Voice Activity Detection\", InterSpeech 2015. \n Implementation: https://github.com/pyannote/pyannote-audio/blob/master/pyannote/audio/utils/signal.py \n\n Args:\n sequence (torch.Tensor) : A tensor of frame level predictions.\n per_args:\n onset (float): onset threshold for detecting the beginning and end of a speech \n offset (float): offset threshold for detecting the end of a speech. \n pad_onset (float): adding durations before each speech segment\n pad_offset (float): adding durations after each speech segment;\n shift_length_in_sec (float): amount of shift of window for generating the frame.\n \n Returns:\n speech_segments(torch.Tensor): A tensor of speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format. 
\n \"\"\"\n shift_length_in_sec = per_args.get('shift_length_in_sec', 0.01)\n\n onset = per_args.get('onset', 0.5)\n offset = per_args.get('offset', 0.5)\n pad_onset = per_args.get('pad_onset', 0.0)\n pad_offset = per_args.get('pad_offset', 0.0)\n\n speech = False\n start = 0.0\n i = 0\n\n speech_segments = torch.empty(0)\n\n for i in range(1, len(sequence)):\n # Current frame is speech\n if speech:\n # Switch from speech to non-speech\n if sequence[i] < offset:\n if i * shift_length_in_sec + pad_offset > max(0, start - pad_onset):\n new_seg = torch.tensor(\n [max(0, start - pad_onset), i * shift_length_in_sec + pad_offset]\n ).unsqueeze(0)\n speech_segments = torch.cat((speech_segments, new_seg), 0)\n\n start = i * shift_length_in_sec\n speech = False\n\n # Current frame is non-speech\n else:\n # Switch from non-speech to speech\n if sequence[i] > onset:\n start = i * shift_length_in_sec\n speech = True\n\n # if it's speech at the end, add final segment\n if speech:\n new_seg = torch.tensor([max(0, start - pad_onset), i * shift_length_in_sec + pad_offset]).unsqueeze(0)\n speech_segments = torch.cat((speech_segments, new_seg), 0)\n\n # Merge the overlapped speech segments due to padding\n speech_segments = merge_overlap_segment(speech_segments) # not sorted\n return speech_segments\n\n\[email protected]\ndef remove_segments(original_segments: torch.Tensor, to_be_removed_segments: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Remove speech segments list in to_be_removed_segments from original_segments.\n For example, \n remove torch.Tensor([[start2, end2],[start4, end4]]) from torch.Tensor([[start1, end1],[start2, end2],[start3, end3], [start4, end4]]),\n -> \n torch.Tensor([[start1, end1],[start3, end3]])\n \"\"\"\n for y in to_be_removed_segments:\n original_segments = original_segments[original_segments.eq(y).all(dim=1).logical_not()]\n return original_segments\n\n\[email protected]\ndef get_gap_segments(segments: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Get the gap segments. \n For example,\n torch.Tensor([[start1, end1], [start2, end2], [start3, end3]]) -> torch.Tensor([[end1, start2], [end2, start3]])\n \"\"\"\n segments = segments[segments[:, 0].sort()[1]]\n return torch.column_stack((segments[:-1, 1], segments[1:, 0]))\n\n\[email protected]\ndef filtering(speech_segments: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:\n\n \"\"\"\n Filter out short non_speech and speech segments.\n\n Reference\n Paper: Gregory Gelly and Jean-Luc Gauvain. \"Minimum Word Error Training of RNN-based Voice Activity Detection\", InterSpeech 2015. \n Implementation: https://github.com/pyannote/pyannote-audio/blob/master/pyannote/audio/utils/signal.py \n Args:\n speech_segments (torch.Tensor): A tensor of speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format. \n per_args:\n min_duration_on (float): threshold for small non_speech deletion\n min_duration_off (float): threshold for short speech segment deletion\n filter_speech_first (float): Whether to perform short speech segment deletion first. Use 1.0 to represent True. \n\n Returns:\n speech_segments(torch.Tensor): A tensor of filtered speech segment in torch.Tensor([[start1, end1], [start2, end2]]) format. 
\n \"\"\"\n if speech_segments.shape == torch.Size([0]):\n return speech_segments\n\n min_duration_on = per_args.get('min_duration_on', 0.0)\n min_duration_off = per_args.get('min_duration_off', 0.0)\n filter_speech_first = per_args.get('filter_speech_first', 1.0)\n\n if filter_speech_first == 1.0:\n # Filter out the shorter speech segments\n if min_duration_on > 0.0:\n speech_segments = filter_short_segments(speech_segments, min_duration_on)\n # Filter out the shorter non-speech segments and return to be as speech segments\n if min_duration_off > 0.0:\n # Find non-speech segments\n non_speech_segments = get_gap_segments(speech_segments)\n # Find shorter non-speech segments\n short_non_speech_segments = remove_segments(\n non_speech_segments, filter_short_segments(non_speech_segments, min_duration_off)\n )\n # Return shorter non-speech segments to be as speech segments\n speech_segments = torch.cat((speech_segments, short_non_speech_segments), 0)\n\n # Merge the overlapped speech segments\n speech_segments = merge_overlap_segment(speech_segments)\n else:\n if min_duration_off > 0.0:\n # Find non-speech segments\n non_speech_segments = get_gap_segments(speech_segments)\n # Find shorter non-speech segments\n short_non_speech_segments = remove_segments(\n non_speech_segments, filter_short_segments(non_speech_segments, min_duration_off)\n )\n\n speech_segments = torch.cat((speech_segments, short_non_speech_segments), 0)\n\n # Merge the overlapped speech segments\n speech_segments = merge_overlap_segment(speech_segments)\n if min_duration_on > 0.0:\n speech_segments = filter_short_segments(speech_segments, min_duration_on)\n\n return speech_segments\n\n\ndef prepare_gen_segment_table(sequence: torch.Tensor, per_args: dict) -> Tuple[str, dict]:\n \"\"\"\n Preparing for generating segment table. \n \"\"\"\n out_dir = per_args.get('out_dir', None)\n\n # calculate onset offset based on scale selection\n per_args['onset'], per_args['offset'] = cal_vad_onset_offset(\n per_args.get('scale', 'absolute'), per_args['onset'], per_args['offset'], sequence\n )\n\n # cast 'filter_speech_first' for torch.jit.script\n if 'filter_speech_first' in per_args:\n if per_args['filter_speech_first']:\n per_args['filter_speech_first'] = 1.0\n else:\n per_args['filter_speech_first'] = 0.0\n\n per_args_float: Dict[str, float] = {}\n for i in per_args:\n if type(per_args[i]) == float or type(per_args[i]) == int:\n per_args_float[i] = per_args[i]\n\n return out_dir, per_args_float\n\n\[email protected]\ndef generate_vad_segment_table_per_tensor(sequence: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:\n \"\"\"\n See description in generate_overlap_vad_seq.\n Use this for single instance pipeline. 
\n \"\"\"\n\n shift_length_in_sec = per_args['shift_length_in_sec']\n speech_segments = binarization(sequence, per_args)\n speech_segments = filtering(speech_segments, per_args)\n\n if speech_segments.shape == torch.Size([0]):\n return speech_segments\n\n speech_segments, _ = torch.sort(speech_segments, 0)\n\n dur = speech_segments[:, 1:2] - speech_segments[:, 0:1] + shift_length_in_sec\n speech_segments = torch.column_stack((speech_segments, dur))\n\n return speech_segments\n\n\ndef generate_vad_segment_table_per_file(pred_filepath: str, per_args: dict) -> str:\n \"\"\"\n A wrapper for generate_vad_segment_table_per_tensor\n \"\"\"\n sequence, name = load_tensor_from_file(pred_filepath)\n out_dir, per_args_float = prepare_gen_segment_table(sequence, per_args)\n\n preds = generate_vad_segment_table_per_tensor(sequence, per_args_float)\n save_name = name + \".txt\"\n save_path = os.path.join(out_dir, save_name)\n\n if preds.shape == torch.Size([0]):\n with open(save_path, \"w\", encoding='utf-8') as fp:\n fp.write(f\"0 0 speech\\n\")\n\n else:\n with open(save_path, \"w\", encoding='utf-8') as fp:\n for i in preds:\n fp.write(f\"{i[0]:.4f} {i[2]:.4f} speech\\n\")\n\n return save_path\n\n\ndef generate_vad_segment_table(\n vad_pred_dir: str, postprocessing_params: dict, shift_length_in_sec: float, num_workers: int, out_dir: str = None,\n) -> str:\n \"\"\"\n Convert frame level prediction to speech segment in start and end times format.\n And save to csv file in rttm-like format\n 0, 10, speech\n 17,18, speech\n Args:\n vad_pred_dir (str): directory of prediction files to be processed.\n postprocessing_params (dict): dictionary of thresholds for prediction score. See details in binarization and filtering.\n shift_length_in_sec (float): amount of shift of window for generating the frame.\n out_dir (str): output dir of generated table/csv file.\n num_workers(float): number of process for multiprocessing\n Returns:\n table_out_dir(str): directory of the generated table.\n \"\"\"\n\n suffixes = (\"frame\", \"mean\", \"median\")\n vad_pred_filepath_list = [os.path.join(vad_pred_dir, x) for x in os.listdir(vad_pred_dir) if x.endswith(suffixes)]\n\n if out_dir:\n table_out_dir = out_dir\n else:\n table_out_dir_name = \"table_output_tmp_\"\n for key in postprocessing_params:\n table_out_dir_name = table_out_dir_name + str(key) + str(postprocessing_params[key]) + \"_\"\n\n table_out_dir = os.path.join(vad_pred_dir, table_out_dir_name)\n\n if not os.path.exists(table_out_dir):\n os.mkdir(table_out_dir)\n\n per_args = {\n \"shift_length_in_sec\": shift_length_in_sec,\n \"out_dir\": table_out_dir,\n }\n per_args = {**per_args, **postprocessing_params}\n\n if num_workers is not None and num_workers > 1:\n p = multiprocessing.Pool(processes=num_workers)\n p.starmap(generate_vad_segment_table_per_file, zip(vad_pred_filepath_list, repeat(per_args)))\n p.close()\n p.join()\n else:\n for vad_pred_filepath in vad_pred_filepath_list:\n generate_vad_segment_table_per_file(vad_pred_filepath, per_args)\n\n return table_out_dir\n\n\ndef vad_construct_pyannote_object_per_file(\n vad_table_filepath: str, groundtruth_RTTM_file: str\n) -> Tuple[Annotation, Annotation]:\n \"\"\"\n Construct a Pyannote object for evaluation.\n Args:\n vad_table_filepath(str) : path of vad rttm-like table.\n groundtruth_RTTM_file(str): path of groundtruth rttm file.\n Returns:\n reference(pyannote.Annotation): groundtruth\n hypothesis(pyannote.Annotation): prediction\n \"\"\"\n\n pred = pd.read_csv(vad_table_filepath, sep=\" \", 
header=None)\n label = pd.read_csv(groundtruth_RTTM_file, sep=\" \", delimiter=None, header=None)\n label = label.rename(columns={3: \"start\", 4: \"dur\", 7: \"speaker\"})\n\n # construct reference\n reference = Annotation()\n for index, row in label.iterrows():\n reference[Segment(row['start'], row['start'] + row['dur'])] = row['speaker']\n\n # construct hypothsis\n hypothesis = Annotation()\n for index, row in pred.iterrows():\n hypothesis[Segment(float(row[0]), float(row[0]) + float(row[1]))] = 'Speech'\n return reference, hypothesis\n\n\ndef get_parameter_grid(params: dict) -> list:\n \"\"\"\n Get the parameter grid given a dictionary of parameters.\n \"\"\"\n has_filter_speech_first = False\n if 'filter_speech_first' in params:\n filter_speech_first = params['filter_speech_first']\n has_filter_speech_first = True\n params.pop(\"filter_speech_first\")\n\n params_grid = list(ParameterGrid(params))\n\n if has_filter_speech_first:\n for i in params_grid:\n i['filter_speech_first'] = filter_speech_first\n return params_grid\n\n\ndef vad_tune_threshold_on_dev(\n params: dict,\n vad_pred: str,\n groundtruth_RTTM: str,\n result_file: str = \"res\",\n vad_pred_method: str = \"frame\",\n focus_metric: str = \"DetER\",\n shift_length_in_sec: float = 0.01,\n num_workers: int = 20,\n) -> Tuple[dict, dict]:\n \"\"\"\n Tune thresholds on dev set. Return best thresholds which gives the lowest detection error rate (DetER) in thresholds.\n Args:\n params (dict): dictionary of parameters to be tuned on.\n vad_pred_method (str): suffix of prediction file. Use to locate file. Should be either in \"frame\", \"mean\" or \"median\".\n groundtruth_RTTM_dir (str): directory of ground-truth rttm files or a file contains the paths of them.\n focus_metric (str): metrics we care most when tuning threshold. 
Should be either in \"DetER\", \"FA\", \"MISS\"\n Returns:\n best_threshold (float): threshold that gives lowest DetER.\n \"\"\"\n min_score = 100\n all_perf = {}\n try:\n check_if_param_valid(params)\n except:\n raise ValueError(\"Please check if the parameters are valid\")\n\n paired_filenames, groundtruth_RTTM_dict, vad_pred_dict = pred_rttm_map(vad_pred, groundtruth_RTTM, vad_pred_method)\n metric = detection.DetectionErrorRate()\n params_grid = get_parameter_grid(params)\n\n for param in params_grid:\n for i in param:\n if type(param[i]) == np.float64 or type(param[i]) == np.int64:\n param[i] = float(param[i])\n try:\n # Generate speech segments by performing binarization on the VAD prediction according to param.\n # Filter speech segments according to param and write the result to rttm-like table.\n vad_table_dir = generate_vad_segment_table(\n vad_pred, param, shift_length_in_sec=shift_length_in_sec, num_workers=num_workers\n )\n # add reference and hypothesis to metrics\n for filename in paired_filenames:\n groundtruth_RTTM_file = groundtruth_RTTM_dict[filename]\n vad_table_filepath = os.path.join(vad_table_dir, filename + \".txt\")\n reference, hypothesis = vad_construct_pyannote_object_per_file(\n vad_table_filepath, groundtruth_RTTM_file\n )\n metric(reference, hypothesis) # accumulation\n\n # delete tmp table files\n shutil.rmtree(vad_table_dir, ignore_errors=True)\n\n report = metric.report(display=False)\n DetER = report.iloc[[-1]][('detection error rate', '%')].item()\n FA = report.iloc[[-1]][('false alarm', '%')].item()\n MISS = report.iloc[[-1]][('miss', '%')].item()\n\n assert (\n focus_metric == \"DetER\" or focus_metric == \"FA\" or focus_metric == \"MISS\"\n ), \"Metric we care most should be only in 'DetER', 'FA' or 'MISS'!\"\n all_perf[str(param)] = {'DetER (%)': DetER, 'FA (%)': FA, 'MISS (%)': MISS}\n logging.info(f\"parameter {param}, {all_perf[str(param)] }\")\n\n score = all_perf[str(param)][focus_metric + ' (%)']\n\n del report\n metric.reset() # reset internal accumulator\n\n # save results for analysis\n with open(result_file + \".txt\", \"a\", encoding='utf-8') as fp:\n fp.write(f\"{param}, {all_perf[str(param)] }\\n\")\n\n if score < min_score:\n best_threshold = param\n optimal_scores = all_perf[str(param)]\n min_score = score\n print(\"Current best\", best_threshold, optimal_scores)\n\n except RuntimeError as e:\n print(f\"Pass {param}, with error {e}\")\n except pd.errors.EmptyDataError as e1:\n print(f\"Pass {param}, with error {e1}\")\n\n return best_threshold, optimal_scores\n\n\ndef check_if_param_valid(params: dict) -> bool:\n \"\"\"\n Check if the parameters are valid.\n \"\"\"\n for i in params:\n if i == \"filter_speech_first\":\n if not type(params[\"filter_speech_first\"]) == bool:\n raise ValueError(\"Invalid inputs! filter_speech_first should be either True or False!\")\n elif i == \"pad_onset\":\n continue\n elif i == \"pad_offset\":\n continue\n else:\n for j in params[i]:\n if not j >= 0:\n raise ValueError(\n \"Invalid inputs! All float parameters except pad_onset and pad_offset should be larger than 0!\"\n )\n\n if not (all(i <= 1 for i in params['onset']) and all(i <= 1 for i in params['offset'])):\n raise ValueError(\"Invalid inputs! 
The onset and offset thresholds should be in range [0, 1]!\")\n\n return True\n\n\ndef pred_rttm_map(vad_pred: str, groundtruth_RTTM: str, vad_pred_method: str = \"frame\") -> Tuple[set, dict, dict]:\n \"\"\"\n Find paired files in vad_pred and groundtruth_RTTM\n \"\"\"\n groundtruth_RTTM_dict = {}\n if os.path.isfile(groundtruth_RTTM):\n with open(groundtruth_RTTM, \"r\", encoding='utf-8') as fp:\n groundtruth_RTTM_files = fp.read().splitlines()\n elif os.path.isdir(groundtruth_RTTM):\n groundtruth_RTTM_files = glob.glob(os.path.join(groundtruth_RTTM, \"*.rttm\"))\n else:\n raise ValueError(\n \"groundtruth_RTTM should either be a directory contains rttm files or a file contains paths to them!\"\n )\n for f in groundtruth_RTTM_files:\n filename = os.path.basename(f).rsplit(\".\", 1)[0]\n groundtruth_RTTM_dict[filename] = f\n\n vad_pred_dict = {}\n if os.path.isfile(vad_pred):\n with open(vad_pred, \"r\", encoding='utf-8') as fp:\n vad_pred_files = fp.read().splitlines()\n elif os.path.isdir(vad_pred):\n vad_pred_files = glob.glob(os.path.join(vad_pred, \"*.\" + vad_pred_method))\n else:\n raise ValueError(\n \"vad_pred should either be a directory containing vad pred files or a file contains paths to them!\"\n )\n for f in vad_pred_files:\n filename = os.path.basename(f).rsplit(\".\", 1)[0]\n vad_pred_dict[filename] = f\n\n paired_filenames = groundtruth_RTTM_dict.keys() & vad_pred_dict.keys()\n return paired_filenames, groundtruth_RTTM_dict, vad_pred_dict\n\n\ndef plot(\n path2audio_file: str,\n path2_vad_pred: str,\n path2ground_truth_label: str = None,\n offset: float = 0,\n duration: float = None,\n threshold: float = None,\n per_args: dict = None,\n) -> ipd.Audio:\n \"\"\"\n Plot VAD outputs for demonstration in tutorial\n Args:\n path2audio_file (str): path to audio file.\n path2_vad_pred (str): path to vad prediction file,\n path2ground_truth_label(str): path to groundtruth label file.\n threshold (float): threshold for prediction score (from 0 to 1).\n per_args(dict): a dict that stores the thresholds for postprocessing.\n \"\"\"\n plt.figure(figsize=[20, 2])\n FRAME_LEN = 0.01\n\n audio, sample_rate = librosa.load(path=path2audio_file, sr=16000, mono=True, offset=offset, duration=duration)\n dur = librosa.get_duration(y=audio, sr=sample_rate)\n\n time = np.arange(offset, offset + dur, FRAME_LEN)\n frame, _ = load_tensor_from_file(path2_vad_pred)\n frame_snippet = frame[int(offset / FRAME_LEN) : int((offset + dur) / FRAME_LEN)]\n\n len_pred = len(frame_snippet)\n ax1 = plt.subplot()\n ax1.plot(np.arange(audio.size) / sample_rate, audio, 'gray')\n ax1.set_xlim([0, int(dur) + 1])\n ax1.tick_params(axis='y', labelcolor='b')\n ax1.set_ylabel('Signal')\n ax1.set_ylim([-1, 1])\n ax2 = ax1.twinx()\n\n if threshold and per_args:\n raise ValueError(\"threshold and per_args cannot be used at same time!\")\n if not threshold and not per_args:\n raise ValueError(\"One and only one of threshold and per_args must have been used!\")\n\n if threshold:\n pred_snippet = np.where(frame_snippet >= threshold, 1, 0)\n if per_args:\n _, per_args_float = prepare_gen_segment_table(\n frame, per_args\n ) # take whole frame here for calculating onset and offset\n speech_segments = generate_vad_segment_table_per_tensor(frame, per_args_float)\n pred = gen_pred_from_speech_segments(speech_segments, frame)\n pred_snippet = pred[int(offset / FRAME_LEN) : int((offset + dur) / FRAME_LEN)]\n\n if path2ground_truth_label:\n label = extract_labels(path2ground_truth_label, time)\n ax2.plot(np.arange(len_pred) * 
FRAME_LEN, label, 'r', label='label')\n\n ax2.plot(np.arange(len_pred) * FRAME_LEN, pred_snippet, 'b', label='pred')\n ax2.plot(np.arange(len_pred) * FRAME_LEN, frame_snippet, 'g--', label='speech prob')\n ax2.tick_params(axis='y', labelcolor='r')\n ax2.legend(loc='lower right', shadow=True)\n ax2.set_ylabel('Preds and Probas')\n ax2.set_ylim([-0.1, 1.1])\n return ipd.Audio(audio, rate=16000)\n\n\ndef gen_pred_from_speech_segments(\n speech_segments: torch.Tensor, prob: float, shift_length_in_sec: float = 0.01\n) -> np.array:\n \"\"\"\n Generate prediction arrays like 000111000... from speech segments {[0,1][2,4]} \n \"\"\"\n pred = np.zeros(prob.shape)\n speech_segments = [list(i) for i in speech_segments]\n speech_segments.sort(key=lambda x: x[0])\n\n for seg in speech_segments:\n start = int(seg[0] / shift_length_in_sec)\n end = int(seg[1] / shift_length_in_sec)\n pred[start:end] = 1\n return pred\n\n\ndef extract_labels(path2ground_truth_label: str, time: list) -> list:\n \"\"\"\n Extract ground-truth label for given time period.\n path2ground_truth_label (str): path of groundtruth label file \n time (list) : a list of array representing time period.\n \"\"\"\n\n data = pd.read_csv(path2ground_truth_label, sep=\" \", delimiter=None, header=None)\n data = data.rename(columns={3: \"start\", 4: \"dur\", 7: \"speaker\"})\n labels = []\n for pos in time:\n line = data[(data[\"start\"] <= pos) & (data[\"start\"] + data[\"dur\"] > pos)]\n if len(line) >= 1:\n labels.append(1)\n else:\n labels.append(0)\n return labels\n\n\ndef generate_vad_frame_pred(\n vad_model, window_length_in_sec: float, shift_length_in_sec: float, manifest_vad_input: str, out_dir: str\n) -> str:\n \"\"\"\n Generate VAD frame level prediction and write to out_dir\n \"\"\"\n time_unit = int(window_length_in_sec / shift_length_in_sec)\n trunc = int(time_unit / 2)\n trunc_l = time_unit - trunc\n all_len = 0\n\n data = []\n for line in open(manifest_vad_input, 'r', encoding='utf-8'):\n file = json.loads(line)['audio_filepath'].split(\"/\")[-1]\n data.append(file.split(\".wav\")[0])\n logging.info(f\"Inference on {len(data)} audio files/json lines!\")\n\n status = get_vad_stream_status(data)\n for i, test_batch in enumerate(vad_model.test_dataloader()):\n test_batch = [x.to(vad_model.device) for x in test_batch]\n with autocast():\n log_probs = vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])\n probs = torch.softmax(log_probs, dim=-1)\n pred = probs[:, 1]\n\n if status[i] == 'start':\n to_save = pred[:-trunc]\n elif status[i] == 'next':\n to_save = pred[trunc:-trunc_l]\n elif status[i] == 'end':\n to_save = pred[trunc_l:]\n else:\n to_save = pred\n\n all_len += len(to_save)\n outpath = os.path.join(out_dir, data[i] + \".frame\")\n with open(outpath, \"a\", encoding='utf-8') as fout:\n for f in range(len(to_save)):\n fout.write('{0:0.4f}\\n'.format(to_save[f]))\n\n del test_batch\n if status[i] == 'end' or status[i] == 'single':\n logging.debug(f\"Overall length of prediction of {data[i]} is {all_len}!\")\n all_len = 0\n return out_dir\n\n\ndef init_vad_model(model_path: str):\n \"\"\"\n Initiate VAD model with model path\n \"\"\"\n if model_path.endswith('.nemo'):\n logging.info(f\"Using local VAD model from {model_path}\")\n vad_model = EncDecClassificationModel.restore_from(restore_path=model_path)\n elif model_path.endswith('.ckpt'):\n vad_model = EncDecClassificationModel.load_from_checkpoint(checkpoint_path=model_path)\n else:\n logging.info(f\"Using NGC cloud VAD model {model_path}\")\n vad_model 
= EncDecClassificationModel.from_pretrained(model_name=model_path)\n return vad_model\n\n\ndef stitch_segmented_asr_output(\n segmented_output_manifest: str,\n speech_segments_tensor_dir: str = \"speech_segments\",\n stitched_output_manifest: str = \"asr_stitched_output_manifest.json\",\n) -> str:\n \"\"\"\n Stitch the prediction of speech segments.\n \"\"\"\n if not os.path.exists(speech_segments_tensor_dir):\n os.mkdir(speech_segments_tensor_dir)\n\n segmented_output = []\n for line in open(segmented_output_manifest, 'r', encoding='utf-8'):\n file = json.loads(line)\n segmented_output.append(file)\n\n with open(stitched_output_manifest, 'w', encoding='utf-8') as fout:\n speech_segments = torch.Tensor()\n all_pred_text = \"\"\n if len(segmented_output) > 1:\n for i in range(1, len(segmented_output)):\n start, end = (\n segmented_output[i - 1]['offset'],\n segmented_output[i - 1]['offset'] + segmented_output[i - 1]['duration'],\n )\n new_seg = torch.tensor([start, end]).unsqueeze(0)\n speech_segments = torch.cat((speech_segments, new_seg), 0)\n pred_text = segmented_output[i - 1]['pred_text']\n all_pred_text += pred_text\n name = segmented_output[i - 1]['audio_filepath'].split(\"/\")[-1].rsplit(\".\", 1)[0]\n\n if segmented_output[i - 1]['audio_filepath'] != segmented_output[i]['audio_filepath']:\n\n speech_segments_tensor_path = os.path.join(speech_segments_tensor_dir, name + '.pt')\n torch.save(speech_segments, speech_segments_tensor_path)\n meta = {\n 'audio_filepath': segmented_output[i - 1]['audio_filepath'],\n 'speech_segments_filepath': speech_segments_tensor_path,\n 'pred_text': all_pred_text,\n }\n\n json.dump(meta, fout)\n fout.write('\\n')\n fout.flush()\n speech_segments = torch.Tensor()\n all_pred_text = \"\"\n else:\n all_pred_text += \" \"\n else:\n i = -1\n\n start, end = segmented_output[i]['offset'], segmented_output[i]['offset'] + segmented_output[i]['duration']\n new_seg = torch.tensor([start, end]).unsqueeze(0)\n speech_segments = torch.cat((speech_segments, new_seg), 0)\n pred_text = segmented_output[i]['pred_text']\n all_pred_text += pred_text\n name = segmented_output[i]['audio_filepath'].split(\"/\")[-1].rsplit(\".\", 1)[0]\n speech_segments_tensor_path = os.path.join(speech_segments_tensor_dir, name + '.pt')\n torch.save(speech_segments, speech_segments_tensor_path)\n\n meta = {\n 'audio_filepath': segmented_output[i]['audio_filepath'],\n 'speech_segments_filepath': speech_segments_tensor_path,\n 'pred_text': all_pred_text,\n }\n json.dump(meta, fout)\n fout.write('\\n')\n fout.flush()\n\n logging.info(\n f\"Finish stitch segmented ASR output to {stitched_output_manifest}, the speech segments info has been stored in directory {speech_segments_tensor_dir}\"\n )\n return stitched_output_manifest\n\n\ndef construct_manifest_eval(\n input_manifest: str, stitched_output_manifest: str, aligned_vad_asr_output_manifest: str = \"vad_asr_out.json\"\n) -> str:\n\n \"\"\"\n Generate aligned manifest for evaluation.\n Because some pure noise samples might not appear in stitched_output_manifest.\n \"\"\"\n stitched_output = dict()\n for line in open(stitched_output_manifest, 'r', encoding='utf-8'):\n file = json.loads(line)\n stitched_output[file[\"audio_filepath\"]] = file\n\n out = []\n for line in open(input_manifest, 'r', encoding='utf-8'):\n file = json.loads(line)\n sample = file[\"audio_filepath\"]\n if sample in stitched_output:\n file[\"pred_text\"] = stitched_output[sample][\"pred_text\"]\n file[\"speech_segments_filepath\"] = 
stitched_output[sample][\"speech_segments_filepath\"]\n else:\n file[\"pred_text\"] = \"\"\n file[\"speech_segments_filepath\"] = \"\"\n\n out.append(file)\n\n with open(aligned_vad_asr_output_manifest, 'w', encoding='utf-8') as fout:\n for i in out:\n json.dump(i, fout)\n fout.write('\\n')\n fout.flush()\n\n return aligned_vad_asr_output_manifest\n"
] | [
[
"torch.empty",
"torch.stack",
"torch.cat",
"torch.softmax",
"matplotlib.pyplot.figure",
"torch.nn.functional.pad",
"torch.save",
"sklearn.model_selection.ParameterGrid",
"numpy.where",
"torch.Tensor",
"torch.sort",
"numpy.zeros",
"pandas.read_csv",
"torch.tensor",
"numpy.arange",
"torch.column_stack",
"torch.isnan",
"torch.Size",
"torch.nanquantile",
"matplotlib.pyplot.subplot",
"torch.cuda.amp.autocast",
"torch.zeros"
]
] |
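
The `merge_overlap_segment` helper in the NeMo entry above merges overlapping [start, end] rows without a Python loop: sort by start time, mark every row whose end reaches the next row's start, then pad that boolean mask on either side to select which starts and which ends survive. A standalone sketch of the same trick, with an illustrative function name and example values:

```python
import torch


def merge_overlapping(segments: torch.Tensor) -> torch.Tensor:
    # segments: [N, 2] rows of (start, end); returns merged, sorted rows.
    if segments.shape[0] < 2:
        return segments
    segments = segments[segments[:, 0].sort()[1]]     # sort by start time
    overlap = segments[:-1, 1] >= segments[1:, 0]     # row i reaches row i+1
    # A row opens a merged run unless its predecessor overlaps into it,
    # and closes one unless it overlaps into its successor.
    starts = segments[~torch.nn.functional.pad(overlap, (1, 0)), 0]
    ends = segments[~torch.nn.functional.pad(overlap, (0, 1)), 1]
    return torch.stack((starts, ends), dim=1)


print(merge_overlapping(torch.tensor([[0.0, 1.5], [1.0, 3.5], [4.0, 7.0]])))
# -> [[0.0, 3.5], [4.0, 7.0]]
```
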
qwang70/PreSumm | [
"b2c3aee0ada7f5fa8754dffd44355b956fe0d45b"
] | [
"src/train_extractive.py"
] | [
"#!/usr/bin/env python\n\"\"\"\n Main training workflow\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport glob\nimport os\nimport random\nimport signal\nimport time\n\nimport torch\n\nimport distributed\nfrom models import data_loader, model_builder\nfrom models.data_loader import load_dataset\nfrom models.model_builder import ExtSummarizer\nfrom models.trainer_ext import build_trainer\nfrom others.logging import logger, init_logger\n\nimport pdb\n\nmodel_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n\n\ndef train_multi_ext(args):\n \"\"\" Spawns 1 process per GPU \"\"\"\n init_logger()\n\n nb_gpu = args.world_size\n mp = torch.multiprocessing.get_context('spawn')\n\n # Create a thread to listen for errors in the child processes.\n error_queue = mp.SimpleQueue()\n error_handler = ErrorHandler(error_queue)\n\n # Train with multiprocessing.\n procs = []\n for i in range(nb_gpu):\n device_id = i\n procs.append(mp.Process(target=run, args=(args,\n device_id, error_queue,), daemon=True))\n procs[i].start()\n logger.info(\" Starting process pid: %d \" % procs[i].pid)\n error_handler.add_child(procs[i].pid)\n for p in procs:\n p.join()\n\n\ndef run(args, device_id, error_queue):\n \"\"\" run process \"\"\"\n setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])\n\n try:\n gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)\n print('gpu_rank %d' % gpu_rank)\n if gpu_rank != args.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n\n train_single_ext(args, device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\ndef validate_ext(args, device_id):\n timestep = 0\n FILE_PATH = 'model_step_*.pt'\n #FILE_PATH = 'bertext_cnndm_transformer*.pt'\n if (args.test_all):\n cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = validate(args, device_id, cp, step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if (i - max_step > 10):\n 
break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info('PPL %s' % str(xent_lst))\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n test_ext(args, device_id, cp, step)\n else:\n while (True):\n cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))\n cp_files.sort(key=os.path.getmtime)\n if (cp_files):\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if (not os.path.getsize(cp) > 0):\n print(\"will sleep 60\", os.path.getsize(cp))\n time.sleep(60)\n continue\n if (time_of_cp > timestep):\n timestep = time_of_cp\n step = 0\n step = int(cp.split('.')[-2].split('_')[-1])\n validate(args, device_id, cp, step)\n test_ext(args, device_id, cp, step)\n\n cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))\n cp_files.sort(key=os.path.getmtime)\n if (cp_files):\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if (time_of_cp > timestep):\n continue\n return\n else:\n print(\"will sleep 300\", cp_files)\n time.sleep(300)\n\n\ndef validate(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n model = ExtSummarizer(args, device, checkpoint)\n model.eval()\n\n valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),\n args.batch_size, device,\n shuffle=False, is_test=False)\n trainer = build_trainer(args, device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n\ndef test_ext(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n model = ExtSummarizer(args, device, checkpoint)\n model.eval()\n\n test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),\n args.test_batch_size, device,\n shuffle=False, is_test=True)\n trainer = build_trainer(args, device_id, model, None)\n trainer.test(test_iter, step)\n\ndef train_ext(args, device_id):\n if (args.world_size > 1):\n train_multi_ext(args)\n else:\n train_single_ext(args, device_id)\n\n\ndef train_single_ext(args, device_id):\n init_logger(args.log_file)\n\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n logger.info('Device ID %d' % device_id)\n logger.info('Device %s' % device)\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n torch.cuda.manual_seed(args.seed)\n\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n\n if args.train_from != '':\n logger.info('Loading checkpoint from %s' % args.train_from)\n checkpoint = torch.load(args.train_from,\n map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n else:\n checkpoint = None\n\n def train_iter_fct():\n return 
data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,\n shuffle=True, is_test=False)\n\n model = ExtSummarizer(args, device, checkpoint)\n optim = model_builder.build_optim(args, model, checkpoint)\n\n logger.info(model)\n\n trainer = build_trainer(args, device_id, model, optim)\n trainer.train(train_iter_fct, args.train_steps)\n"
] | [
[
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.multiprocessing.get_context",
"torch.cuda.set_device"
]
] |
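
The PreSumm entry above runs one spawned process per GPU and funnels child exceptions through a SimpleQueue so the parent can re-raise them with the original traceback. A minimal sketch of that error-queue pattern, assuming simulated work in place of real training and omitting the original's SIGUSR1 listener thread:

```python
import multiprocessing as mp
import traceback


def _worker(rank: int, error_queue) -> None:
    # Stand-in for the per-GPU training body (train_single_ext in the original).
    try:
        if rank == 1:
            raise RuntimeError("simulated failure on rank 1")
        print(f"rank {rank} finished cleanly")
    except Exception:
        # Ship the formatted traceback to the parent instead of dying silently.
        error_queue.put((rank, traceback.format_exc()))


if __name__ == "__main__":
    ctx = mp.get_context("spawn")  # same start method the original requests
    error_queue = ctx.SimpleQueue()
    procs = [ctx.Process(target=_worker, args=(i, error_queue), daemon=True)
             for i in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    if not error_queue.empty():
        rank, trace = error_queue.get()
        raise RuntimeError(f"child process (rank {rank}) failed:\n{trace}")
```
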
Melimet/DAP2020 | [
"0854fe4ce8ace6abf6dc0bbcf71984595ff6d42a"
] | [
"hy-data-analysis-with-python-spring-2020/part05-e08_bicycle_timeseries/test/test_bicycle_timeseries.py"
] | [
"#!/usr/bin/env python3\n\nimport unittest\nfrom unittest.mock import patch, MagicMock\nimport pandas as pd\nimport numpy as np\n\n\nfrom tmc import points\n\nfrom tmc.utils import load, get_out, patch_helper\n\nmodule_name=\"src.bicycle_timeseries\"\nbicycle_timeseries = load(module_name, \"bicycle_timeseries\")\nmain = load(module_name, \"main\")\nph = patch_helper(module_name)\n\n\n@points('p05-08.1')\nclass BicycleTimeseries(unittest.TestCase):\n\n # @classmethod\n # def setUpClass(cls):\n # cls.df = bicycle_timeseries()\n\n def setUp(self):\n self.df = bicycle_timeseries()\n \n def test_shape(self):\n self.assertEqual(self.df.shape, (37128, 20), msg=\"Incorrect shape!\")\n\n def test_columns(self):\n cols = ['Auroransilta', 'Eteläesplanadi', 'Huopalahti (asema)',\n 'Kaisaniemi/Eläintarhanlahti', 'Kaivokatu', 'Kulosaaren silta et.',\n 'Kulosaaren silta po. ', 'Kuusisaarentie', 'Käpylä, Pohjoisbaana',\n 'Lauttasaaren silta eteläpuoli', 'Merikannontie',\n 'Munkkiniemen silta eteläpuoli', 'Munkkiniemi silta pohjoispuoli',\n 'Heperian puisto/Ooppera', 'Pitkäsilta itäpuoli',\n 'Pitkäsilta länsipuoli', 'Lauttasaaren silta pohjoispuoli',\n 'Ratapihantie', 'Viikintie', 'Baana']\n np.testing.assert_array_equal(self.df.columns, cols, err_msg=\"Incorrect columns!\")\n\n def test_index(self):\n self.assertIsInstance(self.df.index[0], pd.Timestamp,\n msg=\"Expected index to have type timestamp!\")\n self.assertEqual(self.df.index[0], pd.to_datetime(\"2014-1-1 00:00\"),\n msg=\"Incorrect first index!\")\n \n self.assertEqual(self.df.index[1], pd.to_datetime(\"2014-1-1 01:00\"),\n msg=\"Incorrect second index!\")\n\n def test_calls(self):\n with patch(ph(\"bicycle_timeseries\"), wraps=bicycle_timeseries) as pbts,\\\n patch(ph(\"pd.read_csv\"), wraps=pd.read_csv) as prc,\\\n patch(ph(\"pd.to_datetime\"), wraps=pd.to_datetime) as pdatetime:\n main()\n pbts.assert_called_once()\n prc.assert_called_once()\n pdatetime.assert_called()\n \nif __name__ == '__main__':\n unittest.main()\n \n"
] | [
[
"pandas.to_datetime",
"numpy.testing.assert_array_equal"
]
] |
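
The test file above patches functions with `wraps=` so the real implementation still runs while the mock records calls, letting the assertions check usage (e.g. `assert_called_once`) without stubbing out results. A self-contained sketch of that spy pattern; `load_data` and the temporary CSV are invented for illustration:

```python
import os
import tempfile
import unittest
from unittest.mock import patch

import pandas as pd


def load_data(path: str) -> pd.DataFrame:
    return pd.read_csv(path)


class TestLoadData(unittest.TestCase):
    def test_read_csv_called_once(self):
        with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as f:
            f.write("a,b\n1,2\n")
            path = f.name
        try:
            # wraps=pd.read_csv: the genuine reader executes, but the mock
            # records the call so we can assert on how it was used.
            with patch(f"{__name__}.pd.read_csv", wraps=pd.read_csv) as spy:
                df = load_data(path)
            spy.assert_called_once_with(path)
            self.assertEqual(df.shape, (1, 2))
        finally:
            os.remove(path)


if __name__ == "__main__":
    unittest.main()
```
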