repo_name | hexsha | file_path | code | apis | possible_versions
---|---|---|---|---|---
slavslav/tensorflow | ["e29e704c9c8d68113fc407243b75a09325c86d08"] | ["tensorflow/python/distribute/distribute_lib.py"] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Library for running a computation across multiple devices.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport enum\nimport threading\nimport weakref\nimport six\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.eager import context as eager_context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\nfrom tensorflow.tools.docs import doc_controls\n\n\n# ------------------------------------------------------------------------------\n# Context tracking whether in a strategy.update() or .update_non_slot() call.\n\n\n_update_device = threading.local()\n\n\ndef get_update_device():\n \"\"\"Get the current device if in a `tf.distribute.Strategy.update()` call.\"\"\"\n try:\n return _update_device.current\n except AttributeError:\n return None\n\n\nclass UpdateContext(object):\n \"\"\"Context manager when you are in `update()` or `update_non_slot()`.\"\"\"\n\n def __init__(self, device):\n self._device = device\n self._old_device = None\n\n def __enter__(self):\n self._old_device = get_update_device()\n _update_device.current = self._device\n\n def __exit__(self, exception_type, exception_value, traceback):\n del exception_type, exception_value, traceback\n _update_device.current = self._old_device\n\n\n# ------------------------------------------------------------------------------\n# Public utility functions.\n\n\n@tf_export(v1=[\"distribute.get_loss_reduction\"])\ndef get_loss_reduction():\n \"\"\"DEPRECATED: Now always returns `tf.distribute.ReduceOp.SUM`.\n\n We now always make the complete adjustment when computing the loss, so\n code should always add gradients/losses across replicas, never average.\n \"\"\"\n return reduce_util.ReduceOp.SUM\n\n\n# ------------------------------------------------------------------------------\n# Internal API for validating the current thread mode\n\n\ndef _require_cross_replica_or_default_context_extended(extended):\n \"\"\"Verify in cross-replica context.\"\"\"\n context = 
_get_per_thread_mode()\n cross_replica = context.cross_replica_context\n if cross_replica is not None and cross_replica.extended is extended:\n return\n if context is _get_default_replica_mode():\n return\n strategy = extended._container_strategy() # pylint: disable=protected-access\n # We have an error to report, figure out the right message.\n if context.strategy is not strategy:\n _wrong_strategy_scope(strategy, context)\n assert cross_replica is None\n raise RuntimeError(\"Method requires being in cross-replica context, use \"\n \"get_replica_context().merge_call()\")\n\n\ndef _wrong_strategy_scope(strategy, context):\n # Figure out the right error message.\n if not distribution_strategy_context.has_strategy():\n raise RuntimeError(\n 'Need to be inside \"with strategy.scope()\" for %s' %\n (strategy,))\n else:\n raise RuntimeError(\n \"Mixing different tf.distribute.Strategy objects: %s is not %s\" %\n (context.strategy, strategy))\n\n\ndef require_replica_context(replica_ctx):\n \"\"\"Verify in `replica_ctx` replica context.\"\"\"\n context = _get_per_thread_mode()\n if context.replica_context is replica_ctx: return\n # We have an error to report, figure out the right message.\n if context.replica_context is None:\n raise RuntimeError(\"Need to be inside `call_for_each_replica()`\")\n if context.strategy is replica_ctx.strategy:\n # Two different ReplicaContexts with the same tf.distribute.Strategy.\n raise RuntimeError(\"Mismatching ReplicaContext.\")\n raise RuntimeError(\n \"Mismatching tf.distribute.Strategy objects: %s is not %s.\" %\n (context.strategy, replica_ctx.strategy))\n\n\ndef _require_strategy_scope_strategy(strategy):\n \"\"\"Verify in a `strategy.scope()` in this thread.\"\"\"\n context = _get_per_thread_mode()\n if context.strategy is strategy: return\n _wrong_strategy_scope(strategy, context)\n\n\ndef _require_strategy_scope_extended(extended):\n \"\"\"Verify in a `distribution_strategy.scope()` in this thread.\"\"\"\n context = _get_per_thread_mode()\n if context.strategy.extended is extended: return\n # Report error.\n strategy = extended._container_strategy() # pylint: disable=protected-access\n _wrong_strategy_scope(strategy, context)\n\n\n# ------------------------------------------------------------------------------\n# Internal context managers used to implement the DistributionStrategy\n# base class\n\n\nclass _CurrentDistributionContext(object):\n \"\"\"Context manager setting the current `tf.distribute.Strategy`.\n\n Also: overrides the variable creator and optionally the current device.\n \"\"\"\n\n def __init__(self,\n strategy,\n var_creator_scope,\n var_scope=None,\n default_device=None):\n self._context = distribution_strategy_context._CrossReplicaThreadMode( # pylint: disable=protected-access\n strategy)\n self._var_creator_scope = var_creator_scope\n self._var_scope = var_scope\n if default_device:\n self._device_scope = ops.device(default_device)\n else:\n self._device_scope = None\n self._same_scope_again_count = 0\n\n def __enter__(self):\n # Allow this scope to be entered if this strategy is already in scope.\n if distribution_strategy_context.has_strategy():\n _require_cross_replica_or_default_context_extended(\n self._context.strategy.extended)\n self._same_scope_again_count += 1\n else:\n _push_per_thread_mode(self._context)\n if self._var_scope:\n self._var_scope.__enter__()\n self._var_creator_scope.__enter__()\n if self._device_scope:\n self._device_scope.__enter__()\n return self._context.strategy\n\n def __exit__(self, exception_type, 
exception_value, traceback):\n if self._same_scope_again_count > 0:\n self._same_scope_again_count -= 1\n return\n if self._device_scope:\n try:\n self._device_scope.__exit__(exception_type, exception_value, traceback)\n except RuntimeError as e:\n six.raise_from(\n RuntimeError(\"Device scope nesting error: move call to \"\n \"tf.distribute.set_strategy() out of `with` scope.\"),\n e)\n\n try:\n self._var_creator_scope.__exit__(\n exception_type, exception_value, traceback)\n except RuntimeError as e:\n six.raise_from(\n RuntimeError(\"Variable creator scope nesting error: move call to \"\n \"tf.distribute.set_strategy() out of `with` scope.\"),\n e)\n\n if self._var_scope:\n try:\n self._var_scope.__exit__(exception_type, exception_value, traceback)\n except RuntimeError as e:\n six.raise_from(\n RuntimeError(\"Variable scope nesting error: move call to \"\n \"tf.distribute.set_strategy() out of `with` scope.\"),\n e)\n _pop_per_thread_mode()\n\n\n# TODO(yuefengz): add more replication modes.\n@tf_export(\"distribute.InputReplicationMode\")\nclass InputReplicationMode(enum.Enum):\n \"\"\"Replication mode for input function.\n\n * `PER_WORKER`: The input function will be called on each worker\n independently, creating as many input pipelines as number of workers.\n Replicas will dequeue from the local Dataset on their worker.\n `tf.distribute.Strategy` doesn't manage any state sharing between such\n separate input pipelines.\n \"\"\"\n PER_WORKER = \"PER_WORKER\"\n\n\n@tf_export(\"distribute.InputContext\")\nclass InputContext(object):\n \"\"\"A class wrapping information needed by an input function.\n\n This is a context class that is passed to the user's input fn and contains\n information about the compute replicas and input pipelines. The number of\n compute replicas (in sync training) helps compute per input pipeline batch\n size from the desired global batch size. Input pipeline information can be\n used to return a different subset of the input in each input pipeline (for\n e.g. 
shard the input pipeline, use a different input source etc).\n \"\"\"\n\n def __init__(self,\n num_input_pipelines=1,\n input_pipeline_id=0,\n num_replicas_in_sync=1):\n \"\"\"Initializes an InputContext object.\n\n Args:\n num_input_pipelines: the number of input pipelines in a cluster.\n input_pipeline_id: the current input pipeline id, should be an int in\n [0,`num_input_pipelines`).\n num_replicas_in_sync: the number of replicas that are in sync.\n \"\"\"\n self._num_input_pipelines = num_input_pipelines\n self._input_pipeline_id = input_pipeline_id\n self._num_replicas_in_sync = num_replicas_in_sync\n\n @property\n def num_replicas_in_sync(self):\n \"\"\"Returns the number of compute replicas in sync.\"\"\"\n return self._num_replicas_in_sync\n\n @property\n def input_pipeline_id(self):\n \"\"\"Returns the input pipeline ID.\"\"\"\n return self._input_pipeline_id\n\n @property\n def num_input_pipelines(self):\n \"\"\"Returns the number of input pipelines.\"\"\"\n return self._num_input_pipelines\n\n def get_per_replica_batch_size(self, global_batch_size):\n \"\"\"Returns the per-replica batch size.\n\n Args:\n global_batch_size: the global batch size which should be divisible by\n `num_replicas_in_sync`.\n\n Returns:\n the per-replica batch size.\n\n Raises:\n ValueError: if `global_batch_size` not divisible by\n `num_replicas_in_sync`.\n \"\"\"\n if global_batch_size % self._num_replicas_in_sync != 0:\n raise ValueError(\"The `global_batch_size` %r is not divisible by \"\n \"`num_replicas_in_sync` %r \" %\n (global_batch_size, self._num_replicas_in_sync))\n return global_batch_size // self._num_replicas_in_sync\n\n\n# ------------------------------------------------------------------------------\n# Base classes for all distribution strategies.\n\n\n@tf_export(\"distribute.Strategy\")\nclass DistributionStrategy(object):\n \"\"\"A list of devices with a state & compute distribution policy.\n\n See [tensorflow/contrib/distribute/README.md](\n https://www.tensorflow.org/code/tensorflow/contrib/distribute/README.md)\n for overview and examples.\n \"\"\"\n\n # TODO(josh11b): Raise an exception if variable partitioning requested before\n # we add support.\n # TODO(josh11b): Also `parameter_device_index` property?\n # TODO(josh11b): `map()`\n # TODO(josh11b): ClusterSpec/ClusterResolver\n # TODO(josh11b): Partitioned computations, state; sharding\n # TODO(josh11b): Model parallelism: \"replicas\" with multiple devices; shuffling\n # TODO(josh11b): List of replicas with their worker and parameter devices\n # (where the parameter devices may overlap in the ps case).\n\n def __init__(self, extended):\n self._extended = extended\n\n @property\n def extended(self):\n \"\"\"`tf.distribute.StrategyExtended` with additional methods.\"\"\"\n return self._extended\n\n def scope(self):\n \"\"\"Returns a context manager selecting this Strategy as current.\n\n Inside a `with strategy.scope():` code block, this thread\n will use a variable creator set by `strategy`, and will\n enter its \"cross-replica context\".\n\n Returns:\n A context manager.\n \"\"\"\n return self._extended._scope(self) # pylint: disable=protected-access\n\n @doc_controls.do_not_generate_docs # DEPRECATED, moving to `extended`\n def colocate_vars_with(self, colocate_with_variable):\n \"\"\"DEPRECATED: use extended.colocate_vars_with() instead.\"\"\"\n return self._extended.colocate_vars_with(colocate_with_variable)\n\n def make_dataset_iterator(self, dataset):\n \"\"\"Makes an iterator for input provided via `dataset`.\n\n Data from 
the given dataset will be distributed evenly across all the\n compute replicas. We will assume that the input dataset is batched by the\n global batch size. With this assumption, we will make a best effort to\n divide each batch across all the replicas (one or more workers).\n If this effort fails, an error will be thrown, and the user should instead\n use `make_input_fn_iterator` which provides more control to the user, and\n does not try to divide a batch across replicas.\n\n The user could also use `make_input_fn_iterator` if they want to\n customize which input is fed to which replica/worker etc.\n\n Args:\n dataset: `tf.data.Dataset` that will be distributed evenly across all\n replicas.\n\n Returns:\n An `tf.distribute.InputIterator` which returns inputs for each step of the\n computation. User should call `initialize` on the returned iterator.\n \"\"\"\n return self._extended._make_dataset_iterator(dataset) # pylint: disable=protected-access\n\n def make_input_fn_iterator(self,\n input_fn,\n replication_mode=InputReplicationMode.PER_WORKER):\n \"\"\"Returns an iterator split across replicas created from an input function.\n\n The `input_fn` should take an `tf.distribute.InputContext` object where\n information about batching and input sharding can be accessed:\n\n ```\n def input_fn(input_context):\n batch_size = input_context.get_per_replica_batch_size(global_batch_size)\n d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)\n return d.shard(input_context.num_input_pipelines,\n input_context.input_pipeline_id)\n with strategy.scope():\n iterator = strategy.make_input_fn_iterator(input_fn)\n replica_results = strategy.experimental_run(replica_fn, iterator)\n ```\n\n The `tf.data.Dataset` returned by `input_fn` should have a per-replica\n batch size, which may be computed using\n `input_context.get_per_replica_batch_size`.\n\n Args:\n input_fn: A function taking a `tf.distribute.InputContext` object and\n returning a `tf.data.Dataset`.\n replication_mode: an enum value of `tf.distribute.InputReplicationMode`.\n Only `PER_WORKER` is supported currently, which means there will be\n a single call to `input_fn` per worker. Replicas will dequeue from the\n local `tf.data.Dataset` on their worker.\n\n Returns:\n An iterator object that should first be `.initialize()`-ed. It may then\n either be passed to `strategy.experimental_run()` or you can\n `iterator.get_next()` to get the next value to pass to\n `strategy.extended.call_for_each_replica()`.\n \"\"\"\n if replication_mode != InputReplicationMode.PER_WORKER:\n raise ValueError(\n \"Input replication mode not supported: %r\" % replication_mode)\n with self.scope():\n return self.extended._make_input_fn_iterator( # pylint: disable=protected-access\n input_fn, replication_mode=replication_mode)\n\n @doc_controls.do_not_generate_docs # DEPRECATED\n def experimental_make_numpy_iterator(\n self, numpy_input, batch_size, num_epochs=1, shuffle=1024, session=None):\n \"\"\"Makes an iterator for input provided via a nest of numpy arrays.\n\n DEPRECATED: Use `extended.experimental_make_numpy_dataset` instead.\n\n Args:\n numpy_input: A nest of NumPy input arrays that will be distributed evenly\n across all replicas. Note that lists of Numpy arrays are stacked,\n as that is normal `tf.data.Dataset` behavior.\n batch_size: The number of entries from the array we should consume in one\n step of the computation, across all replicas. This is the global batch\n size. 
It should be divisible by `num_replicas_in_sync`.\n num_epochs: The number of times to iterate through the examples. A value\n of `None` means repeat forever.\n shuffle: Size of buffer to use for shuffling the input examples.\n Use `None` to disable shuffling.\n session: (TensorFlow v1.x graph execution only) A session used for\n initialization.\n\n Returns:\n An `tf.distribute.InputIterator` which returns inputs for each step of the\n computation. User should call `initialize` on the returned iterator.\n \"\"\"\n ds = self.extended.experimental_make_numpy_dataset(\n numpy_input, session=session)\n if shuffle:\n ds = ds.shuffle(shuffle)\n if num_epochs != 1:\n ds = ds.repeat(num_epochs)\n # We need to use the drop_remainder argument to get a known static\n # input shape which is required for TPUs.\n drop_remainder = self.extended.experimental_require_static_shapes\n ds = ds.batch(batch_size, drop_remainder=drop_remainder)\n return self.make_dataset_iterator(ds)\n\n def experimental_run(self, fn, input_iterator=None):\n \"\"\"Runs ops in `fn` on each replica, with inputs from `input_iterator`.\n\n When eager execution is enabled, executes ops specified by `fn` on each\n replica. Otherwise, builds a graph to execute the ops on each replica.\n\n Each replica will take a single, different input from the inputs provided by\n one `get_next` call on the input iterator.\n\n `fn` may call `tf.distribute.get_replica_context()` to access members such\n as `replica_id_in_sync_group`.\n\n IMPORTANT: Depending on the `tf.distribute.Strategy` implementation being\n used, and whether eager execution is enabled, `fn` may be called one or more\n times (once for each replica).\n\n Args:\n fn: The function to run. The inputs to the function must match the outputs\n of `input_iterator.get_next()`. The output must be a `tf.nest` of\n `Tensor`s.\n input_iterator: (Optional) input iterator from which the inputs are taken.\n\n Returns:\n Merged return value of `fn` across replicas. The structure of the return\n value is the same as the return value from `fn`. Each element in the\n structure can either be `PerReplica` (if the values are unsynchronized),\n `Mirrored` (if the values are kept in sync), or `Tensor` (if running on a\n single replica).\n \"\"\"\n with self.scope():\n args = (input_iterator.get_next(),) if input_iterator is not None else ()\n return self.experimental_run_v2(fn, args=args)\n\n def experimental_run_v2(self, fn, args=(), kwargs=None):\n \"\"\"Runs ops in `fn` on each replica, with the given arguments.\n\n When eager execution is enabled, executes ops specified by `fn` on each\n replica. Otherwise, builds a graph to execute the ops on each replica.\n\n `fn` may call `tf.distribute.get_replica_context()` to access members such\n as `replica_id_in_sync_group`.\n\n IMPORTANT: Depending on the `tf.distribute.Strategy` implementation being\n used, and whether eager execution is enabled, `fn` may be called one or more\n times (once for each replica).\n\n Args:\n fn: The function to run. The output must be a `tf.nest` of `Tensor`s.\n args: (Optional) Positional arguments to `fn`.\n kwargs: (Optional) Keyword arguments to `fn`.\n\n Returns:\n Merged return value of `fn` across replicas. The structure of the return\n value is the same as the return value from `fn`. 
Each element in the\n structure can either be `PerReplica` (if the values are unsynchronized),\n `Mirrored` (if the values are kept in sync), or `Tensor` (if running on a\n single replica).\n \"\"\"\n with self.scope():\n return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)\n\n def reduce(self, reduce_op, value):\n \"\"\"Reduce `value` across replicas.\n\n Args:\n reduce_op: A `tf.distribute.ReduceOp` value specifying how values should\n be combined.\n value: A \"per replica\" value to be combined into a single tensor.\n\n Returns:\n A `Tensor`.\n \"\"\"\n _require_cross_replica_or_default_context_extended(self._extended)\n return self._extended._reduce(reduce_op, value) # pylint: disable=protected-access\n\n @doc_controls.do_not_generate_docs # DEPRECATED\n def unwrap(self, value):\n \"\"\"Returns the list of all local per-replica values contained in `value`.\n\n DEPRECATED: Please use `experimental_local_results` instead.\n\n Note: This only returns values on the workers initiated by this client.\n When using a `Strategy` like\n `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker\n will be its own client, and this function will only return values\n computed on that worker.\n\n Args:\n value: A value returned by `experimental_run()`,\n `extended.call_for_each_replica()`, or a variable created in `scope`.\n\n Returns:\n A tuple of values contained in `value`. If `value` represents a single\n value, this returns `(value,).`\n \"\"\"\n return self._extended._local_results(value) # pylint: disable=protected-access\n\n def experimental_local_results(self, value):\n \"\"\"Returns the list of all local per-replica values contained in `value`.\n\n Note: This only returns values on the workers initiated by this client.\n When using a `Strategy` like\n `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker\n will be its own client, and this function will only return values\n computed on that worker.\n\n Args:\n value: A value returned by `experimental_run()`, `experimental_run_v2()`,\n `extended.call_for_each_replica()`, or a variable created in `scope`.\n\n Returns:\n A tuple of values contained in `value`. If `value` represents a single\n value, this returns `(value,).`\n \"\"\"\n return self._extended._local_results(value) # pylint: disable=protected-access\n\n @doc_controls.do_not_generate_docs # DEPRECATED: TF v1.x only\n def group(self, value, name=None):\n \"\"\"Shortcut for `tf.group(self.experimental_local_results(value))`.\"\"\"\n return self._extended._group(value, name) # pylint: disable=protected-access\n\n @property\n def num_replicas_in_sync(self):\n \"\"\"Returns number of replicas over which gradients are aggregated.\"\"\"\n return self._extended._num_replicas_in_sync # pylint: disable=protected-access\n\n @doc_controls.do_not_generate_docs # DEPRECATED, being replaced by a new API.\n def configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n # pylint: disable=g-doc-return-or-yield,g-doc-args\n \"\"\"DEPRECATED: use `update_config_proto` instead.\n\n Configures the strategy class.\n\n DEPRECATED: This method's functionality has been split into the strategy\n constructor and `update_config_proto`. In the future, we will allow passing\n cluster and config_proto to the constructor to configure the strategy. 
And\n `update_config_proto` can be used to update the config_proto based on the\n specific strategy.\n \"\"\"\n return self._extended._configure( # pylint: disable=protected-access\n session_config, cluster_spec, task_type, task_id)\n\n def update_config_proto(self, config_proto):\n \"\"\"Returns a copy of `config_proto` modified for use with this strategy.\n\n The updated config has something needed to run a strategy, e.g.\n configuration to run collective ops, or device filters to improve\n distributed training performance.\n\n Args:\n config_proto: a `tf.ConfigProto` object.\n\n Returns:\n The updated copy of the `config_proto`.\n \"\"\"\n return self._extended._update_config_proto(config_proto) # pylint: disable=protected-access\n\n def __deepcopy__(self, memo):\n # First do a regular deepcopy of `self`.\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n setattr(result, k, copy.deepcopy(v, memo))\n # One little fix-up: we want `result._extended` to reference `result`\n # instead of `self`.\n result._extended._container_strategy_weakref = weakref.ref(result) # pylint: disable=protected-access\n return result\n\n def __copy__(self):\n raise RuntimeError(\"Must only deepcopy DistributionStrategy.\")\n\n\n@tf_export(\"distribute.StrategyExtended\")\nclass DistributionStrategyExtended(object):\n \"\"\"Additional APIs for algorithms that need to be distribution-aware.\n\n The intent is that you can write an algorithm in a stylized way and\n it will be usable with a variety of different\n `tf.distribute.Strategy`\n implementations. Each descendant will implement a different strategy\n for distributing the algorithm across multiple devices/machines.\n Furthermore, these changes can be hidden inside the specific layers\n and other library classes that need special treatment to run in a\n distributed setting, so that most users' model definition code can\n run unchanged. The `tf.distribute.Strategy` API works the same way\n with eager and graph execution.\n\n First let's introduce a few high-level concepts:\n\n * _Data parallelism_ is where we run multiple copies of the model\n on different slices of the input data. This is in contrast to\n _model parallelism_ where we divide up a single copy of a model\n across multiple devices.\n Note: we only support data parallelism for now, but\n hope to add support for model parallelism in the future.\n * A _replica_ is one copy of the model, running on one slice of the\n input data.\n * _Synchronous_, or more commonly _sync_, training is where the\n updates from each replica are aggregated together before updating\n the model variables. This is in contrast to _asynchronous_, or\n _async_ training, where each replica updates the model variables\n independently.\n * Furthermore you might run your computation on multiple devices\n on one machine (or \"host\"), or on multiple machines/hosts.\n If you are running on multiple machines, you might have a\n single master host that drives computation across all of them,\n or you might have multiple clients driving the computation\n asynchronously.\n\n To distribute an algorithm, we might use some of these ingredients:\n\n * Parameter servers: These are hosts that hold a single copy of\n parameters/variables. All replicas that want to operate on a variable\n retrieve it at the beginning of a step and send an update to be\n applied at the end of the step. 
Can support either sync or async\n training.\n * Mirrored variables: These are variables that are copied to multiple\n devices, where we keep the copies in sync by applying the same\n updates to every copy. Normally would only be used with sync training.\n * Reductions and Allreduce: A _reduction_ is some method of\n aggregating multiple values into one value, like \"sum\" or\n \"mean\". If doing sync training, we will perform a reduction on the\n gradients to a parameter from all replicas before applying the\n update. Allreduce is an algorithm for performing a reduction on\n values from multiple devices and making the result available on\n all of those devices.\n * In the future we will have support for TensorFlow's partitioned\n variables, where a single variable is split across multiple\n devices.\n\n We have then a few approaches we want to support:\n\n * Code written (as if) with no knowledge of class `tf.distribute.Strategy`.\n This code should work as before, even if some of the layers, etc.\n used by that code are written to be distribution-aware. This is done\n by having a default `tf.distribute.Strategy` that gives ordinary behavior,\n and by default being in a single replica context.\n * Ordinary model code that you want to run using a specific\n `tf.distribute.Strategy`. This can be as simple as:\n\n ```\n with my_strategy.scope():\n iterator = my_strategy.make_dataset_iterator(dataset)\n session.run(iterator.initialize())\n replica_train_ops = my_strategy.extended.call_for_each_replica(\n replica_fn, args=(iterator.get_next(),))\n train_op = my_strategy.group(replica_train_ops)\n ```\n\n This takes an ordinary `dataset` and `replica_fn` and runs it\n distributed using a particular `tf.distribute.Strategy` in\n `my_strategy`. Any variables created in `replica_fn` are created\n using `my_strategy`'s policy, and library functions called by\n `replica_fn` can use the `get_replica_context()` API to get enhanced\n behavior in this case.\n\n * If you want to write a distributed algorithm, you may use any of\n the `tf.distribute.Strategy` APIs inside a\n `with my_strategy.scope():` block of code.\n\n Lower-level concepts:\n\n * Wrapped values: In order to represent values parallel across devices\n (either replicas or the devices associated with a particular value), we\n wrap them in a \"PerReplica\" or \"Mirrored\" object that contains a map\n from device to values. \"PerReplica\" is used when the value may be\n different across replicas, and \"Mirrored\" when the value are the same.\n * Unwrapping and merging: Consider calling a function `fn` on multiple\n replicas, like `extended.call_for_each_replica(fn, args=[w])` with an\n argument `w` that is a wrapped value. This means `w` will have a map taking\n replica device `d0` to `w0`, replica device `d1` to `w1`,\n etc. `extended.call_for_each_replica()` unwraps `w` before calling `fn`, so\n it calls `fn(w0)` on `d0`, `fn(w1)` on `d1`, etc. It then merges the return\n values from `fn()`, which can possibly result in wrapped values. For\n example, let's say `fn()` returns a tuple with three components: `(x, a,\n v0)` from replica 0, `(x, b, v1)` on replica 1, etc. If the first component\n is the same object `x` from every replica, then the first component of the\n merged result will also be `x`. If the second component is different (`a`,\n `b`, ...) from each replica, then the merged value will have a wrapped map\n from replica device to the different values. 
If the third component is the\n members of a mirrored variable (`v` maps `d0` to `v0`, `d1` to `v1`, etc.),\n then the merged result will be that mirrored variable (`v`).\n * Replica context vs. Cross-replica context: _replica context_ is when we\n are in some function that is being called once for each replica.\n Otherwise we are in cross-replica context, which is useful for\n calling `tf.distribute.Strategy` methods which operate across the\n replicas (like `reduce_to()`). By default you start in a replica context\n (the default \"single replica context\") and then some methods can\n switch you back and forth, as described below.\n * Worker devices vs. parameter devices: Most replica computations will\n happen on worker devices. Since we don't yet support model\n parallelism, there will be one worker device per replica. When using\n parameter servers (see above), the set of devices holding\n variables may be different, otherwise the parameter devices might\n match the worker devices.\n * Non-slot devices are some subset of the parameter devices where we\n put all the non-slot variables. We need to ensure that all\n non-slot variables are allocated on the same device, or mirrored\n across the same set of devices. If you have some variable you want\n to colocate all the non-slot variables with, you can use\n `colocate_vars_with()` to get the remaining non-slot variables on\n the same device. Otherwise you can use `non_slot_devices()` to\n pick a consistent set of devices to pass to both\n `colocate_vars_with()` and `update_non_slot()`.\n\n When using a `tf.distribute.Strategy`, we have a new type dimension\n called _locality_ that says what values are compatible with which\n APIs:\n\n * T: different value for each replica (e.g. a PerReplica-wrapped value).\n * M: value is \"mirrored\" across replicas, i.e. there are copies with the\n same value on each replica (e.g. a Mirrored-wrapped value).\n * V(`v`): value is \"mirrored\" across all the devices which have a\n copy of variable `v` (also a Mirrored-wrapped value, but over\n parameter devices instead of worker devices).\n * N: value is \"mirrored\" across all the \"non-slot\" devices\n\n Rules for methods with respect to locality and single-replica vs.\n cross-replica context:\n\n * `with d.scope()`: default single-replica context -> cross-replica context\n for `d`\n * `with d.extended.colocate_vars_with(v)`: in replica/cross-replica context,\n variables will be created with locality V(`v`). That is, if we write\n `with d.extended.colocate_vars_with(v1): v2 = tf.get_variable(...)`,\n then `v2` will have locality V(`v1`), i.e. locality V(`v2`) will equal\n V(`v1`).\n * `with d.extended.colocate_vars_with(d.extended.non_slot_devices(...))`: in\n replica/cross-replica context, variables will be created with locality N\n * `v = tf.get_variable(...)`: in replica/cross-replica context, creates\n a variable (which by definition will have locality V(`v`), though\n will match another locality if inside a `colocate_vars_with`\n scope).\n * `d.make_dataset_iterator(dataset)`: in cross-replica\n context, produces an iterator with locality T\n * `d.extended.broadcast_to(t, v)`: in cross-replica context, produces a value\n with locality V(`v`)\n * `d.extended.call_for_each_replica(fn, ...)`: in cross-replica context, runs\n `fn()` in a replica context (and so may call `get_replica_context()` and\n use its API, including `merge_call()` to get back to cross-replica\n context), once for each replica. 
May use values with locality T or\n M, and any variable.\n * `d.extended.reduce_to(m, t, t)`: in cross-replica context, accepts t with\n locality T and produces a value with locality M.\n * `d.extended.reduce_to(m, t, v)`: in cross-replica context, accepts t with\n locality T and produces a value with locality V(`v`).\n * `d.extended.batch_reduce_to(m, [(t, v)]): see `d.extended.reduce_to()`\n * `d.extended.update(v, fn, ...)`: in cross-replica context, runs `fn()` once\n for each device `v` is copied to, all inputs should have locality\n V(`v`), output will have locality V(`v`) as well.\n * `d.extended.update_non_slot(d.extended.non_slot_devices(), fn)`: in\n cross-replica context, like `d.extended.update()` except with locality N.\n * `d.extended.read_var(v)`: Gets the (read-only) value of the variable `v` (on\n the device determined by the current device scope), aggregating\n across replicas for replica-local variables. Frequently, this will be\n done automatically when using `v` in an expression or fetching it in\n a cross-replica context, but this function can be used to force that\n conversion happens at a particular point in time (for example, to\n add the result of the conversion to a graph collection).\n\n The standard pattern for updating variables is to:\n\n 1. Create an input iterator with `d.make_dataset_iterator()`.\n 2. Define each replica `d.extended.call_for_each_replica()` up to the point of\n getting a list of gradient, variable pairs.\n 3. Call `d.extended.reduce_to(VariableAggregation.SUM, t, v)` or\n `d.extended.batch_reduce_to()` to sum the gradients (with locality T)\n into values with locality V(`v`).\n 4. Call `d.extended.update(v)` for each variable to update its value.\n\n Steps 3 and 4 are done automatically by class `Optimizer` if you call\n its `apply_gradients` method in a replica context. Otherwise you can\n manually call its `_distributed_apply` method in a cross-replica context.\n\n Another thing you might want to do in the middle of your replica function is\n an all-reduce of some intermediate value, using `d.extended.reduce_to()` or\n `d.extended.batch_reduce_to()`. You simply provide the same tensor as the\n input and destination.\n\n Layers should expect to be called in a replica context, and can use\n the `tf.distribute.get_replica_context` function to get a\n `tf.distribute.ReplicaContext` object. The\n `ReplicaContext` object has a `merge_call()` method for entering\n cross-replica context where you can use `reduce_to()` (or\n `batch_reduce_to()`) and then optionally `update()` to update state.\n\n You may use this API whether or not a `tf.distribute.Strategy` is\n being used, since there is a default implementation of\n `ReplicaContext` and `tf.distribute.Strategy`.\n\n NOTE for new `tf.distribute.Strategy` implementations: Please put all logic\n in a subclass of `tf.distribute.StrategyExtended`. 
The only code needed for\n the `tf.distribute.Strategy` subclass is for instantiating your subclass of\n `tf.distribute.StrategyExtended` in the `__init__` method.\n \"\"\"\n\n def __init__(self, container_strategy):\n self._container_strategy_weakref = weakref.ref(container_strategy)\n self._default_device = None\n # This property is used to determine if we should set drop_remainder=True\n # when creating Datasets from numpy array inputs.\n self._require_static_shapes = False\n\n def _container_strategy(self):\n \"\"\"Get the containing `DistributionStrategy`.\n\n This should not generally be needed except when creating a new\n `ReplicaContext` and to validate that the caller is in the correct\n `scope()`.\n\n Returns:\n The `DistributionStrategy` such that `strategy.extended` is `self`.\n \"\"\"\n container_strategy = self._container_strategy_weakref()\n assert container_strategy is not None\n return container_strategy\n\n def _scope(self, strategy):\n \"\"\"Implementation of DistributionStrategy.scope().\"\"\"\n def creator_with_resource_vars(*args, **kwargs):\n _require_strategy_scope_extended(self)\n kwargs[\"use_resource\"] = True\n kwargs[\"distribute_strategy\"] = strategy\n return self._create_variable(*args, **kwargs)\n\n def distributed_getter(getter, *args, **kwargs):\n if not self._allow_variable_partition():\n if kwargs.pop(\"partitioner\", None) is not None:\n tf_logging.log_first_n(\n tf_logging.WARN, \"Partitioned variables are disabled when using \"\n \"current tf.distribute.Strategy.\", 1)\n return getter(*args, **kwargs)\n\n return _CurrentDistributionContext(\n strategy,\n variable_scope.variable_creator_scope(creator_with_resource_vars),\n variable_scope.variable_scope(\n variable_scope.get_variable_scope(),\n custom_getter=distributed_getter), self._default_device)\n\n def _allow_variable_partition(self):\n return False\n\n def _create_variable(self, next_creator, *args, **kwargs):\n # Note: should support \"colocate_with\" argument.\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def variable_created_in_scope(self, v):\n \"\"\"Tests whether `v` was created while this strategy scope was active.\n\n Variables created inside the strategy scope are \"owned\" by it:\n\n >>> with strategy.scope():\n ... 
v = tf.Variable(1.)\n >>> strategy.variable_created_in_scope(v)\n True\n\n Variables created outside the strategy are not owned by it:\n\n >>> v = tf.Variable(1.)\n >>> strategy.variable_created_in_scope(v)\n False\n\n Args:\n v: A `tf.Variable` instance.\n\n Returns:\n True if `v` was created inside the scope, False if not.\n \"\"\"\n return v._distribute_strategy == self._container_strategy_weakref() # pylint: disable=protected-access\n\n def read_var(self, v):\n \"\"\"Reads the value of a variable.\n\n Returns the aggregate value of a replica-local variable, or the\n (read-only) value of any other variable.\n\n Args:\n v: A variable allocated within the scope of this `tf.distribute.Strategy`.\n\n Returns:\n A tensor representing the value of `v`, aggregated across replicas if\n necessary.\n \"\"\"\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def colocate_vars_with(self, colocate_with_variable):\n \"\"\"Scope that controls which devices variables will be created on.\n\n No operations should be added to the graph inside this scope, it\n should only be used when creating variables (some implementations\n work by changing variable creation, others work by using a\n tf.colocate_with() scope).\n\n This may only be used inside `self.scope()`.\n\n Example usage:\n\n ```\n with strategy.scope():\n var1 = tf.get_variable(...)\n with strategy.extended.colocate_vars_with(var1):\n # var2 and var3 will be created on the same device(s) as var1\n var2 = tf.get_variable(...)\n var3 = tf.get_variable(...)\n\n def fn(v1, v2, v3):\n # operates on v1 from var1, v2 from var2, and v3 from var3\n\n # `fn` runs on every device `var1` is on, `var2` and `var3` will be there\n # too.\n strategy.extended.update(var1, fn, args=(var2, var3))\n ```\n\n Args:\n colocate_with_variable: A variable created in this strategy's `scope()`.\n Variables created while in the returned context manager will be on the\n same set of devices as `colocate_with_variable`.\n\n Returns:\n A context manager.\n \"\"\"\n def create_colocated_variable(next_creator, *args, **kwargs):\n _require_strategy_scope_extended(self)\n kwargs[\"use_resource\"] = True\n kwargs[\"colocate_with\"] = colocate_with_variable\n return next_creator(*args, **kwargs)\n\n _require_strategy_scope_extended(self)\n self._validate_colocate_with_variable(colocate_with_variable)\n return variable_scope.variable_creator_scope(create_colocated_variable)\n\n def _validate_colocate_with_variable(self, colocate_with_variable):\n \"\"\"Validate `colocate_with_variable` argument to `colocate_vars_with`.\"\"\"\n pass\n\n def _make_dataset_iterator(self, dataset):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def _make_input_fn_iterator(self, input_fn, replication_mode):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def experimental_make_numpy_dataset(self, numpy_input, session=None):\n \"\"\"Makes a dataset for input provided via a numpy array.\n\n This avoids adding `numpy_input` as a large constant in the graph,\n and copies the data to the machine or machines that will be processing\n the input.\n\n Args:\n numpy_input: A nest of NumPy input arrays that will be distributed evenly\n across all replicas. 
Note that lists of Numpy arrays are stacked,\n as that is normal `tf.data.Dataset` behavior.\n session: (TensorFlow v1.x graph execution only) A session used for\n initialization.\n\n Returns:\n A `tf.data.Dataset` representing `numpy_input`.\n \"\"\"\n _require_cross_replica_or_default_context_extended(self)\n return self._experimental_make_numpy_dataset(numpy_input, session=session)\n\n def _experimental_make_numpy_dataset(self, numpy_input, session):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def broadcast_to(self, tensor, destinations):\n \"\"\"Mirror a tensor on one device to all worker devices.\n\n Args:\n tensor: A Tensor value to broadcast.\n destinations: A mirrored variable or device string specifying the\n destination devices to copy `tensor` to.\n\n Returns:\n A value mirrored to `destinations` devices.\n \"\"\"\n assert destinations is not None # from old strategy.broadcast()\n # TODO(josh11b): More docstring\n _require_cross_replica_or_default_context_extended(self)\n assert not isinstance(destinations, (list, tuple))\n return self._broadcast_to(tensor, destinations)\n\n def _broadcast_to(self, tensor, destinations):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def experimental_run_steps_on_iterator(self, fn, iterator, iterations=1,\n initial_loop_values=None):\n \"\"\"Run `fn` with input from `iterator` for `iterations` times.\n\n This method can be used to run a step function for training a number of\n times using input from a dataset.\n\n Args:\n fn: function to run using this distribution strategy. The function must\n have the following signature: `def fn(context, inputs)`.\n `context` is an instance of `MultiStepContext` that will be passed when\n `fn` is run. `context` can be used to specify the outputs to be returned\n from `fn` by calling `context.set_last_step_output`. It can also be used\n to capture non tensor outputs by `context.set_non_tensor_output`.\n See `MultiStepContext` documentation for more information.\n `inputs` will have same type/structure as `iterator.get_next()`.\n Typically, `fn` will use `call_for_each_replica` method of the strategy\n to distribute the computation over multiple replicas.\n iterator: Iterator of a dataset that represents the input for `fn`. The\n caller is responsible for initializing the iterator as needed.\n iterations: (Optional) Number of iterations that `fn` should be run.\n Defaults to 1.\n initial_loop_values: (Optional) Initial values to be passed into the\n loop that runs `fn`. Defaults to `None`. # TODO(priyag): Remove\n initial_loop_values argument when we have a mechanism to infer the\n outputs of `fn`.\n\n Returns:\n Returns the `MultiStepContext` object which has the following properties,\n among other things:\n - run_op: An op that runs `fn` `iterations` times.\n - last_step_outputs: A dictionary containing tensors set using\n `context.set_last_step_output`. 
Evaluating this returns the value of\n the tensors after the last iteration.\n - non_tensor_outputs: A dictionatry containing anything that was set by\n `fn` by calling `context.set_non_tensor_output`.\n \"\"\"\n _require_cross_replica_or_default_context_extended(self)\n with self._container_strategy().scope():\n return self._experimental_run_steps_on_iterator(\n fn, iterator, iterations, initial_loop_values)\n\n def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,\n initial_loop_values):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def call_for_each_replica(self, fn, args=(), kwargs=None):\n \"\"\"Run `fn` once per replica.\n\n `fn` may call `tf.get_replica_context()` to access methods such as\n `replica_id_in_sync_group` and `merge_call()`.\n\n `merge_call()` is used to communicate between the replicas and\n re-enter the cross-replica context. All replicas pause their execution\n having encountered a `merge_call()` call. After that the\n `merge_fn`-function is executed. Its results are then unwrapped and\n given back to each replica call. After that execution resumes until\n `fn` is complete or encounters another `merge_call()`. Example:\n\n ```python\n # Called once in \"cross-replica\" context.\n def merge_fn(distribution, three_plus_replica_id):\n # sum the values across replicas\n return sum(distribution.experimental_local_results(three_plus_replica_id))\n\n # Called once per replica in `distribution`, in a \"replica\" context.\n def fn(three):\n replica_ctx = tf.get_replica_context()\n v = three + replica_ctx.replica_id_in_sync_group\n # Computes the sum of the `v` values across all replicas.\n s = replica_ctx.merge_call(merge_fn, args=(v,))\n return s + v\n\n with distribution.scope():\n # in \"cross-replica\" context\n ...\n merged_results = distribution.call_for_each_replica(fn, args=[3])\n # merged_results has the values from every replica execution of `fn`.\n # This statement prints a list:\n print(distribution.experimental_local_results(merged_results))\n ```\n\n Args:\n fn: function to run (will be run once per replica).\n args: Tuple or list with positional arguments for `fn`.\n kwargs: Dict with keyword arguments for `fn`.\n\n Returns:\n Merged return value of `fn` across all replicas.\n \"\"\"\n _require_cross_replica_or_default_context_extended(self)\n if kwargs is None:\n kwargs = {}\n with self._container_strategy().scope():\n return self._call_for_each_replica(fn, args, kwargs)\n\n def _call_for_each_replica(self, fn, args, kwargs):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def _reduce(self, reduce_op, value):\n # Default implementation until we have an implementation for each strategy.\n return self._local_results(\n self._reduce_to(reduce_op, value,\n device_util.current() or \"/device:CPU:0\"))[0]\n\n def reduce_to(self, reduce_op, value, destinations):\n \"\"\"Combine (via e.g. sum or mean) values across replicas.\n\n Args:\n reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum.\n value: A per-replica value with one value per replica.\n destinations: A mirrored variable, a per-replica tensor, or a device\n string. The return value will be copied to all destination devices (or\n all the devices where the `destinations` value resides). 
To perform an\n all-reduction, pass `value` to `destinations`.\n\n Returns:\n A value mirrored to `destinations`.\n \"\"\"\n # TODO(josh11b): More docstring\n _require_cross_replica_or_default_context_extended(self)\n assert not isinstance(destinations, (list, tuple))\n assert not isinstance(reduce_op, variable_scope.VariableAggregation)\n assert (reduce_op == reduce_util.ReduceOp.SUM or\n reduce_op == reduce_util.ReduceOp.MEAN)\n return self._reduce_to(reduce_op, value, destinations)\n\n def _reduce_to(self, reduce_op, value, destinations):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def batch_reduce_to(self, reduce_op, value_destination_pairs):\n \"\"\"Combine multiple `reduce_to` calls into one for faster execution.\n\n Args:\n reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum.\n value_destination_pairs: A sequence of (value, destinations)\n pairs. See `reduce_to()` for a description.\n\n Returns:\n A list of mirrored values, one per pair in `value_destination_pairs`.\n \"\"\"\n # TODO(josh11b): More docstring\n _require_cross_replica_or_default_context_extended(self)\n assert not isinstance(reduce_op, variable_scope.VariableAggregation)\n return self._batch_reduce_to(reduce_op, value_destination_pairs)\n\n def _batch_reduce_to(self, reduce_op, value_destination_pairs):\n return [\n self.reduce_to(reduce_op, t, destinations=v)\n for t, v in value_destination_pairs\n ]\n\n def update(self, var, fn, args=(), kwargs=None, group=True):\n \"\"\"Run `fn` to update `var` using inputs mirrored to the same devices.\n\n If `var` is mirrored across multiple devices, then this implements\n logic like:\n\n ```\n results = {}\n for device, v in var:\n with tf.device(device):\n # args and kwargs will be unwrapped if they are mirrored.\n results[device] = fn(v, *args, **kwargs)\n return merged(results)\n ```\n\n Otherwise this returns `fn(var, *args, **kwargs)` colocated with `var`.\n\n Neither `args` nor `kwargs` may contain per-replica values.\n If they contain mirrored values, they will be unwrapped before\n calling `fn`.\n\n Args:\n var: Variable, possibly mirrored to multiple devices, to operate on.\n fn: Function to call. Should take the variable as the first argument.\n args: Tuple or list. Additional positional arguments to pass to `fn()`.\n kwargs: Dict with keyword arguments to pass to `fn()`.\n group: Boolean. Defaults to True. If False, the return value will be\n unwrapped.\n\n Returns:\n By default, the merged return value of `fn` across all replicas. The\n merged result has dependencies to make sure that if it is evaluated at\n all, the side effects (updates) will happen on every replica. If instead\n \"group=False\" is specified, this function will return a nest of lists\n where each list has an element per replica, and the caller is responsible\n for ensuring all elements are executed.\n \"\"\"\n _require_cross_replica_or_default_context_extended(self)\n if kwargs is None:\n kwargs = {}\n with self._container_strategy().scope():\n return self._update(var, fn, args, kwargs, group)\n\n def _update(self, var, fn, args, kwargs, group):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def update_non_slot(\n self, colocate_with, fn, args=(), kwargs=None, group=True):\n \"\"\"Runs `fn(*args, **kwargs)` on `colocate_with` devices.\n\n Args:\n colocate_with: The return value of `non_slot_devices()`.\n fn: Function to execute.\n args: Tuple or list. 
Positional arguments to pass to `fn()`.\n kwargs: Dict with keyword arguments to pass to `fn()`.\n group: Boolean. Defaults to True. If False, the return value will be\n unwrapped.\n\n Returns:\n Return value of `fn`, possibly merged across devices.\n \"\"\"\n _require_cross_replica_or_default_context_extended(self)\n if kwargs is None:\n kwargs = {}\n with self._container_strategy().scope():\n return self._update_non_slot(colocate_with, fn, args, kwargs, group)\n\n def _update_non_slot(self, colocate_with, fn, args, kwargs, group):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def _local_results(self, distributed_value):\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def value_container(self, value):\n \"\"\"Returns the container that this per-replica `value` belongs to.\n\n Args:\n value: A value returned by `call_for_each_replica()` or a variable\n created in `scope()`.\n\n Returns:\n A container that `value` belongs to.\n If value does not belong to any container (including the case of\n container having been destroyed), returns the value itself.\n `value in experimental_local_results(value_container(value))` will\n always be true.\n \"\"\"\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def _group(self, value, name=None):\n \"\"\"Implementation of `group`.\"\"\"\n value = nest.flatten(self._local_results(value))\n\n if len(value) != 1 or name is not None:\n return control_flow_ops.group(value, name=name)\n # Special handling for the common case of one op.\n v, = value\n if hasattr(v, \"op\"):\n v = v.op\n return v\n\n @property\n def experimental_require_static_shapes(self):\n return self._require_static_shapes\n\n @property\n def _num_replicas_in_sync(self):\n \"\"\"Returns number of replicas over which gradients are aggregated.\"\"\"\n raise NotImplementedError(\"must be implemented in descendants\")\n\n @property\n def worker_devices(self):\n \"\"\"Returns the tuple of all devices used to for compute replica execution.\n \"\"\"\n # TODO(josh11b): More docstring\n raise NotImplementedError(\"must be implemented in descendants\")\n\n @property\n def parameter_devices(self):\n \"\"\"Returns the tuple of all devices used to place variables.\"\"\"\n # TODO(josh11b): More docstring\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def non_slot_devices(self, var_list):\n \"\"\"Device(s) for non-slot variables.\n\n Create variables on these devices in a\n `with colocate_vars_with(non_slot_devices(...)):` block.\n Update those using `update_non_slot()`.\n\n Args:\n var_list: The list of variables being optimized, needed with the\n default `tf.distribute.Strategy`.\n \"\"\"\n raise NotImplementedError(\"must be implemented in descendants\")\n\n @property\n def experimental_between_graph(self):\n \"\"\"Whether the strategy uses between-graph replication or not.\n\n This is expected to return a constant value that will not be changed\n throughout its life cycle.\n \"\"\"\n raise NotImplementedError(\"must be implemented in descendants\")\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n \"\"\"Configures the strategy class.\"\"\"\n del session_config, cluster_spec, task_type, task_id\n\n def _update_config_proto(self, config_proto):\n return copy.deepcopy(config_proto)\n\n @property\n def experimental_should_init(self):\n \"\"\"Whether initialization is needed.\"\"\"\n raise NotImplementedError(\"must be implemented in descendants\")\n\n 
@property\n def should_checkpoint(self):\n \"\"\"Whether checkpointing is needed.\"\"\"\n raise NotImplementedError(\"must be implemented in descendants\")\n\n @property\n def should_save_summary(self):\n \"\"\"Whether saving summaries is needed.\"\"\"\n raise NotImplementedError(\"must be implemented in descendants\")\n\n\n# A note about the difference between the context managers\n# `ReplicaContext` (defined here) and `_CurrentDistributionContext`\n# (defined above) used by `DistributionStrategy.scope()`:\n#\n# * a ReplicaContext is only present during a `call_for_each_replica()`\n# call (except during a `merge_run` call) and in such a scope it\n# will be returned by calls to `get_replica_context()`. Implementers of new\n# DistributionStrategy descendants will frequently also need to\n# define a descendant of ReplicaContext, and are responsible for\n# entering and exiting this context.\n#\n# * DistributionStrategy.scope() sets up a variable_creator scope that\n# changes variable creation calls (e.g. to make mirrored\n# variables). This is intended as an outer scope that users enter once\n# around their model creation and graph definition. There is no\n# anticipated need to define descendants of _CurrentDistributionContext.\n# It sets the current DistributionStrategy for purposes of\n# `get_strategy()` and `has_strategy()`\n# and switches the thread mode to a \"cross-replica context\".\n@tf_export(\"distribute.ReplicaContext\")\nclass ReplicaContext(object):\n \"\"\"`tf.distribute.Strategy` API when in a replica context.\n\n To be used inside your replicated step function, such as in a\n `tf.distribute.StrategyExtended.call_for_each_replica` call.\n \"\"\"\n\n def __init__(self, strategy, replica_id_in_sync_group):\n self._strategy = strategy\n self._thread_context = distribution_strategy_context._InReplicaThreadMode( # pylint: disable=protected-access\n self)\n self._replica_id_in_sync_group = replica_id_in_sync_group\n self._summary_recording_distribution_strategy = None\n\n def __enter__(self):\n _push_per_thread_mode(self._thread_context)\n ctx = eager_context.context()\n\n def replica_id_is_zero():\n return math_ops.equal(self._replica_id_in_sync_group,\n constant_op.constant(0))\n\n self._summary_recording_distribution_strategy = (\n ctx.summary_recording_distribution_strategy)\n ctx.summary_recording_distribution_strategy = replica_id_is_zero\n\n def __exit__(self, exception_type, exception_value, traceback):\n ctx = eager_context.context()\n ctx.summary_recording_distribution_strategy = (\n self._summary_recording_distribution_strategy)\n _pop_per_thread_mode()\n\n def merge_call(self, merge_fn, args=(), kwargs=None):\n \"\"\"Merge args across replicas and run `merge_fn` in a cross-replica context.\n\n This allows communication and coordination when there are multiple calls\n to a model function triggered by a call to\n `strategy.extended.call_for_each_replica(model_fn, ...)`.\n\n See `tf.distribute.StrategyExtended.call_for_each_replica` for an\n explanation.\n\n If not inside a distributed scope, this is equivalent to:\n\n ```\n strategy = tf.distribute.get_strategy()\n with cross-replica-context(strategy):\n return merge_fn(strategy, *args, **kwargs)\n ```\n\n Args:\n merge_fn: function that joins arguments from threads that are given as\n PerReplica. 
It accepts `tf.distribute.Strategy` object as\n the first argument.\n args: List or tuple with positional per-thread arguments for `merge_fn`.\n kwargs: Dict with keyword per-thread arguments for `merge_fn`.\n\n Returns:\n The return value of `merge_fn`, except for `PerReplica` values which are\n unpacked.\n \"\"\"\n require_replica_context(self)\n if kwargs is None:\n kwargs = {}\n return self._merge_call(merge_fn, args, kwargs)\n\n def _merge_call(self, merge_fn, args, kwargs):\n \"\"\"Default implementation for single replica.\"\"\"\n _push_per_thread_mode( # thread-local, so not needed with multiple threads\n distribution_strategy_context._CrossReplicaThreadMode(self._strategy)) # pylint: disable=protected-access\n try:\n return merge_fn(self._strategy, *args, **kwargs)\n finally:\n _pop_per_thread_mode()\n\n @property\n def num_replicas_in_sync(self):\n \"\"\"Returns number of replicas over which gradients are aggregated.\"\"\"\n return self._strategy.num_replicas_in_sync\n\n @property\n def replica_id_in_sync_group(self):\n \"\"\"Which replica is being defined, from 0 to `num_replicas_in_sync - 1`.\"\"\"\n require_replica_context(self)\n return self._replica_id_in_sync_group\n\n @property\n def strategy(self):\n \"\"\"The current `tf.distribute.Strategy` object.\"\"\"\n return self._strategy\n\n @property\n def devices(self):\n \"\"\"The devices this replica is to be executed on, as a tuple of strings.\"\"\"\n require_replica_context(self)\n return (device_util.current(),)\n\n def all_reduce(self, reduce_op, value):\n \"\"\"All-reduces the given `Tensor` nest across replicas.\n\n If `all_reduce` is called in any replica, it must be called in all replicas.\n The nested structure and `Tensor` shapes must be identical in all replicas.\n\n IMPORTANT: The ordering of communications must be identical in all replicas.\n\n Example with two replicas:\n Replica 0 `value`: {'a': 1, 'b': [40, 1]}\n Replica 1 `value`: {'a': 3, 'b': [ 2, 98]}\n\n If `reduce_op` == `SUM`:\n Result (on all replicas): {'a': 4, 'b': [42, 99]}\n\n If `reduce_op` == `MEAN`:\n Result (on all replicas): {'a': 2, 'b': [21, 49.5]}\n\n Args:\n reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum.\n value: The nested structure of `Tensor`s to all-reduced.\n The structure must be compatible with `tf.nest`.\n\n Returns:\n A `Tensor` nest with the reduced `value`s from each replica.\n \"\"\"\n def batch_all_reduce(strategy, *value_flat):\n return strategy.extended.batch_reduce_to(\n reduce_op, [(v, _batch_reduce_destination(v)) for v in value_flat])\n\n if reduce_op in [reduce_util.ReduceOp.SUM, reduce_util.ReduceOp.MEAN]:\n # TODO(cjfj): Work out why `batch_reduce` doesn't return the correct grad.\n @custom_gradient.custom_gradient\n def grad_wrapper(*xs):\n ys = self.merge_call(batch_all_reduce, args=xs)\n # The gradient of an all-sum is itself an all-sum (all-mean, likewise).\n return ys, lambda *dy_s: self.all_reduce(reduce_op, dy_s)\n return nest.pack_sequence_as(value, grad_wrapper(*nest.flatten(value)))\n else:\n # TODO(cjfj): Implement gradients for other reductions.\n reduced = nest.pack_sequence_as(\n value, self.merge_call(batch_all_reduce, args=nest.flatten(value)))\n return nest.map_structure(array_ops.prevent_gradient, reduced)\n\n # TODO(josh11b): Implement `start_all_reduce(method, t)` for efficient\n # all-reduce. It would return a function returning the result of reducing `t`\n # across all replicas. 
The caller would wait to call this function until they\n # needed the reduce result, allowing an efficient implementation:\n # * With eager execution, the reduction could be performed asynchronously\n # in the background, not blocking until the result was needed.\n # * When constructing a graph, it could batch up all reduction requests up\n # to that point that the first result is needed. Most likely this can be\n # implemented in terms of `merge_call()` and `batch_reduce_to()`.\n\n\ndef _batch_reduce_destination(x):\n \"\"\"Returns the destinations for batch all-reduce.\"\"\"\n if isinstance(x, ops.Tensor): # One device strategies.\n return x.device\n else:\n return x\n\n\n# ------------------------------------------------------------------------------\n\n\nclass _DefaultDistributionStrategy(DistributionStrategy):\n \"\"\"Default `tf.distribute.Strategy` if none is explicitly selected.\"\"\"\n\n def __init__(self):\n super(_DefaultDistributionStrategy, self).__init__(\n _DefaultDistributionExtended(self))\n\n\nclass _DefaultDistributionExtended(DistributionStrategyExtended):\n \"\"\"Implementation of _DefaultDistributionStrategy.\"\"\"\n\n def _scope(self, strategy):\n \"\"\"Context manager setting a variable creator and `self` as current.\"\"\"\n if distribution_strategy_context.has_strategy():\n raise RuntimeError(\"Must not nest tf.distribute.Strategy scopes.\")\n\n def creator(next_creator, *args, **kwargs):\n _require_strategy_scope_strategy(strategy)\n return next_creator(*args, **kwargs)\n\n return _CurrentDistributionContext(\n strategy, variable_scope.variable_creator_scope(creator))\n\n def colocate_vars_with(self, colocate_with_variable):\n \"\"\"Does not require `self.scope`.\"\"\"\n _require_strategy_scope_extended(self)\n return ops.colocate_with(colocate_with_variable)\n\n def variable_created_in_scope(self, v):\n return v._distribute_strategy is None # pylint: disable=protected-access\n\n def _make_dataset_iterator(self, dataset):\n return _DefaultDistributionExtended.DefaultInputIterator(dataset)\n\n def _make_input_fn_iterator(self,\n input_fn,\n replication_mode=InputReplicationMode.PER_WORKER):\n dataset = input_fn(InputContext())\n return _DefaultDistributionExtended.DefaultInputIterator(dataset)\n\n def _experimental_make_numpy_dataset(self, numpy_input, session):\n numpy_flat = nest.flatten(numpy_input)\n vars_flat = tuple(\n variable_scope.variable(array_ops.zeros(i.shape, i.dtype),\n trainable=False, use_resource=True)\n for i in numpy_flat\n )\n for v, i in zip(vars_flat, numpy_flat):\n numpy_dataset.init_var_from_numpy(v, i, session)\n vars_nested = nest.pack_sequence_as(numpy_input, vars_flat)\n return dataset_ops.Dataset.from_tensor_slices(vars_nested)\n\n def _broadcast_to(self, tensor, destinations):\n if destinations is None:\n return tensor\n else:\n raise NotImplementedError(\"TODO\")\n\n def _call_for_each_replica(self, fn, args, kwargs):\n with ReplicaContext(\n self._container_strategy(),\n replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):\n return fn(*args, **kwargs)\n\n def _reduce_to(self, reduce_op, value, destinations):\n # TODO(josh11b): Use destinations?\n del reduce_op, destinations\n return value\n\n def _update(self, var, fn, args, kwargs, group):\n # The implementations of _update() and _update_non_slot() are identical\n # except _update() passes `var` as the first argument to `fn()`.\n return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)\n\n def _update_non_slot(self, colocate_with, fn, args, kwargs, 
should_group):\n # TODO(josh11b): Figure out what we should be passing to UpdateContext()\n # once that value is used for something.\n with ops.colocate_with(colocate_with), UpdateContext(colocate_with):\n result = fn(*args, **kwargs)\n if should_group:\n return result\n else:\n return nest.map_structure(self._local_results, result)\n\n def read_var(self, replica_local_var):\n return array_ops.identity(replica_local_var)\n\n def _local_results(self, distributed_value):\n return (distributed_value,)\n\n def value_container(self, value):\n return value\n\n @property\n def _num_replicas_in_sync(self):\n return 1\n\n @property\n def worker_devices(self):\n raise RuntimeError(\"worker_devices() method unsupported by default \"\n \"tf.distribute.Strategy.\")\n\n @property\n def parameter_devices(self):\n raise RuntimeError(\"parameter_devices() method unsupported by default \"\n \"tf.distribute.Strategy.\")\n\n def non_slot_devices(self, var_list):\n return min(var_list, key=lambda x: x.name)\n\n # TODO(priyag): This should inherit from `InputIterator`, once dependency\n # issues have been resolved.\n class DefaultInputIterator(object):\n \"\"\"Default implementation of `InputIterator` for default strategy.\"\"\"\n\n def __init__(self, dataset):\n self._dataset = dataset\n if eager_context.executing_eagerly():\n self._iterator = dataset.make_one_shot_iterator()\n else:\n self._iterator = dataset.make_initializable_iterator()\n\n def get_next(self):\n return self._iterator.get_next()\n\n def initialize(self):\n if eager_context.executing_eagerly():\n self._iterator = self._dataset.make_one_shot_iterator()\n return []\n else:\n return [self._iterator.initializer]\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"Global and per-replica batching are equivalent for this strategy.\"\"\"\n return True\n\n\n# ------------------------------------------------------------------------------\n# We haven't yet implemented deserialization for DistributedVariables.\n# So here we catch any attempts to deserialize variables\n# when using distribution strategies.\n# pylint: disable=protected-access\n_original_from_proto = resource_variable_ops._from_proto_fn\n\n\ndef _from_proto_fn(v, import_scope=None):\n if distribution_strategy_context.has_strategy():\n raise NotImplementedError(\n \"Deserialization of variables is not yet supported when using a \"\n \"tf.distribute.Strategy.\")\n else:\n return _original_from_proto(v, import_scope=import_scope)\n\nresource_variable_ops._from_proto_fn = _from_proto_fn\n# pylint: enable=protected-access\n\n\n#-------------------------------------------------------------------------------\n# Shorthand for some methods from distribution_strategy_context.\n_push_per_thread_mode = distribution_strategy_context._push_per_thread_mode # pylint: disable=protected-access\n_get_per_thread_mode = distribution_strategy_context._get_per_thread_mode # pylint: disable=protected-access\n_pop_per_thread_mode = distribution_strategy_context._pop_per_thread_mode # pylint: disable=protected-access\n_get_default_replica_mode = (\n distribution_strategy_context._get_default_replica_mode) # pylint: disable=protected-access\n"
] | [
[
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.distribute.numpy_dataset.init_var_from_numpy",
"tensorflow.python.distribute.distribution_strategy_context.has_strategy",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.distribute.distribution_strategy_context._InReplicaThreadMode",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.context",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.platform.tf_logging.log_first_n",
"tensorflow.python.distribute.distribution_strategy_context._CrossReplicaThreadMode",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.distribute.device_util.current",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
Naqu6/jax | [
"6411f8a03388ce63eb365188f2e2880815745125"
] | [
"tests/lax_test.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections\nfrom functools import partial\nimport itertools\nimport operator\nimport unittest\nfrom unittest import SkipTest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as np\n\nimport jax\nimport jax.numpy as jnp\nfrom jax import core\nfrom jax._src import dtypes\nfrom jax import lax\nfrom jax._src import test_util as jtu\nfrom jax import tree_util\nfrom jax._src import lax_reference\nfrom jax.test_util import check_grads\nimport jax.util\nfrom jax._src.util import prod\n\nfrom jax._src.lax.lax import _device_put_raw\n\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\n\n### lax tests\n\n# For standard unops and binops, we can generate a large number of tests on\n# arguments of appropriate shapes and dtypes using the following table.\n\nfloat_dtypes = jtu.dtypes.all_floating\ncomplex_elem_dtypes = jtu.dtypes.floating\ncomplex_dtypes = jtu.dtypes.complex\ninexact_dtypes = jtu.dtypes.all_inexact\nint_dtypes = jtu.dtypes.all_integer\nuint_dtypes = jtu.dtypes.all_unsigned\nbool_dtypes = jtu.dtypes.boolean\ndefault_dtypes = float_dtypes + int_dtypes\nall_dtypes = float_dtypes + complex_dtypes + int_dtypes + uint_dtypes + bool_dtypes\npython_scalar_types = [bool, int, float, complex]\n\ncompatible_shapes = [[(3,)], [(3, 4), (3, 1), (1, 4)], [(2, 3, 4), (2, 1, 4)]]\n\n# We check cases where the preferred type is at least as wide as the input\n# type and where both are either both floating-point or both integral,\n# which are the only supported configurations.\npreferred_type_combinations = [\n (np.float16, np.float16), (np.float16, np.float32), (np.float16, np.float64),\n (dtypes.bfloat16, dtypes.bfloat16), (dtypes.bfloat16, np.float32),\n (dtypes.bfloat16, np.float64), (np.float32, np.float32), (np.float32, np.float64),\n (np.float64, np.float64), (np.int8, np.int8), (np.int8, np.int16), (np.int8, np.int32),\n (np.int8, np.int64), (np.int16, np.int16), (np.int16, np.int32), (np.int16, np.int64),\n (np.int32, np.int32), (np.int32, np.int64), (np.int64, np.int64),\n (np.complex64, np.complex64), (np.complex64, np.complex128), (np.complex128, np.complex128)]\n\n\nOpRecord = collections.namedtuple(\n \"OpRecord\", [\"op\", \"nargs\", \"dtypes\", \"rng_factory\", \"tol\"])\n\ndef op_record(op, nargs, dtypes, rng_factory, tol=None):\n return OpRecord(op, nargs, dtypes, rng_factory, tol)\n\nLAX_OPS = [\n op_record(\"neg\", 1, default_dtypes + complex_dtypes, jtu.rand_small),\n op_record(\"sign\", 1, default_dtypes + uint_dtypes, jtu.rand_small),\n op_record(\"floor\", 1, float_dtypes, jtu.rand_small),\n op_record(\"ceil\", 1, float_dtypes, jtu.rand_small),\n op_record(\"round\", 1, float_dtypes, jtu.rand_default),\n op_record(\"nextafter\", 2, [f for f in float_dtypes if f != dtypes.bfloat16],\n jtu.rand_default, tol=0),\n\n op_record(\"is_finite\", 1, float_dtypes, jtu.rand_small),\n\n op_record(\"exp\", 1, float_dtypes + complex_dtypes, 
jtu.rand_small),\n # TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32\n # precision.\n op_record(\"expm1\", 1, float_dtypes + complex_dtypes, jtu.rand_small,\n {np.float64: 1e-8}),\n op_record(\"log\", 1, float_dtypes + complex_dtypes, jtu.rand_positive),\n op_record(\"log1p\", 1, float_dtypes + complex_dtypes, jtu.rand_positive),\n # TODO(b/142975473): on CPU, tanh for complex128 is only accurate to\n # ~float32 precision.\n # TODO(b/143135720): on GPU, tanh has only ~float32 precision.\n op_record(\"tanh\", 1, float_dtypes + complex_dtypes, jtu.rand_small,\n {np.float64: 1e-9, np.complex128: 1e-7}),\n op_record(\"sin\", 1, float_dtypes + complex_dtypes, jtu.rand_default),\n op_record(\"cos\", 1, float_dtypes + complex_dtypes, jtu.rand_default),\n op_record(\"atan2\", 2, float_dtypes, jtu.rand_default),\n\n op_record(\"sqrt\", 1, float_dtypes, jtu.rand_positive),\n op_record(\"sqrt\", 1, complex_dtypes, jtu.rand_default),\n op_record(\"rsqrt\", 1, float_dtypes, jtu.rand_positive),\n op_record(\"rsqrt\", 1, complex_dtypes, jtu.rand_default),\n op_record(\"cbrt\", 1, float_dtypes, jtu.rand_default),\n op_record(\"square\", 1, float_dtypes + complex_dtypes, jtu.rand_default),\n op_record(\"reciprocal\", 1, float_dtypes + complex_dtypes, jtu.rand_positive),\n op_record(\"tan\", 1, float_dtypes + complex_dtypes, jtu.rand_default, {np.float32: 3e-5}),\n op_record(\"asin\", 1, float_dtypes + complex_dtypes, jtu.rand_small),\n op_record(\"acos\", 1, float_dtypes + complex_dtypes, jtu.rand_small),\n op_record(\"atan\", 1, float_dtypes + complex_dtypes, jtu.rand_small),\n op_record(\"asinh\", 1, float_dtypes + complex_dtypes, jtu.rand_default,\n tol={np.complex64: 1E-4, np.complex128: 1E-5}),\n op_record(\"acosh\", 1, float_dtypes + complex_dtypes, jtu.rand_positive),\n # TODO(b/155331781): atanh has only ~float precision\n op_record(\"atanh\", 1, float_dtypes + complex_dtypes, jtu.rand_small, {np.float64: 1e-9}),\n op_record(\"sinh\", 1, float_dtypes + complex_dtypes, jtu.rand_default),\n op_record(\"cosh\", 1, float_dtypes + complex_dtypes, jtu.rand_default),\n op_record(\"lgamma\", 1, float_dtypes, jtu.rand_positive,\n {np.float32: 1e-3 if jtu.device_under_test() == \"tpu\" else 1e-5,\n np.float64: 1e-14}),\n op_record(\"digamma\", 1, float_dtypes, jtu.rand_positive,\n {np.float64: 1e-14}),\n op_record(\"betainc\", 3, float_dtypes, jtu.rand_positive,\n {np.float64: 1e-14}),\n op_record(\"igamma\", 2,\n [f for f in float_dtypes if f not in [dtypes.bfloat16, np.float16]],\n jtu.rand_positive, {np.float64: 1e-14}),\n op_record(\"igammac\", 2,\n [f for f in float_dtypes if f not in [dtypes.bfloat16, np.float16]],\n jtu.rand_positive, {np.float64: 1e-14}),\n op_record(\"erf\", 1, float_dtypes, jtu.rand_small),\n op_record(\"erfc\", 1, float_dtypes, jtu.rand_small),\n # TODO(b/142976030): the approximation of erfinf used by XLA is only\n # accurate to float32 precision.\n op_record(\"erf_inv\", 1, float_dtypes, jtu.rand_small,\n {np.float64: 1e-9}),\n op_record(\"bessel_i0e\", 1, float_dtypes, jtu.rand_default),\n op_record(\"bessel_i1e\", 1, float_dtypes, jtu.rand_default),\n\n op_record(\"real\", 1, complex_dtypes, jtu.rand_default),\n op_record(\"imag\", 1, complex_dtypes, jtu.rand_default),\n op_record(\"complex\", 2, complex_elem_dtypes, jtu.rand_default),\n op_record(\"conj\", 1, complex_elem_dtypes + complex_dtypes,\n jtu.rand_default),\n op_record(\"abs\", 1, default_dtypes + complex_dtypes, jtu.rand_default),\n op_record(\"pow\", 2, float_dtypes + complex_dtypes, 
jtu.rand_positive),\n\n op_record(\"bitwise_and\", 2, bool_dtypes, jtu.rand_small),\n op_record(\"bitwise_not\", 1, bool_dtypes, jtu.rand_small),\n op_record(\"bitwise_or\", 2, bool_dtypes, jtu.rand_small),\n op_record(\"bitwise_xor\", 2, bool_dtypes, jtu.rand_small),\n op_record(\"population_count\", 1, int_dtypes + uint_dtypes, jtu.rand_int),\n op_record(\"clz\", 1, int_dtypes + uint_dtypes, jtu.rand_int),\n\n op_record(\"add\", 2, default_dtypes + complex_dtypes, jtu.rand_small),\n op_record(\"sub\", 2, default_dtypes + complex_dtypes, jtu.rand_small),\n op_record(\"mul\", 2, default_dtypes + complex_dtypes, jtu.rand_small),\n op_record(\"div\", 2, default_dtypes + complex_dtypes, jtu.rand_nonzero),\n op_record(\"rem\", 2, default_dtypes, jtu.rand_nonzero),\n\n op_record(\"max\", 2, all_dtypes, jtu.rand_small),\n op_record(\"min\", 2, all_dtypes, jtu.rand_small),\n\n op_record(\"eq\", 2, all_dtypes, jtu.rand_some_equal),\n op_record(\"ne\", 2, all_dtypes, jtu.rand_small),\n op_record(\"ge\", 2, default_dtypes, jtu.rand_small),\n op_record(\"gt\", 2, default_dtypes, jtu.rand_small),\n op_record(\"le\", 2, default_dtypes, jtu.rand_small),\n op_record(\"lt\", 2, default_dtypes, jtu.rand_small),\n]\n\n\nclass LaxTest(jtu.JaxTestCase):\n \"\"\"Numerical tests for LAX operations.\"\"\"\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.op, shapes, itertools.repeat(dtype)),\n \"op_name\": rec.op, \"rng_factory\": rec.rng_factory, \"shapes\": shapes,\n \"dtype\": dtype}\n for shape_group in compatible_shapes\n for shapes in itertools.combinations_with_replacement(shape_group, rec.nargs)\n for dtype in rec.dtypes)\n for rec in LAX_OPS))\n def testOp(self, op_name, rng_factory, shapes, dtype):\n rng = rng_factory(self.rng())\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n op = getattr(lax, op_name)\n self._CompileAndCheck(op, args_maker)\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list(\n {\"testcase_name\": jtu.format_test_name_suffix(\n rec.op, shapes, itertools.repeat(dtype)),\n \"op_name\": rec.op, \"rng_factory\": rec.rng_factory, \"shapes\": shapes,\n \"dtype\": dtype, \"tol\": rec.tol}\n for shape_group in compatible_shapes\n for shapes in itertools.combinations_with_replacement(shape_group, rec.nargs)\n for dtype in rec.dtypes)\n for rec in LAX_OPS))\n def testOpAgainstNumpy(self, op_name, rng_factory, shapes, dtype, tol):\n if (not config.x64_enabled and op_name == \"nextafter\"\n and dtype == np.float64):\n raise SkipTest(\"64-bit mode disabled\")\n rng = rng_factory(self.rng())\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n op = getattr(lax, op_name)\n numpy_op = getattr(lax_reference, op_name)\n self._CheckAgainstNumpy(numpy_op, op, args_maker, tol=tol)\n\n # TODO test shift_left, shift_right_arithmetic, shift_right_logical\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}_weak_type={}\".format(\n from_dtype, to_dtype, weak_type),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype, \"weak_type\": weak_type}\n for from_dtype, to_dtype in itertools.product(\n [None, np.float32, np.int32, \"float32\", \"int32\"], repeat=2)\n for weak_type in [True, False]))\n def testConvertElementType(self, from_dtype, to_dtype, weak_type):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng((2, 3), from_dtype)]\n op = lambda x: lax._convert_element_type(x, 
to_dtype, weak_type)\n self._CompileAndCheck(op, args_maker)\n\n x = rng((1,), from_dtype)\n out = op(x)\n self.assertEqual(out.dtype, dtypes.canonicalize_dtype(to_dtype or x.dtype))\n self.assertEqual(out.aval.weak_type, weak_type)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}\"\n .format(from_dtype, to_dtype),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype}\n for from_dtype, to_dtype in itertools.product(\n [np.float32, np.int32, \"float32\", \"int32\"], repeat=2)))\n def testConvertElementTypeAgainstNumpy(self, from_dtype, to_dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng((2, 3), from_dtype)]\n op = lambda x: lax.convert_element_type(x, to_dtype)\n numpy_op = lambda x: lax_reference.convert_element_type(x, to_dtype)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}\"\n .format(from_dtype, to_dtype),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype}\n for from_dtype, to_dtype in itertools.product(\n [np.float32, np.int32, \"float32\", \"int32\"], repeat=2)))\n def testBitcastConvertType(self, from_dtype, to_dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng((2, 3), from_dtype)]\n op = lambda x: lax.bitcast_convert_type(x, to_dtype)\n self._CompileAndCheck(op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}\"\n .format(from_dtype, to_dtype),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype}\n for from_dtype, to_dtype in itertools.product(\n [np.float32, np.int32, \"float32\", \"int32\"], repeat=2)))\n def testBitcastConvertTypeAgainstNumpy(self, from_dtype, to_dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng((2, 3), from_dtype)]\n op = lambda x: lax.bitcast_convert_type(x, to_dtype)\n numpy_op = lambda x: lax_reference.bitcast_convert_type(x, to_dtype)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_from_dtype={}_to_dtype={}_weak_type={}\"\n .format(from_dtype, to_dtype, weak_type),\n \"from_dtype\": from_dtype, \"to_dtype\": to_dtype, \"weak_type\": weak_type}\n for from_dtype, to_dtype in itertools.product(\n [np.float32, np.int32, \"float32\", \"int32\"], repeat=2)\n for weak_type in [True, False]))\n def testBitcastConvertWeakType(self, from_dtype, to_dtype, weak_type):\n rng = jtu.rand_default(self.rng())\n x_in = lax._convert_element_type(rng((2, 3), from_dtype),\n weak_type=weak_type)\n op = lambda x: lax.bitcast_convert_type(x, to_dtype)\n self.assertEqual(dtypes.is_weakly_typed(x_in), weak_type)\n x_out = op(x_in)\n self.assertEqual(dtypes.is_weakly_typed(x_out), False)\n x_out_jit = jax.jit(op)(x_in)\n self.assertEqual(dtypes.is_weakly_typed(x_out_jit), False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_min_shape={}_operand_shape={}_max_shape={}\".format(\n jtu.format_shape_dtype_string(min_shape, dtype),\n jtu.format_shape_dtype_string(operand_shape, dtype),\n jtu.format_shape_dtype_string(max_shape, dtype)),\n \"min_shape\": min_shape, \"operand_shape\": operand_shape,\n \"max_shape\": max_shape, \"dtype\": dtype}\n for min_shape, operand_shape, max_shape in [\n [(), (2, 3), ()],\n [(2, 3), (2, 3), ()],\n [(), (2, 3), (2, 3)],\n [(2, 3), (2, 3), (2, 3)],\n ]\n for dtype in default_dtypes))\n def testClamp(self, min_shape, 
operand_shape, max_shape, dtype):\n rng = jtu.rand_default(self.rng())\n shapes = [min_shape, operand_shape, max_shape]\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n self._CompileAndCheck(lax.clamp, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_min_shape={}_operand_shape={}_max_shape={}\".format(\n jtu.format_shape_dtype_string(min_shape, dtype),\n jtu.format_shape_dtype_string(operand_shape, dtype),\n jtu.format_shape_dtype_string(max_shape, dtype)),\n \"min_shape\": min_shape, \"operand_shape\": operand_shape,\n \"max_shape\": max_shape, \"dtype\": dtype}\n for min_shape, operand_shape, max_shape in [\n [(), (2, 3), ()],\n [(2, 3), (2, 3), ()],\n [(), (2, 3), (2, 3)],\n [(2, 3), (2, 3), (2, 3)],\n ]\n for dtype in default_dtypes))\n def testClampAgainstNumpy(self, min_shape, operand_shape, max_shape, dtype):\n rng = jtu.rand_default(self.rng())\n shapes = [min_shape, operand_shape, max_shape]\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n self._CheckAgainstNumpy(lax_reference.clamp, lax.clamp, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dim={}_baseshape=[{}]_dtype={}_narrs={}\".format(\n dim, \",\".join(str(d) for d in base_shape), np.dtype(dtype).name,\n num_arrs),\n \"dim\": dim, \"base_shape\": base_shape, \"dtype\": dtype, \"num_arrs\": num_arrs}\n for num_arrs in [3]\n for dtype in default_dtypes\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for dim in range(len(base_shape))))\n def testConcatenate(self, dim, base_shape, dtype, num_arrs):\n rng = jtu.rand_default(self.rng())\n shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n op = lambda *args: lax.concatenate(args, dim)\n self._CompileAndCheck(op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dim={}_baseshape=[{}]_dtype={}_narrs={}\".format(\n dim, \",\".join(str(d) for d in base_shape), np.dtype(dtype).name,\n num_arrs),\n \"dim\": dim, \"base_shape\": base_shape, \"dtype\": dtype, \"num_arrs\": num_arrs}\n for num_arrs in [3]\n for dtype in default_dtypes\n for base_shape in [(4,), (3, 4), (2, 3, 4)]\n for dim in range(len(base_shape))))\n def testConcatenateAgainstNumpy(self, dim, base_shape, dtype, num_arrs):\n rng = jtu.rand_default(self.rng())\n shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]\n for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]\n args_maker = lambda: [rng(shape, dtype) for shape in shapes]\n op = lambda *args: lax.concatenate(args, dim)\n numpy_op = lambda *args: lax_reference.concatenate(args, dim)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([2, 3], repeat=3)]\n for dtype in float_dtypes\n for strides in [(1, 1), (1, 2), (2, 1)]\n for padding in [\"VALID\", \"SAME\"]))\n def testConv(self, lhs_shape, rhs_shape, dtype, strides, padding):\n rng = jtu.rand_small(self.rng())\n args_maker = 
lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv(lhs, rhs, strides, padding)\n\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_preferred_element_type={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n preferred_element_type.__name__),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"preferred_element_type\": preferred_element_type}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([2, 3], repeat=3)]\n for dtype, preferred_element_type in preferred_type_combinations))\n def testConvPreferredElement(self, lhs_shape, rhs_shape, dtype, preferred_element_type):\n if (not config.x64_enabled and\n (dtype == np.float64 or preferred_element_type == np.float64\n or dtype == np.int64 or preferred_element_type == np.int64\n or dtype == np.complex128 or preferred_element_type == np.complex128)):\n raise SkipTest(\"64-bit mode disabled\")\n if jtu.device_under_test() == \"gpu\" and np.issubdtype(dtype, np.integer):\n # TODO(b/183565702): Support integer convolutions on CPU/GPU.\n raise SkipTest(\"Integer convolution not yet supported on GPU\")\n if (jtu.device_under_test() == \"tpu\" and\n (dtype == np.complex128 or preferred_element_type == np.complex128)):\n raise SkipTest(\"np.complex128 is not yet supported on TPU\")\n # x64 implementation is only accurate to ~float32 precision for this case.\n if dtype == np.complex64 and preferred_element_type == np.complex128:\n tol = 1e-5\n else:\n tol = {np.float64: 1e-14}\n rng = jtu.rand_default(self.rng())\n x = rng(lhs_shape, dtype)\n y = rng(rhs_shape, dtype)\n # We first compute the conv when both inputs are a lower-precision type and\n # preferred_element_type is a higher-precision type. We then compute results\n # where the inputs are first upcast to the higher-precision type and no\n # `preferred_element_type` is given. 
We expect the result to be extremely\n # similar given the semantics of `preferred_element_type`.\n result_with_preferred_type = lax.conv(\n x, y, (1, 1), \"VALID\",\n preferred_element_type=preferred_element_type)\n result_with_upcast_inputs = lax.conv(\n x.astype(preferred_element_type),\n y.astype(preferred_element_type),\n (1, 1), \"VALID\")\n self.assertArraysAllClose(\n result_with_preferred_type, result_with_upcast_inputs, rtol=tol, atol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([2, 3], repeat=3)]\n for dtype in float_dtypes\n for strides in [(1, 1), (1, 2), (2, 1)]\n for padding in [\"VALID\", \"SAME\"]))\n def testConvAgainstNumpy(self, lhs_shape, rhs_shape, dtype, strides, padding):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n op = lambda lhs, rhs: lax.conv(lhs, rhs, strides, padding)\n numpy_op = lambda lhs, rhs: lax_reference.conv(lhs, rhs, strides, padding)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\"\n \"_lhs_dilation={}_rhs_dilation={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding, lhs_dilation, rhs_dilation),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"lhs_dilation\": lhs_dilation,\n \"rhs_dilation\": rhs_dilation}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([1, 2, 3], repeat=3)]\n for dtype in float_dtypes\n for strides in [(1, 1), (1, 2), (2, 1)]\n for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))]\n for lhs_dilation, rhs_dilation in itertools.product(\n [(1, 1), (1, 2), (2, 2)], repeat=2)))\n def testConvWithGeneralPadding(self, lhs_shape, rhs_shape, dtype, strides,\n padding, lhs_dilation, rhs_dilation):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_with_general_padding(\n lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)\n\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\"\n \"_lhs_dilation={}_rhs_dilation={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding, lhs_dilation, rhs_dilation),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"lhs_dilation\": lhs_dilation,\n \"rhs_dilation\": rhs_dilation}\n for lhs_shape, rhs_shape in [\n ((b, i, 9, 10), (j, i, 4, 5))\n for b, i, j in itertools.product([1, 2, 3], repeat=3)]\n for dtype in [np.float32] for strides in [(1, 1), (1, 2), (2, 1)]\n for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))]\n for lhs_dilation, rhs_dilation in itertools.product(\n [(1, 1), (1, 2), (2, 2)], repeat=2)))\n def 
testConvWithGeneralPaddingAgainstNumpy(\n self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dilation,\n rhs_dilation):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_with_general_padding(\n lhs, rhs, strides, padding, lhs_dilation, rhs_dilation,\n precision=lax.Precision.HIGHEST)\n\n def numpy_fun(lhs, rhs):\n return lax_reference.conv_with_general_padding(\n lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)\n\n self._CheckAgainstNumpy(numpy_fun, fun, args_maker)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\"\n \"_lhs_dilation={}_rhs_dilation={}\"\n \"_dims={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n strides, padding, lhs_dilation, rhs_dilation,\n \",\".join(dim_nums)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"lhs_dilation\": lhs_dilation,\n \"rhs_dilation\": rhs_dilation, \"dimension_numbers\": dim_nums,\n \"feature_group_count\": feature_group_count,\n \"batch_group_count\": batch_group_count, \"perms\": perms\n } for batch_group_count, feature_group_count in s([(1, 1), (2, 1), (1, 2)])\n for lhs_shape, rhs_shape in s([\n ((b * batch_group_count, i * feature_group_count, 9, w),\n (j * feature_group_count * batch_group_count, i, 4, 5))\n for w in [0, 10]\n for b, i, j in itertools.product([2, 3], repeat=3)])\n for dtype in s(all_dtypes)\n for strides in s([(1, 1), (2, 1)])\n for padding in s([((1, 2), (2, 0)), ((10, 8), (7, 13))])\n for lhs_dilation, rhs_dilation in s(itertools.product(\n [(1, 1), (1, 2), (1, 4)], repeat=2))\n for dim_nums, perms in s([\n ((\"NCHW\", \"OIHW\", \"NCHW\"), ([0, 1, 2, 3], [0, 1, 2, 3])),\n ((\"NHWC\", \"HWIO\", \"NHWC\"), ([0, 2, 3, 1], [2, 3, 1, 0])),\n ((\"NCHW\", \"HWIO\", \"NHWC\"), ([0, 1, 2, 3], [2, 3, 1, 0])),\n ]))))\n def testConvGeneralDilated(self, lhs_shape, rhs_shape, dtype, strides,\n padding, lhs_dilation, rhs_dilation,\n feature_group_count, batch_group_count,\n dimension_numbers, perms):\n if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):\n # TODO(b/183565702): Support integer convolutions on CPU/GPU.\n if jtu.device_under_test() == \"gpu\":\n raise SkipTest(\"Integer convolution not yet supported on GPU\")\n rng = jtu.rand_small(self.rng())\n lhs_perm, rhs_perm = perms # permute to compatible shapes\n\n def args_maker():\n return [lax.transpose(rng(lhs_shape, dtype), lhs_perm),\n lax.transpose(rng(rhs_shape, dtype), rhs_perm)]\n\n def fun(lhs, rhs):\n return lax.conv_general_dilated(\n lhs, rhs, strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers, feature_group_count=feature_group_count,\n batch_group_count=batch_group_count)\n\n self._CompileAndCheck(fun, args_maker)\n\n def testConvGeneralDilatedPatchesOverlapping1D(self):\n lhs = np.array([[1]], np.float32).reshape((1, 1))\n patches = lax.conv_general_dilated_patches(\n lhs=lhs,\n filter_shape=(),\n window_strides=(),\n padding='SAME'\n )\n self.assertAllClose(lhs, patches)\n\n dn = ('NHC', 'OIH', 'NHC')\n lhs = np.array([1, 2, 3, 4, 5], np.float32).reshape((1, -1, 1))\n\n patches = lax.conv_general_dilated_patches(\n lhs=lhs,\n filter_shape=(2,),\n window_strides=(2,),\n padding='VALID',\n dimension_numbers=dn\n )\n self.assertAllClose(\n np.array([[1, 2],\n [3, 4]], np.float32).reshape((1, 
2, 2)), patches)\n\n patches = lax.conv_general_dilated_patches(\n lhs=lhs,\n filter_shape=(3,),\n window_strides=(1,),\n padding='SAME',\n dimension_numbers=dn\n )\n self.assertAllClose(\n np.array([[0, 1, 2],\n [1, 2, 3],\n [2, 3, 4],\n [3, 4, 5],\n [4, 5, 0]], np.float32).reshape((1, 5, 3)), patches)\n\n patches = lax.conv_general_dilated_patches(\n lhs=lhs,\n filter_shape=(3,),\n window_strides=(1,),\n padding='SAME',\n rhs_dilation=(2,),\n dimension_numbers=dn\n )\n self.assertAllClose(\n np.array([[0, 1, 3],\n [0, 2, 4],\n [1, 3, 5],\n [2, 4, 0],\n [3, 5, 0]], np.float32).reshape((1, 5, 3)), patches)\n\n def testConvGeneralDilatedPatchesOverlapping2D(self):\n lhs = np.array([[1, 2, 3],\n [4, 5, 6]], np.float32).reshape((1, 2, 3, 1))\n patches = lax.conv_general_dilated_patches(\n lhs=lhs,\n filter_shape=(2, 2),\n window_strides=(1, 1),\n padding='SAME',\n dimension_numbers=('NHWC', 'OIHW', 'NHWC')\n )\n self.assertAllClose(np.array([[1, 2, 4, 5],\n [2, 3, 5, 6],\n [3, 0, 6, 0],\n [4, 5, 0, 0],\n [5, 6, 0, 0],\n [6, 0, 0, 0]],\n np.float32).reshape((1, 2, 3, 4)), patches)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_filter_shape={}_strides={}_padding={}\"\n \"_dims={}_precision={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(filter_shape, dtype),\n strides,\n padding,\n \"None\" if dim_nums is None else \",\".join(dim_nums),\n precision\n ),\n \"lhs_shape\": lhs_shape,\n \"filter_shape\": filter_shape,\n \"dtype\": dtype,\n \"strides\": strides,\n \"padding\": padding,\n \"dimension_numbers\": dim_nums,\n \"precision\": precision\n }\n for dtype in all_dtypes\n for lhs_shape, filter_shape, strides, padding, dim_nums in [\n ((2, 5), (), (), [], (\"NC\", \"OI\", \"CN\")),\n ((2, 3, 4), (2,), (2,), [(0, 2)], (\"CNH\", \"OHI\", \"HNC\")),\n ((3, 1, 4, 5), (1, 3), (1, 3), [(3, 1), (2, 2)],\n (\"NCHW\", \"OIHW\", \"NCHW\")),\n ((3, 2, 5, 6), (4, 3), (4, 3), [(5, 2), (2, 4)],\n None),\n ((1, 2, 3, 4), (1, 1), (1, 1), [(0, 0), (0, 0)],\n (\"NCWH\", \"OHWI\", \"CNHW\")),\n ((1, 2, 3, 4), (3, 2), (1, 1), [(0, 0), (0, 0)],\n (\"CWHN\", \"HOWI\", \"NCHW\")),\n ((2, 3, 4, 5, 6), (2, 1, 3), (2, 1, 3), [(1, 2), (5, 3), (3, 5)],\n (\"NHWDC\", \"HDIWO\", \"DCWNH\"))\n ]\n for precision in [None,\n lax.Precision.DEFAULT,\n lax.Precision.HIGH,\n lax.Precision.HIGHEST]\n ))\n def testConvGeneralDilatedPatchesNonOverlapping(self,\n lhs_shape,\n filter_shape,\n dtype,\n strides,\n padding,\n dimension_numbers,\n precision):\n if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):\n # TODO(b/183565702): Support integer convolutions on CPU/GPU.\n if jtu.device_under_test() == \"gpu\":\n raise SkipTest(\"Integer convolution not yet supported on GPU\")\n rng = jtu.rand_small(self.rng())\n lhs = rng(lhs_shape, dtype)\n\n if dimension_numbers is None:\n lhs_spec, rhs_spec, out_spec = \"NCHW\", \"OIHW\", \"NCHW\"\n else:\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n\n filter_spec = ''.join(c for c in rhs_spec if c not in ('I', 'O'))\n patches_spec = out_spec.replace('C', 'C' + filter_spec.lower())\n\n full_padding = []\n for c in lhs_spec:\n if c in ('N', 'C'):\n full_padding += [(0, 0)]\n else:\n full_padding += [padding[filter_spec.index(c)]]\n\n lhs_padded = np.pad(lhs, full_padding, 'constant')\n out = lax.transpose(lhs_padded, [lhs_spec.index(c) for c in out_spec])\n\n patches = lax.conv_general_dilated_patches(\n lhs=lhs,\n filter_shape=filter_shape,\n window_strides=strides,\n 
padding=padding,\n dimension_numbers=dimension_numbers,\n precision=precision\n )\n\n source = []\n\n # Test that output spatial shape is factored into `#patches x patch_size`.\n for c in out_spec:\n out_c = out.shape[out_spec.index(c)]\n patch_c = patches.shape[out_spec.index(c)]\n\n if c == 'N':\n self.assertEqual(out_c, patch_c)\n elif c == 'C':\n self.assertEqual(out_c * np.prod(filter_shape), patch_c)\n else:\n self.assertEqual(out_c, patch_c * filter_shape[filter_spec.index(c)])\n\n source += [patches_spec.index(c), patches_spec.index(c.lower())]\n\n # Test that stacking patches together gives the source image, padded.\n c = out_spec.index('C')\n patches = patches.reshape(patches.shape[:c] +\n (lhs_shape[lhs_spec.index('C')],) +\n filter_shape +\n patches.shape[c + 1:]\n )\n patches = np.moveaxis(patches, source, range(len(source)))\n for i in range(len(filter_shape)):\n patches = patches.reshape(patches.shape[:i] + (-1,) +\n patches.shape[2 + i:])\n patches = np.moveaxis(\n patches,\n range(len(filter_shape)),\n [out_spec.index(c) for c in out_spec if c not in ('N', 'C')])\n self.assertAllClose(out, patches)\n\n # TODO(mattjj): test conv_general_dilated against numpy\n\n def testConv0DIsDot(self):\n rng = jtu.rand_default(self.rng())\n def args_maker():\n return [rng((10, 5), np.float32), rng((5, 7), np.float32)]\n jnp_fun = partial(lax.conv_general_dilated, window_strides=(),\n padding='VALID', dimension_numbers=('NC', 'IO', 'NC'))\n self._CompileAndCheck(jnp_fun, args_maker)\n self._CheckAgainstNumpy(np.dot, jnp_fun, args_maker, tol=.1)\n\n def testGradConv0D(self):\n # Reproduces a failure in neural_tangents not caught in our presubmit tests\n # See cl/367416742.\n lhs = np.ones((2, 5), dtype=np.float32)\n rhs = np.ones((5, 10), dtype=np.float32)\n\n def f_jax(lhs, rhs):\n return lax.conv_general_dilated(\n lhs, rhs, window_strides=(),\n padding=(), lhs_dilation=(), rhs_dilation=(),\n dimension_numbers=lax.ConvDimensionNumbers((0, 1), (1, 0), (0, 1)),\n batch_group_count=1, feature_group_count=1, precision=None,\n preferred_element_type=None)\n res, pullback = jax.vjp(f_jax, lhs, rhs)\n grad = pullback(np.ones_like(res))\n self.assertAllClose((lhs * 10., rhs * 2.), grad)\n\n @staticmethod\n def _conv_transpose_via_grad(data, kernel, strides, padding,\n rhs_dilation=None, dimension_numbers=None):\n \"\"\"Helper method: calculates conv transpose via grad for testing.\"\"\"\n assert len(data.shape) == len(kernel.shape)\n nspatial = len(data.shape) - 2\n one = (1,) * nspatial\n rhs_dilation = rhs_dilation or one\n dn = lax.conv_dimension_numbers(data.shape, kernel.shape,\n dimension_numbers)\n in_shape = np.take(data.shape, dn.lhs_spec)\n in_sdims = in_shape[2:]\n k_shape = np.take(kernel.shape, dn.rhs_spec)\n k_sdims = k_shape[2:]\n e_k_sdims = [(k-1) * r + 1 for k, r in zip(k_sdims, rhs_dilation)]\n if padding == 'VALID':\n o_sdims = [in_sdims[i]*strides[i] + max(e_k_sdims[i]-strides[i],0)\n for i in range(nspatial)]\n elif padding == 'SAME':\n o_sdims = [in_sdims[i]*strides[i] for i in range(nspatial)]\n o_shape = [in_shape[0], k_shape[1]] + o_sdims\n out_spec_inv = [x[0] for x in\n sorted(enumerate(dn.out_spec), key=lambda x: x[1])]\n o_layout = np.take(np.array(o_shape), out_spec_inv)\n placeholder = np.ones(o_layout, data.dtype)\n conv = lambda x: lax.conv_general_dilated(x, kernel, strides, padding,\n one, rhs_dilation, dn)\n _, g = jax.vjp(conv, placeholder)\n return g(data)[0]\n\n @staticmethod\n def _transpose_conv_kernel(data, kernel, dimension_numbers):\n dn = 
lax.conv_dimension_numbers(data.shape, kernel.shape,\n dimension_numbers)\n spatial_axes = np.array(dn.rhs_spec)[2:]\n for axis in spatial_axes:\n kernel = np.flip(kernel, axis)\n kernel = np.swapaxes(kernel, dn.rhs_spec[0], dn.rhs_spec[1])\n return kernel\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rhs_dilation\": rhs_dilation,\n \"dspec\": dspec}\n for lhs_shape, rhs_shape in [\n ((b, 9, 10, i), (k, k, j, i)) # NB: i,j flipped in RHS for transpose\n for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n for dtype in float_dtypes\n for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]\n for padding in [\"VALID\", \"SAME\"]\n for dspec in [('NHWC', 'HWIO', 'NHWC'),]\n for rhs_dilation in [None, (2, 2)]))\n @jtu.skip_on_flag(\"jax_skip_slow_tests\", True)\n def testConvTranspose2DT(self, lhs_shape, rhs_shape, dtype, strides,\n padding, dspec, rhs_dilation):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n # NB: this test calculates conv_transpose performing identically to the\n # lhs-grad of conv.\n def fun(lhs, rhs):\n return lax.conv_transpose(lhs, rhs, strides, padding,\n rhs_dilation=rhs_dilation,\n dimension_numbers=dspec,\n transpose_kernel=True)\n\n def fun_via_grad(lhs, rhs):\n return self._conv_transpose_via_grad(lhs, rhs, strides, padding,\n rhs_dilation=rhs_dilation,\n dimension_numbers=dspec)\n\n # NB: below just checks for agreement, we're not calling numpy.\n self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rhs_dilation\": rhs_dilation,\n \"dspec\": dspec}\n for lhs_shape, rhs_shape in [\n ((b, 9, 10, i), (k, k, i, j))\n for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n for dtype in float_dtypes\n for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]\n for padding in [\"VALID\", \"SAME\"]\n for dspec in [('NHWC', 'HWIO', 'NHWC'),]\n for rhs_dilation in [None, (2, 2)]))\n @jtu.skip_on_flag(\"jax_skip_slow_tests\", True)\n def testConvTranspose2D(self, lhs_shape, rhs_shape, dtype, strides,\n padding, dspec, rhs_dilation):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_transpose(lhs, rhs, strides, padding,\n rhs_dilation=rhs_dilation,\n dimension_numbers=dspec,\n transpose_kernel=False)\n\n def fun_via_grad(lhs, rhs):\n rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)\n return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,\n rhs_dilation=rhs_dilation,\n dimension_numbers=dspec)\n\n # NB: below just checks for agreement, we're not calling numpy.\n self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n 
\"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rhs_dilation\": rhs_dilation,\n \"dspec\": dspec}\n for lhs_shape, rhs_shape in [\n ((b, 10, i), (k, i, j))\n for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n for dtype in float_dtypes\n for strides in [(1,), (2,), (3,)]\n for padding in [\"VALID\", \"SAME\"]\n for dspec in [('NHC', 'HIO', 'NHC'),]\n for rhs_dilation in [None, (2,)]))\n def testConvTranspose1D(self, lhs_shape, rhs_shape, dtype, strides,\n padding, dspec, rhs_dilation):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_transpose(lhs, rhs, strides, padding,\n dimension_numbers=dspec,\n rhs_dilation=rhs_dilation,\n transpose_kernel=False)\n\n def fun_via_grad(lhs, rhs):\n rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)\n return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,\n rhs_dilation=rhs_dilation,\n dimension_numbers=dspec)\n\n # NB: below just checks for agreement, we're not calling numpy.\n self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"strides\": strides, \"padding\": padding, \"rhs_dilation\": rhs_dilation,\n \"dspec\": dspec}\n for lhs_shape, rhs_shape in [\n ((b, i), (i, j))\n for b, i, j in itertools.product([2,3],[2,3],[2,3])]\n for dtype in float_dtypes\n for strides in [()]\n for padding in [\"VALID\", \"SAME\"]\n for dspec in [('NC', 'IO', 'NC'),]\n for rhs_dilation in [None, ()]))\n def testConvTranspose0D(self, lhs_shape, rhs_shape, dtype, strides,\n padding, dspec, rhs_dilation):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.conv_transpose(lhs, rhs, strides, padding,\n dimension_numbers=dspec,\n rhs_dilation=rhs_dilation,\n transpose_kernel=False)\n\n def fun_via_grad(lhs, rhs):\n rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)\n return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,\n rhs_dilation=rhs_dilation,\n dimension_numbers=dspec)\n\n # NB: below just checks for agreement, we're not calling numpy.\n self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)\n\n def testConvTransposePaddingList(self):\n # Regression test for https://github.com/google/jax/discussions/8695\n a = jnp.ones((28,28))\n b = jnp.ones((3,3))\n c = lax.conv_general_dilated(a[None, None], b[None, None], (1,1), [(0,0),(0,0)], (1,1))\n self.assertArraysEqual(c, 9 * jnp.ones((1, 1, 26, 26)))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}_precision={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n precision),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"precision\": precision}\n for lhs_shape in [(3,), (4, 3)] 
for rhs_shape in [(3,), (3, 6)]\n for dtype in all_dtypes\n for precision in [None, lax.Precision.DEFAULT, lax.Precision.HIGH,\n lax.Precision.HIGHEST,\n (lax.Precision.DEFAULT, lax.Precision.HIGHEST)]))\n def testDot(self, lhs_shape, rhs_shape, dtype, precision):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n self._CompileAndCheck(partial(lax.dot, precision=precision), args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}_preferred_element_type={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n jtu.format_shape_dtype_string((), preferred_element_type)\n ),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype, \"preferred_element_type\": preferred_element_type\n }\n for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]\n for dtype, preferred_element_type in preferred_type_combinations))\n def testDotPreferredElement(self, lhs_shape, rhs_shape, dtype, preferred_element_type):\n if (not config.x64_enabled and\n (dtype == np.float64 or preferred_element_type == np.float64\n or dtype == np.int64 or preferred_element_type == np.int64)):\n raise SkipTest(\"64-bit mode disabled\")\n if (jtu.device_under_test() == \"tpu\" and\n (dtype == np.complex128 or preferred_element_type == np.complex128)):\n raise SkipTest(\"np.complex128 is not yet supported on TPU\")\n if jtu.device_under_test() == \"gpu\":\n # TODO(b/189287598)\n raise SkipTest(\"dot_general with preferred_element_type returns NaN non-deterministically on GPU\")\n rng = jtu.rand_default(self.rng())\n x = rng(lhs_shape, dtype)\n y = rng(rhs_shape, dtype)\n # We first compute the dot when both inputs are a lower-precision type and\n # preferred_element_type is a higher-precision type. We then compute results\n # where the inputs are first upcast to the higher-precision type and no\n # `preferred_element_type` is given. 
We expect the result to be extremely\n # similar given the semantics of `preferred_element_type`.\n result_with_preferred_type = lax.dot(x, y, preferred_element_type=preferred_element_type)\n result_with_upcast_inputs = lax.dot(\n x.astype(preferred_element_type),\n y.astype(preferred_element_type))\n self.assertArraysAllClose(result_with_preferred_type, result_with_upcast_inputs)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype}\n for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]\n for dtype in all_dtypes))\n def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n tol = {\n np.float16: 1e-2,\n np.float64: max(jtu.default_tolerance()[np.dtype(np.float64)], 1e-14),\n np.complex128: max(jtu.default_tolerance()[np.dtype(np.complex128)],\n 1e-14)\n }\n lax_op = partial(lax.dot, precision=lax.Precision.HIGHEST)\n self._CheckAgainstNumpy(lax_reference.dot, lax_op, args_maker, tol=tol)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_lhs_contracting={}_rhs_contracting={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n lhs_contracting, rhs_contracting),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"lhs_contracting\": lhs_contracting, \"rhs_contracting\": rhs_contracting}\n for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [\n [(5,), (5,), [0], [0]],\n [(5, 7), (5,), [0], [0]],\n [(7, 5), (5,), [1], [0]],\n [(3, 5), (2, 5), [1], [1]],\n [(5, 3), (5, 2), [0], [0]],\n [(5, 3, 2), (5, 2, 4), [0], [0]],\n [(5, 3, 2), (5, 2, 4), [0,2], [0,1]],\n [(5, 3, 2), (3, 5, 2, 4), [0,2], [1,2]],\n [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]],\n [(3, 2), (2, 4), [1], [0]],\n ]\n for dtype in all_dtypes))\n def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype,\n lhs_contracting, rhs_contracting):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))\n\n def fun(lhs, rhs):\n return lax.dot_general(lhs, rhs, dimension_numbers)\n\n self._CompileAndCheck(fun, args_maker, check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers}\n for lhs_shape, rhs_shape, dimension_numbers in [\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1]))),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),\n ]\n for dtype in all_dtypes))\n def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype,\n dimension_numbers):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n\n def fun(lhs, rhs):\n return lax.dot_general(lhs, rhs, dimension_numbers)\n\n self._CompileAndCheck(fun, args_maker, check_dtypes=False)\n\n 
@parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs_shape={}_rhs_shape={}_dimension_numbers={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n dimension_numbers),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"dimension_numbers\": dimension_numbers}\n for lhs_shape, rhs_shape, dimension_numbers in [\n ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),\n ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1]))),\n ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),\n ]\n for dtype in all_dtypes))\n def testDotGeneralAgainstNumpy(self, lhs_shape, rhs_shape, dtype,\n dimension_numbers):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n op = lambda x, y: lax.dot_general(x, y, dimension_numbers)\n numpy_op = lambda x, y: lax_reference.dot_general(x, y, dimension_numbers)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_dtype={}_broadcast_sizes={}\".format(\n shape, np.dtype(dtype).name, broadcast_sizes),\n \"shape\": shape, \"dtype\": dtype, \"broadcast_sizes\": broadcast_sizes}\n for shape in [(), (2, 3)]\n for dtype in default_dtypes\n for broadcast_sizes in [(), (2,), (1, 2)]))\n def testBroadcast(self, shape, dtype, broadcast_sizes):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.broadcast(x, broadcast_sizes)\n self._CompileAndCheck(op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_broadcast_sizes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), broadcast_sizes),\n \"shape\": shape, \"dtype\": dtype, \"broadcast_sizes\": broadcast_sizes}\n for shape in [(), (2, 3)]\n for dtype in default_dtypes\n for broadcast_sizes in [(), (2,), (1, 2)]))\n def testBroadcastAgainstNumpy(self, shape, dtype, broadcast_sizes):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.broadcast(x, broadcast_sizes)\n numpy_op = lambda x: lax_reference.broadcast(x, broadcast_sizes)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}_bcdims={}\".format(\n jtu.format_shape_dtype_string(inshape, dtype),\n outshape, broadcast_dimensions),\n \"inshape\": inshape, \"dtype\": dtype, \"outshape\": outshape,\n \"dimensions\": broadcast_dimensions}\n for inshape, outshape, broadcast_dimensions in [\n ([2], [2, 2], [0]),\n ([2], [2, 2], [1]),\n ([2], [2, 3], [0]),\n ([], [2, 3], []),\n ([1], [2, 3], [1]),\n ]\n for dtype in default_dtypes))\n def testBroadcastInDim(self, inshape, dtype, outshape, dimensions):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(inshape, dtype)]\n op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)\n self._CompileAndCheck(op, args_maker)\n\n def testBroadcastInDimOperandShapeTranspose(self):\n # Regression test for https://github.com/google/jax/issues/5276\n def f(x):\n return lax.broadcast_in_dim(x, (2, 3, 4), broadcast_dimensions=(0, 1, 2)).sum()\n def g(x):\n return lax.broadcast_in_dim(x.reshape((3,)), (2, 3, 4), broadcast_dimensions=(1,)).sum()\n x = np.ones((1, 3, 1))\n self.assertArraysEqual(jax.grad(f)(x), jax.grad(g)(x))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": 
\"_inshape={}_outshape={}_bcdims={}\".format(\n jtu.format_shape_dtype_string(inshape, np.float32),\n outshape, broadcast_dimensions),\n \"inshape\": inshape, \"outshape\": outshape,\n \"broadcast_dimensions\": broadcast_dimensions, \"err_msg\": err_msg}\n for inshape, outshape, broadcast_dimensions, err_msg in [\n ([2], [2, 2], [0, 1], ('broadcast_dimensions must have length equal to '\n 'operand ndim')),\n ([2, 2], [2], [0, 1], ('target broadcast shape must have equal or higher rank '\n 'to the operand shape')),\n ([2], [2, 3], [2], ('broadcast_in_dim broadcast_dimensions must be a subset of output '\n 'dimensions')),\n ([2], [3], [0], ('operand dimension sizes must either be 1, or be '\n 'equal to their corresponding dimensions in the target broadcast shape')),\n ([2, 2], [2, 2], [1, 0], ('broadcast_dimensions must be strictly increasing')),\n ]))\n def testBroadcastInDimShapeCheck(self, inshape, outshape, broadcast_dimensions, err_msg):\n rng = jtu.rand_default(self.rng())\n x = rng(inshape, np.float32)\n with self.assertRaisesRegex(TypeError, err_msg):\n lax.broadcast_in_dim(x, shape=outshape, broadcast_dimensions=broadcast_dimensions)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}_bcdims={}\".format(\n jtu.format_shape_dtype_string(inshape, dtype),\n outshape, broadcast_dimensions),\n \"inshape\": inshape, \"dtype\": dtype, \"outshape\": outshape,\n \"dimensions\": broadcast_dimensions}\n for inshape, outshape, broadcast_dimensions in [\n ([2], [2, 2], [0]),\n ([2], [2, 2], [1]),\n ([2], [2, 3], [0]),\n ([], [2, 3], []),\n ([1], [2, 3], [1]),\n ]\n for dtype in default_dtypes))\n def testBroadcastInDimAgainstNumpy(self, inshape, dtype, outshape, dimensions):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(inshape, dtype)]\n op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)\n numpy_op = lambda x: lax_reference.broadcast_in_dim(x, outshape, dimensions)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_dimensions={}\".format(\n jtu.format_shape_dtype_string(inshape, np.float32), dimensions),\n \"inshape\": inshape, \"dimensions\": dimensions, \"error_type\": error_type,\n \"err_msg\": err_msg}\n for inshape, dimensions, error_type, err_msg in [\n ((1, 2, 3), (0, 0), ValueError, 'dimensions are not unique'),\n ((1, 2, 3), (3,), ValueError, 'axis 3 is out of bounds'),\n ((1, 2, 3), (-4,), ValueError, 'axis -4 is out of bounds'),\n ((1, 2, 3), (1,), ValueError, 'cannot select an axis to squeeze out'),\n ((1, 2, 3), (None,), TypeError, 'cannot be interpreted as an integer'),\n ]))\n def testSqueezeShapeCheck(self, inshape, dimensions, error_type, err_msg):\n rng = jtu.rand_default(self.rng())\n x = rng(inshape, np.float32)\n with self.assertRaisesRegex(error_type, err_msg):\n lax.squeeze(x, dimensions=dimensions)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_dimensions={}\".format(\n jtu.format_shape_dtype_string(arg_shape, np.float32), dimensions),\n \"arg_shape\": arg_shape, \"dimensions\": dimensions}\n for arg_shape, dimensions in [\n [(1,), (0,)],\n [(1,), (-1,)],\n [(2, 1, 4), (1,)],\n [(2, 1, 3, 1), (1,)],\n [(2, 1, 3, 1), (1, 3)],\n [(2, 1, 3, 1), (3,)],\n ]))\n def testSqueeze(self, arg_shape, dimensions):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(arg_shape, np.float32)]\n op = lambda x: lax.squeeze(x, dimensions)\n numpy_op = lambda 
x: lax_reference.squeeze(x, dimensions)\n self._CompileAndCheck(op, args_maker)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n check_grads(op, args_maker(), 2, [\"fwd\", \"rev\"], eps=1.)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype}\n for dtype in default_dtypes\n for arg_shape, out_shape in [\n [(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]\n ]))\n def testReshape(self, arg_shape, out_shape, dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(arg_shape, dtype)]\n op = lambda x: lax.reshape(x, out_shape)\n self._CompileAndCheck(op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_outshape={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n jtu.format_shape_dtype_string(out_shape, dtype)),\n \"arg_shape\": arg_shape, \"out_shape\": out_shape, \"dtype\": dtype}\n for dtype in default_dtypes\n for arg_shape, out_shape in [\n [(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]\n ]))\n def testReshapeAgainstNumpy(self, arg_shape, out_shape, dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(arg_shape, dtype)]\n op = lambda x: lax.reshape(x, out_shape)\n numpy_op = lambda x: lax_reference.reshape(x, out_shape)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n def testRoundRoundingMethods(self):\n x = np.array([-2.5, -1.5, -0.5, 0.5, 1.5, 2.5], dtype=np.float32)\n self.assertAllClose(lax.round(x, lax.RoundingMethod.AWAY_FROM_ZERO),\n np.array([-3, -2, -1, 1, 2, 3], dtype=np.float32))\n self.assertAllClose(lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN),\n np.array([-2, -2, 0, 0, 2, 2], dtype=np.float32))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_pads={}\"\n .format(jtu.format_shape_dtype_string(shape, dtype), pads),\n \"shape\": shape, \"dtype\": dtype, \"pads\": pads}\n for dtype in default_dtypes\n for shape, pads in [\n ((0, 2), [(1, 2, 1), (0, 1, 0)]),\n ((2, 3), [(1, 2, 1), (0, 1, 0)]),\n ((2,), [(1, 2, 0)]),\n ((1, 2), [(1, 2, 0), (3, 4, 0)]),\n ((1, 2), [(0, 0, 0), (0, 0, 0)]),\n ((2,), [(1, 2, 3),]),\n ((3, 2), [(1, 2, 1), (3, 4, 2)]),\n ((2,), [(-1, 2, 0),]),\n ((4, 2), [(-1, -2, 0), (1, 2, 0)]),\n ((4, 2), [(-1, 2, 0), (1, 2, 2)]),\n ((5,), [(-1, -2, 2),]),\n ((4, 2), [(-1, -2, 1), (1, 2, 2)])\n ]))\n def testPad(self, shape, dtype, pads):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n fun = lambda operand: lax.pad(operand, np.array(0, dtype), pads)\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_inshape={}_pads={}\"\n .format(jtu.format_shape_dtype_string(shape, dtype), pads),\n \"shape\": shape, \"dtype\": dtype, \"pads\": pads}\n for shape in [(2, 3)]\n for dtype in default_dtypes\n for pads in [\n [(0, 0, 0), (0, 0, 0)], # no padding\n [(1, 1, 0), (2, 2, 0)], # only positive edge padding\n [(1, 2, 1), (0, 1, 0)], # edge padding and interior padding\n [(0, 0, 0), (-1, -1, 0)], # negative padding\n [(0, 0, 0), (-2, -2, 4)], # add big dilation then remove from edges\n [(0, 0, 0), (-2, -3, 1)], # remove everything in one dimension\n ]))\n def testPadAgainstNumpy(self, shape, dtype, pads):\n rng = jtu.rand_small(self.rng())\n args_maker = lambda: 
[rng(shape, dtype)]\n op = lambda x: lax.pad(x, np.array(0, dtype), pads)\n numpy_op = lambda x: lax_reference.pad(x, np.array(0, dtype), pads)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n def testPadErrors(self):\n with self.assertRaisesRegex(ValueError, \"padding_config\"):\n lax.pad(np.zeros(2), 0., [(0, 1, 0), (0, 1, 0)])\n with self.assertRaisesRegex(ValueError, \"interior padding in padding_config must be nonnegative\"):\n lax.pad(np.zeros(2), 0., [(0, 1, -1)])\n with self.assertRaisesRegex(ValueError, \"Dimension size after padding is not at least 0\"):\n lax.pad(np.zeros(2), 0., [(-3, 0, 0)])\n with self.assertRaisesRegex(ValueError, \"Dimension size after padding is not at least 0\"):\n lax.pad(np.zeros(2), 0., [(-4, 0, 1)])\n\n def testReverse(self):\n rev = jax.jit(lambda operand: lax.rev(operand, dimensions))\n\n dimensions = []\n self.assertAllClose(np.array([0, 1, 2, 3]), rev(np.array([0, 1, 2, 3])),\n check_dtypes=False)\n\n dimensions = [0]\n self.assertAllClose(np.array([3, 2, 1]), rev(np.array([1, 2, 3])),\n check_dtypes=False)\n\n dimensions = [0, 1]\n self.assertAllClose(np.array([[6, 5, 4], [3, 2, 1]]),\n rev(np.array([[1, 2, 3], [4, 5, 6]])),\n check_dtypes=False)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_predshape={}_argshapes={}\".format(\n jtu.format_shape_dtype_string(pred_shape, np.bool_),\n jtu.format_shape_dtype_string(arg_shape, arg_dtype)),\n \"pred_shape\": pred_shape, \"arg_shape\": arg_shape, \"arg_dtype\": arg_dtype}\n for arg_shape in [(), (3,), (2, 3)]\n for pred_shape in ([(), arg_shape] if arg_shape else [()])\n for arg_dtype in default_dtypes))\n def testSelect(self, pred_shape, arg_shape, arg_dtype):\n rng = jtu.rand_default(self.rng())\n def args_maker():\n return [rng(pred_shape, np.bool_), rng(arg_shape, arg_dtype),\n rng(arg_shape, arg_dtype)]\n return self._CompileAndCheck(lax.select, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_predshape={}_argshapes={}\".format(\n jtu.format_shape_dtype_string(pred_shape, np.bool_),\n jtu.format_shape_dtype_string(arg_shape, arg_dtype)),\n \"pred_shape\": pred_shape, \"arg_shape\": arg_shape, \"arg_dtype\": arg_dtype}\n for arg_shape in [(), (3,), (2, 3)]\n for pred_shape in ([(), arg_shape] if arg_shape else [()])\n for arg_dtype in default_dtypes))\n def testSelectAgainstNumpy(self, pred_shape, arg_shape, arg_dtype):\n rng = jtu.rand_default(self.rng())\n def args_maker():\n return [rng(pred_shape, np.bool_), rng(arg_shape, arg_dtype),\n rng(arg_shape, arg_dtype)]\n return self._CheckAgainstNumpy(lax_reference.select, lax.select, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}_indices={}_limit_indices={}_strides={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n indices, limit_indices, strides),\n \"shape\": shape, \"dtype\": dtype, \"starts\": indices,\n \"limits\": limit_indices, \"strides\": strides}\n for shape, indices, limit_indices, strides in [\n [(3,), (1,), (2,), None],\n [(7,), (4,), (7,), None],\n [(5,), (1,), (5,), (2,)],\n [(8,), (1,), (6,), (2,)],\n [(5, 3), (1, 1), (3, 2), None],\n [(5, 3), (1, 1), (3, 1), None],\n [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],\n [(5, 3), (1, 1), (2, 1), (1, 1)],\n [(5, 3), (1, 1), (5, 3), (2, 1)],\n ]\n for dtype in default_dtypes))\n def testSlice(self, shape, dtype, starts, limits, strides):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: 
lax.slice(x, starts, limits, strides)\n self._CompileAndCheck(op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}_indices={}_limit_indices={}_strides={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n indices, limit_indices, strides),\n \"shape\": shape, \"dtype\": dtype, \"starts\": indices,\n \"limits\": limit_indices, \"strides\": strides}\n for shape, indices, limit_indices, strides in [\n [(3,), (1,), (2,), None],\n [(7,), (4,), (7,), None],\n [(5,), (1,), (5,), (2,)],\n [(8,), (1,), (6,), (2,)],\n [(5, 3), (1, 1), (3, 2), None],\n [(5, 3), (1, 1), (3, 1), None],\n [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],\n [(5, 3), (1, 1), (2, 1), (1, 1)],\n [(5, 3), (1, 1), (5, 3), (2, 1)],\n ]\n for dtype in default_dtypes))\n def testSliceAgainstNumpy(self, shape, dtype, starts, limits, strides):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.slice(x, starts, limits, strides)\n numpy_op = lambda x: lax_reference.slice(x, starts, limits, strides)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_indices={}_size_indices={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n indices, size_indices),\n \"shape\": shape, \"dtype\": dtype, \"indices\": indices,\n \"size_indices\": size_indices}\n for shape, indices, size_indices in [\n [(3,), np.array((1,)), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(5, 3), np.array((1, 1)), (3, 1)],\n [(7, 5, 3), np.array((4, 1, 0)), (2, 0, 1)],\n ]\n for dtype in default_dtypes))\n def testDynamicSlice(self, shape, dtype, indices, size_indices):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), np.array(indices)]\n op = lambda x, starts: lax.dynamic_slice(x, starts, size_indices)\n self._CompileAndCheck(op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_indices={}_size_indices={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n indices, size_indices),\n \"shape\": shape, \"dtype\": dtype, \"indices\": indices,\n \"size_indices\": size_indices}\n for shape, indices, size_indices in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in default_dtypes))\n def testDynamicSliceAgainstNumpy(self, shape, dtype, indices, size_indices):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), np.array(indices)]\n op = lambda x, s: lax.dynamic_slice(x, s, size_indices)\n numpy_op = lambda x, s: lax_reference.dynamic_slice(x, s, size_indices)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n def testDynamicSliceInDim(self):\n # Regression test for mixed type problem in dynamic_slice_in_dim.\n rng = jtu.rand_default(self.rng())\n x = rng((6, 7), np.int32)\n np.testing.assert_equal(lax.dynamic_slice_in_dim(x, 2, 3), x[2:5])\n\n def testDynamicSliceArraySliceSizes(self):\n rng = jtu.rand_default(self.rng())\n x = rng((6, 7), np.int32)\n np.testing.assert_equal(lax.dynamic_slice(x, [2, 3], jnp.array([2, 2])),\n x[2:4, 3:5])\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_indices={}_update_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n indices, update_shape),\n \"shape\": shape, \"dtype\": dtype, \"indices\": indices,\n \"update_shape\": update_shape}\n for shape, indices, update_shape in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), 
(3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in default_dtypes))\n def testDynamicUpdateSlice(self, shape, dtype, indices, update_shape):\n rng = jtu.rand_default(self.rng())\n\n def args_maker():\n return [rng(shape, dtype), rng(update_shape, dtype), np.array(indices)]\n\n self._CompileAndCheck(lax.dynamic_update_slice, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_indices={}_update_shape={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n indices, update_shape),\n \"shape\": shape, \"dtype\": dtype, \"indices\": indices,\n \"update_shape\": update_shape}\n for shape, indices, update_shape in [\n [(3,), (1,), (1,)],\n [(5, 3), (1, 1), (3, 1)],\n [(7, 5, 3), (4, 1, 0), (2, 0, 1)],\n ]\n for dtype in default_dtypes))\n def testDynamicUpdateSliceAgainstNumpy(self, shape, dtype, indices,\n update_shape):\n rng = jtu.rand_default(self.rng())\n\n def args_maker():\n return [rng(shape, dtype), rng(update_shape, dtype), np.array(indices)]\n\n self._CheckAgainstNumpy(lax_reference.dynamic_update_slice,\n lax.dynamic_update_slice, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_perm={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), perm),\n \"shape\": shape, \"dtype\": dtype, \"perm\": perm}\n for shape, perm in [\n [(3, 4), (1, 0)],\n [(3, 4), (0, 1)],\n [(3, 4, 5), (2, 1, 0)],\n [(3, 4, 5), (1, 0, 2)],\n ]\n for dtype in default_dtypes))\n def testTranspose(self, shape, dtype, perm):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.transpose(x, perm)\n self._CompileAndCheck(op, args_maker)\n\n def testTransposeWithArrayPermutation(self):\n x = lax.transpose(np.ones((2, 3)), jnp.array([1, 0]))\n self.assertEqual((3, 2), x.shape)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_perm={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), perm),\n \"shape\": shape, \"dtype\": dtype, \"perm\": perm}\n for shape, perm in [\n [(3, 4), (1, 0)],\n [(3, 4), (0, 1)],\n [(3, 4, 5), (2, 1, 0)],\n [(3, 4, 5), (1, 0, 2)],\n ]\n for dtype in default_dtypes))\n def testTransposeAgainstNumpy(self, shape, dtype, perm):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.transpose(x, perm)\n numpy_op = lambda x: lax_reference.transpose(x, perm)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_inshape={}_reducedims={}_initval={}\"\n .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims,\n init_val),\n \"op\": op, \"init_val\": init_val, \"shape\": shape, \"dtype\": dtype, \"dims\": dims}\n for init_val, op, types in [\n (0, lax.add, default_dtypes),\n (1, lax.mul, default_dtypes),\n (0, lax.max, all_dtypes), # non-monoidal\n (-np.inf, lax.max, float_dtypes),\n (dtypes.iinfo(np.int32).min, lax.max, [np.int32]),\n (dtypes.iinfo(np.int64).min, lax.max, [np.int64]),\n (np.inf, lax.min, float_dtypes),\n (dtypes.iinfo(np.int32).max, lax.min, [np.int32]),\n (dtypes.iinfo(np.int64).max, lax.min, [np.int64]),\n (dtypes.iinfo(np.uint32).max, lax.min, [np.uint32]),\n (dtypes.iinfo(np.uint64).max, lax.min, [np.uint64]),\n ]\n for dtype in types\n for shape, dims in [\n [(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)],\n [(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)]\n ]))\n def testReduce(self, op, init_val, shape, dtype, dims):\n rng_factory = 
(jtu.rand_default if dtypes.issubdtype(dtype, np.integer)\n else jtu.rand_small)\n rng = rng_factory(self.rng())\n init_val = np.asarray(init_val, dtype=dtype)\n fun = lambda operand, init_val: lax.reduce(operand, init_val, op, dims)\n args_maker = lambda: [rng(shape, dtype), init_val]\n self._CompileAndCheck(fun, args_maker)\n\n # we separately test the version that uses a concrete init_val because it\n # can hit different code paths\n fun = lambda operand: lax.reduce(operand, init_val, op, dims)\n args_maker = lambda: [rng(shape, dtype)]\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}.{}_arr_weak_type={}_init_weak_type={}\"\n .format(op_namespace.__name__, op, arr_weak_type, init_weak_type),\n \"op\": op, \"op_namespace\": op_namespace, \"arr_weak_type\": arr_weak_type, \"init_weak_type\": init_weak_type}\n for op in [\"add\", \"mul\"]\n for op_namespace in [lax, operator]\n for arr_weak_type in [True, False]\n for init_weak_type in [True, False]))\n def testReduceWeakType(self, op_namespace, op, arr_weak_type, init_weak_type):\n op = getattr(op_namespace, op)\n arr = lax._convert_element_type(np.arange(10), int, weak_type=arr_weak_type)\n init = lax._convert_element_type(1, int, weak_type=init_weak_type)\n fun = lambda arr, init: lax.reduce(arr, init, op, (0,))\n out = fun(arr, init)\n self.assertEqual(dtypes.is_weakly_typed(out), arr_weak_type and init_weak_type)\n out_jit = jax.jit(fun)(arr, init)\n self.assertEqual(dtypes.is_weakly_typed(out_jit), arr_weak_type and init_weak_type)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": (\"_op={}_shape={}_dims={}_strides={}_padding={}\"\n \"_basedilation={}_windowdilation={}\")\n .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype),\n dims, strides, padding, base_dilation, window_dilation),\n \"op\": op, \"init_val\": init_val, \"dtype\": dtype, \"shape\": shape,\n \"dims\": dims, \"strides\": strides, \"padding\": padding,\n \"base_dilation\": base_dilation, \"window_dilation\": window_dilation}\n for init_val, op, dtypes in [\n (0, lax.add, [np.float32]),\n (-np.inf, lax.max, [np.float32]),\n (np.inf, lax.min, [np.float32]),\n ]\n for shape, dims, strides, padding, base_dilation, window_dilation in (\n itertools.chain(\n itertools.product(\n [(4, 6)],\n [(2, 1), (1, 2)],\n [(1, 1), (2, 1), (1, 2)],\n [\"VALID\", \"SAME\", [(0, 3), (1, 2)]],\n [(1, 1), (2, 3)],\n [(1, 1), (1, 2)]),\n itertools.product(\n [(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],\n [(1, 2, 2, 1), (1, 1, 1, 1)],\n [\"VALID\", \"SAME\", [(0, 1), (1, 0), (2, 3), (0, 2)]],\n [(1, 1, 1, 1), (2, 1, 3, 2)],\n [(1, 1, 1, 1), (1, 2, 2, 1)])))\n for dtype in dtypes))\n def testReduceWindow(self, op, init_val, dtype, shape, dims, strides, padding,\n base_dilation, window_dilation):\n rng = jtu.rand_small(self.rng())\n init_val = np.asarray(init_val, dtype=dtype)\n\n def fun(operand, init_val):\n return lax.reduce_window(operand, init_val, op, dims, strides, padding,\n base_dilation, window_dilation)\n\n def reference_fun(operand, init_val):\n return lax_reference.reduce_window(operand, init_val, op, dims, strides,\n padding, base_dilation)\n\n args_maker = lambda: [rng(shape, dtype), init_val]\n self._CompileAndCheck(fun, args_maker)\n if all(d == 1 for d in window_dilation):\n self._CheckAgainstNumpy(reference_fun, fun, args_maker)\n\n # we separately test the version that uses a concrete init_val because it\n # can hit different code paths\n def 
fun(operand):\n return lax.reduce_window(operand, init_val, op, dims, strides, padding,\n base_dilation, window_dilation)\n\n args_maker = lambda: [rng(shape, dtype)]\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": (\"_shape={}_dims={}_strides={}_padding={}\"\n \"_basedilation={}_windowdilation={}\")\n .format(jtu.format_shape_dtype_string(shape, dtype),\n dims, strides, padding, base_dilation, window_dilation),\n \"dtype\": dtype, \"shape\": shape,\n \"dims\": dims, \"strides\": strides, \"padding\": padding,\n \"base_dilation\": base_dilation, \"window_dilation\": window_dilation}\n for dtype in [np.float32]\n for shape, dims, strides, padding, base_dilation, window_dilation in (\n itertools.chain(\n itertools.product(\n [(4, 6)],\n [(2, 1), (1, 2)],\n [(1, 1), (2, 1), (1, 2)],\n [\"VALID\", \"SAME\", [(0, 3), (1, 2)]],\n [(1, 1), (2, 3)],\n [(1, 1), (1, 2)]),\n itertools.product(\n [(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],\n [(1, 2, 2, 1), (1, 1, 1, 1)],\n [\"VALID\", \"SAME\", [(0, 1), (1, 0), (2, 3), (0, 2)]],\n [(1, 1, 1, 1), (2, 1, 3, 2)],\n [(1, 1, 1, 1), (1, 2, 2, 1)])))))\n # TODO(b/183233858): variadic reduce-window is not implemented on XLA:GPU\n @jtu.skip_on_devices(\"gpu\")\n def testReduceWindowVariadic(self, dtype, shape, dims, strides, padding,\n base_dilation, window_dilation):\n if (jtu.device_under_test() == \"tpu\" and\n any(d != 1 for d in window_dilation)):\n raise SkipTest(\"TPU support missing for arbitrary window dilation.\")\n rng = jtu.rand_small(self.rng())\n init_values = (np.asarray(0, dtype=dtype), np.array(-np.inf, dtype=dtype))\n\n def reducer(xs, ys):\n x1, x2 = xs\n y1, y2 = ys\n return (x1 + y1, lax.max(x2, y2))\n\n def fun(*operands):\n return lax.reduce_window(operands, init_values, reducer, dims, strides,\n padding, base_dilation, window_dilation)\n\n def reference_fun(*operands):\n return [\n lax_reference.reduce_window(operand, init_val, op, dims, strides,\n padding, base_dilation)\n for operand, init_val, op in zip(operands, init_values,\n [np.add, np.maximum])]\n\n args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]\n self._CompileAndCheck(fun, args_maker)\n if all(d == 1 for d in window_dilation):\n self._CheckAgainstNumpy(reference_fun, fun, args_maker)\n\n\n def testReduceWindowFailures(self):\n def empty_window_test():\n return lax.reduce_window(np.ones((1,)), 0., lax.add, padding='VALID',\n window_dimensions=(0,), window_strides=(1,))\n\n def zero_stride_test():\n return lax.reduce_window(np.ones((1,)), 0., lax.add, padding='VALID',\n window_dimensions=(1,), window_strides=(0,))\n\n for failure_fun in [empty_window_test, zero_stride_test]:\n with self.assertRaisesRegex(TypeError, \"must have every element be\"):\n failure_fun()\n\n with self.assertRaisesRegex(\n ValueError,\n \"reduce_window output must have the same tree structure as the \"\n \"operands.*\"):\n return lax.reduce_window(\n np.ones((1,)), 0., lambda x, y: [x + y],\n padding='VALID', window_dimensions=(1,), window_strides=(1,))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": (f\"_shape={shape}_windowdimensions={window_dimensions}\"\n f\"_basedilation={base_dilation}_windowdilation=\"\n f\"{window_dilation}\"),\n \"shape\": shape, \"window_dimensions\": window_dimensions,\n \"base_dilation\": base_dilation, \"window_dilation\": window_dilation}\n for shape, window_dimensions, base_dilation, window_dilation in (\n itertools.chain(\n itertools.product(\n [(4, 6)],\n 
[(1, 1), (3, 4)],\n [(1, 1), (1, 2), (2, 13), (40, 60)],\n [(1, 1), (1, 2), (2, 13), (40, 60)]),\n itertools.product(\n [(3, 2, 4, 6)],\n [(1, 1, 1, 1), (2, 1, 2, 1)],\n [(1, 1, 1, 1), (1, 2, 2, 1), (30, 40, 3, 2)],\n [(1, 1, 1, 1), (1, 2, 2, 1), (30, 40, 3, 2)])))))\n def testReduceWindowShapeDilation(self, shape, window_dimensions,\n base_dilation, window_dilation):\n operand, padding, strides = np.ones(shape), 'SAME', (1,) * len(shape)\n result = lax.reduce_window(operand, 0., lax.add, padding=padding,\n window_strides=strides,\n window_dimensions=window_dimensions)\n # With a stride of 1 in each direction and a padding of 'SAME', the\n # shape of the input should be equal to the shape of the result according\n # to https://www.tensorflow.org/xla/operation_semantics#reducewindow.\n self.assertEqual(shape, result.shape)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_op={}_shape={}_axis={}_reverse={}\"\n .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), axis,\n reverse),\n \"op\": op, \"np_op\": np_op, \"shape\": shape, \"dtype\": dtype,\n \"axis\": axis, \"reverse\": reverse}\n for op, np_op, types in [\n (lax.cumsum, np.cumsum, default_dtypes),\n (lax.cumprod, np.cumprod, default_dtypes),\n (lax.cummax, np.maximum.accumulate, default_dtypes),\n (lax.cummin, np.minimum.accumulate, default_dtypes),\n ]\n for dtype in types\n for shape in [[10], [3, 4, 5]]\n for axis in range(len(shape))\n for reverse in [False, True]))\n def testCumulativeReduce(self, op, np_op, shape, dtype, axis, reverse):\n rng_factory = (jtu.rand_default if dtypes.issubdtype(dtype, np.integer)\n else jtu.rand_small)\n rng = rng_factory(self.rng())\n fun = partial(op, axis=axis, reverse=reverse)\n def np_fun(x):\n if reverse:\n return np.flip(np_op(np.flip(x, axis), axis=axis, dtype=dtype), axis)\n else:\n return np_op(x, axis=axis, dtype=dtype)\n args_maker = lambda: [rng(shape, dtype)]\n self._CompileAndCheck(fun, args_maker)\n self._CheckAgainstNumpy(np_fun, fun, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_out_dtype={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n jtu.format_shape_dtype_string(shape, out_dtype)),\n \"shape\": shape, \"dtype\": dtype, \"out_dtype\": out_dtype}\n for shape in [(), (3,), (3, 4)]\n for dtype in float_dtypes\n for out_dtype in float_dtypes))\n def testReducePrecision(self, shape, dtype, out_dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n info = dtypes.finfo(out_dtype)\n fun = lambda x: lax.reduce_precision(x, info.nexp, info.nmant)\n np_fun = lambda x: np.asarray(x).astype(out_dtype).astype(dtype)\n self._CheckAgainstNumpy(np_fun, fun, args_maker)\n self._CompileAndCheck(fun, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}_isstable={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, is_stable),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis, \"is_stable\": is_stable}\n for dtype in all_dtypes\n for shape in [(5,), (5, 7)]\n for axis in [-1, len(shape) - 1]\n for is_stable in [False, True]))\n def testSort(self, shape, dtype, axis, is_stable):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n fun = lambda x: lax.sort(x, dimension=axis, is_stable=is_stable)\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": f\"_dtype={dtype.__name__}\", 
\"dtype\": dtype}\n for dtype in float_dtypes))\n def testSortFloatSpecialValues(self, dtype):\n # Test confirms that\n # - NaNs are sorted to the end, regardless of representation\n # - sign bit of 0.0 is ignored\n x = jnp.array([-np.inf, 0.0, -0.0, np.inf, np.nan, -np.nan], dtype=dtype)\n index = lax.iota(dtypes.int_, x.size)\n argsort = lambda x: lax.sort_key_val(x, lax.iota(dtypes.int_, x.size), is_stable=True)[1]\n self.assertArraysEqual(argsort(x), index)\n self.assertArraysEqual(jax.jit(argsort)(x), index)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_axis={}_isstable={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axis, is_stable),\n \"shape\": shape, \"dtype\": dtype, \"axis\": axis, \"is_stable\": is_stable}\n for dtype in all_dtypes\n for shape in [(5,), (5, 7)]\n for axis in [-1, len(shape) - 1]\n for is_stable in [False, True]))\n def testSortAgainstNumpy(self, shape, dtype, axis, is_stable):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n op = lambda x: lax.sort(x, dimension=axis, is_stable=is_stable)\n def numpy_op(x):\n if is_stable:\n return lax_reference.sort(x, axis, kind='stable')\n else:\n return lax_reference.sort(x, axis)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_keyshape={}_valshape={}_axis={}_isstable={}\".format(\n jtu.format_shape_dtype_string(shape, key_dtype),\n jtu.format_shape_dtype_string(shape, val_dtype),\n axis, is_stable),\n \"shape\": shape, \"key_dtype\": key_dtype, \"val_dtype\": val_dtype,\n \"axis\": axis, \"is_stable\": is_stable}\n for key_dtype in float_dtypes + complex_dtypes + int_dtypes + uint_dtypes\n for val_dtype in [np.float32, np.int32, np.uint32]\n for shape in [(3,), (5, 3)]\n for axis in [-1, len(shape) - 1]\n for is_stable in [False, True]))\n def testSortKeyVal(self, shape, key_dtype, val_dtype, axis, is_stable):\n if (np.issubdtype(key_dtype, np.complexfloating) and\n jtu.device_under_test() == \"cpu\"):\n raise SkipTest(\"Complex-valued sort not implemented\")\n rng = jtu.rand_default(self.rng())\n # This test relies on the property that wherever keys are tied, values are\n # too, since we don't guarantee the same ordering of values with equal keys.\n # To avoid that case, we generate unique keys (globally in the key array).\n def args_maker():\n flat_keys = np.arange(prod(shape), dtype=key_dtype)\n keys = self.rng().permutation(flat_keys).reshape(shape)\n values = rng(shape, val_dtype)\n return keys, values\n\n fun = lambda keys, values: lax.sort_key_val(keys, values, axis, is_stable)\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_num_keys={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), num_keys),\n \"shape\": shape, \"dtype\": dtype, \"num_keys\": num_keys}\n for dtype in all_dtypes\n for shape in [(3, 5,), (4, 3)]\n for num_keys in range(1, shape[0] + 1)))\n def testSortNumKeys(self, shape, dtype, num_keys):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n lax_fun = lambda x: lax.sort(tuple(x), num_keys=num_keys)\n numpy_fun = lambda x: tuple(x[:, np.lexsort(x[:num_keys][::-1])])\n # self._CompileAndCheck(lax_fun, args_maker)\n self._CheckAgainstNumpy(numpy_fun, lax_fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_keyshape={}_valshape={}_axis={}\".format(\n 
jtu.format_shape_dtype_string(shape, key_dtype),\n jtu.format_shape_dtype_string(shape, val_dtype),\n axis),\n \"shape\": shape, \"key_dtype\": key_dtype, \"val_dtype\": val_dtype,\n \"axis\": axis}\n for key_dtype in float_dtypes + complex_dtypes + int_dtypes + uint_dtypes\n for val_dtype in [np.float32, np.int32, np.uint32]\n for shape in [(3,), (5, 3)]\n for axis in [-1, len(shape) - 1]))\n def testSortKeyValAgainstNumpy(self, shape, key_dtype, val_dtype, axis):\n if (np.issubdtype(key_dtype, np.complexfloating) and\n jtu.device_under_test() == \"cpu\"):\n raise SkipTest(\"Complex-valued sort not implemented\")\n rng = jtu.rand_default(self.rng())\n # This test relies on the property that wherever keys are tied, values are\n # too, since we don't guarantee the same ordering of values with equal keys.\n # To avoid that case, we generate unique keys (globally in the key array).\n def args_maker():\n flat_keys = np.arange(prod(shape), dtype=key_dtype)\n keys = self.rng().permutation(flat_keys).reshape(shape)\n values = rng(shape, val_dtype)\n return keys, values\n\n op = lambda ks, vs: lax.sort_key_val(ks, vs, axis)\n numpy_op = lambda ks, vs: lax_reference.sort_key_val(ks, vs, axis)\n self._CheckAgainstNumpy(numpy_op, op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_k={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), k),\n \"shape\": shape, \"dtype\": dtype, \"k\": k}\n for dtype in [np.float32, np.int32, np.uint32]\n for shape in [(3,), (5, 3)]\n for k in [1, 3]))\n def testTopK(self, shape, dtype, k):\n def args_maker():\n flat_values = np.arange(prod(shape), dtype=dtype)\n values = self.rng().permutation(flat_values).reshape(shape)\n return [values]\n def reference_top_k(x):\n bcast_idxs = np.broadcast_to(np.arange(shape[-1], dtype=np.int32), shape)\n sorted_vals, sorted_idxs = lax_reference.sort_key_val(x, bcast_idxs)\n return sorted_vals[..., :-k-1:-1], sorted_idxs[..., :-k-1:-1]\n op = lambda vs: lax.top_k(vs, k=k)\n self._CheckAgainstNumpy(op, reference_top_k, args_maker)\n self._CompileAndCheck(op, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_lhs_shape={}_rhs_shape={}\"\n .format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype)),\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype}\n for lhs_shape, rhs_shape in [((3, 2), (2, 4)),\n ((5, 3, 2), (5, 2, 4)),\n ((1, 2, 2, 3), (1, 2, 3, 1))]\n for dtype in float_dtypes))\n def testBatchMatMul(self, lhs_shape, rhs_shape, dtype):\n rng = jtu.rand_small(self.rng())\n arg_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n self._CompileAndCheck(lax.batch_matmul, arg_maker)\n\n def testCollapse(self):\n\n @jax.jit\n def collapse_first_two(x):\n return lax.collapse(x, 0, 2)\n\n self.assertEqual((6,), collapse_first_two(np.zeros((2, 3))).shape)\n self.assertEqual((6, 4), collapse_first_two(np.zeros((2, 3, 4))).shape)\n self.assertEqual((2, 3, 4),\n collapse_first_two(np.zeros((1, 2, 3, 4))).shape)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), idxs, axes),\n \"shape\": shape, \"dtype\": dtype, \"idxs\": idxs, \"axes\": axes}\n for dtype in all_dtypes\n for shape, idxs, axes in [\n [(3, 4, 5), (np.array([0, 2, 1]),), (0,)],\n [(3, 4, 5), (np.array([-1, -2]),), (0,)],\n [(3, 4, 5), (np.array([0, 2]), np.array([1, 3])), (0, 1)],\n [(3, 
4, 5), (np.array([0, 2]), np.array([1, 3])), (0, 2)],\n ]))\n def testIndexTake(self, shape, dtype, idxs, axes):\n rng = jtu.rand_default(self.rng())\n rand_idxs = lambda: tuple(rng(e.shape, e.dtype) for e in idxs)\n args_maker = lambda: [rng(shape, dtype), rand_idxs()]\n fun = lambda src, idxs: lax.index_take(src, idxs, axes)\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_dnums={}_slice_sizes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), idxs, dnums,\n slice_sizes),\n \"shape\": shape, \"dtype\": dtype, \"idxs\": idxs, \"dnums\": dnums,\n \"slice_sizes\": slice_sizes}\n for dtype in all_dtypes\n for shape, idxs, dnums, slice_sizes in [\n ((5,), np.array([[0], [2]]), lax.GatherDimensionNumbers(\n offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),\n (1,)),\n ((10,), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(\n offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),\n (2,)),\n ((10, 5,), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(\n offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),\n (1, 3)),\n ((10, 5), np.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(\n offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),\n (1, 3)),\n ]))\n def testGather(self, shape, dtype, idxs, dnums, slice_sizes):\n rng = jtu.rand_default(self.rng())\n rng_idx = jtu.rand_int(self.rng(), high=max(shape))\n rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n args_maker = lambda: [rng(shape, dtype), rand_idxs()]\n fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n self._CompileAndCheck(fun, args_maker)\n\n # These tests are adapted from the corresponding tests in\n # tensorflow/compiler/xla/service/shape_inference_test.cc with slight\n # variations to account for the implicit setting of index_vector_dim in JAX.\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": f\"_{testcase_name}\", \"operand_shape\": operand_shape,\n \"indices_shape\": indices_shape,\n \"dimension_numbers\": lax.GatherDimensionNumbers(\n offset_dims=offset_dims,\n collapsed_slice_dims=collapsed_slice_dims,\n start_index_map=start_index_map),\n \"slice_sizes\": slice_sizes, \"msg\": msg}\n for (testcase_name, operand_shape, indices_shape, offset_dims,\n collapsed_slice_dims, start_index_map, slice_sizes, msg) in [\n (\"NonAscendingWindowIndices\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 1),\n (4, 5, 6, 8, 7), (), (0, 1, 2, 3, 4), (10, 9, 8, 7, 6),\n \"offset_dims in gather op must be sorted\"),\n (\"RepeatedWindowIndices\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 1),\n (4, 5, 6, 7, 7), (), (0, 1, 2, 3, 4), (10, 9, 8, 7, 6),\n \"offset_dims in gather op must not repeat\"),\n (\"WindowIndexOutOfBounds\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 1),\n (4, 5, 100, 101, 102), (), (0, 1, 2, 3, 4), (10, 9, 8, 7, 6),\n \"Offset dimension 2 in gather op is out of bounds\"),\n (\"WindowIndexBarelyOutOfBounds\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 1),\n (4, 5, 6, 7, 9), (), (0, 1, 2, 3, 4), (10, 9, 8, 7, 6),\n \"Offset dimension 4 in gather op is out of bounds\"),\n (\"MismatchingElidedWindowDims\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7, 8), (4,), (0, 1, 2, 3, 4), (10, 9, 8, 7, 6),\n (\"All components of the offset index in a gather op must either be a \"\n \"offset dimension or explicitly collapsed\")),\n (\"OutOfBoundsWindowToInputMapping\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7, 8), (0, 1, 2, 3, 19), (0, 1, 2, 3, 4), (10, 9, 
8, 7, 6),\n \"Invalid collapsed_slice_dims set in gather op; valid range is\"),\n (\"RepeatedWindowToInputMapping\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7, 8), (0, 1, 2, 3, 3), (0, 1, 2, 3, 4), (10, 9, 8, 7, 6),\n \"collapsed_slice_dims in gather op must not repeat\"),\n (\"MismatchingGatherToInputMapping\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7, 8), (), (0, 1, 2, 3), (10, 9, 8, 7, 6),\n (\"Gather op has 4 elements in start_index_map and the bound of \"\n \"dimension index_vector_dim=4 of indices is 5. These two \"\n \"numbers must be equal.\")),\n (\"OutOfBoundsGatherToInputMapping\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7, 8), (), (0, 1, 2, 3, 7), (10, 9, 8, 7, 6),\n \"Invalid start_index_map\"),\n (\"RepeatedGatherToInputMapping\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7, 8), (), (0, 1, 2, 3, 3), (10, 9, 8, 7, 6),\n \"start_index_map in gather op must not repeat\"),\n (\"NonAscendingElidedWindowDims\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7, 8), (2, 1), (0, 1, 2, 3, 4), (10, 9, 8, 7, 6),\n \"collapsed_slice_dims in gather op must be sorted\"),\n (\"WindowBoundsTooLarge\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7), (2,), (0, 1, 2, 3, 4), (10, 9, 8, 100, 6),\n \"Slice size at index 3 in gather op is out of range\"),\n (\"MismatchingNumberOfWindowBounds\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7), (), (0, 1, 2, 3, 4), (10, 9, 8, 7),\n \"Gather op must have one slice size for every input dimension\"),\n (\"WindowBoundsNot1ForElidedDim\", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),\n (4, 5, 6, 7), (1,), (0, 1, 2, 3, 4), (10, 9, 8, 7, 6),\n (\"Gather op can only collapse slice dims with bound 1, but bound \"\n \"is 9 for index 1 at position 0.\"))\n ]\n ))\n def testGatherShapeCheckingRule(self, operand_shape, indices_shape,\n dimension_numbers, slice_sizes, msg):\n operand = np.ones(operand_shape, dtype=np.int32)\n indices = np.ones(indices_shape, dtype=np.int32)\n\n with self.assertRaisesRegex(TypeError, msg):\n lax.gather(operand, indices, dimension_numbers, slice_sizes)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_update={}_dnums={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n idxs, update_shape, dnums),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"idxs\": idxs,\n \"update_shape\": update_shape, \"dnums\": dnums}\n for dtype in inexact_dtypes\n for arg_shape, idxs, update_shape, dnums in [\n ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(\n update_window_dims=(), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(),\n scatter_dims_to_operand_dims=(0,))),\n ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ]))\n def testScatterAdd(self, arg_shape, dtype, idxs, update_shape, dnums):\n rng = jtu.rand_default(self.rng())\n rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))\n rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),\n rng(update_shape, dtype)]\n fun = partial(lax.scatter_add, dimension_numbers=dnums)\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_update={}_dnums={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n 
idxs, update_shape, dnums),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"idxs\": idxs,\n \"update_shape\": update_shape, \"dnums\": dnums}\n for dtype in float_dtypes\n for arg_shape, idxs, update_shape, dnums in [\n ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(\n update_window_dims=(), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(),\n scatter_dims_to_operand_dims=(0,))),\n ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ]))\n def testScatterMin(self, arg_shape, dtype, idxs, update_shape, dnums):\n rng = jtu.rand_default(self.rng())\n rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))\n rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),\n rng(update_shape, dtype)]\n fun = partial(lax.scatter_min, dimension_numbers=dnums)\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_update={}_dnums={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n idxs, update_shape, dnums),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"idxs\": idxs,\n \"update_shape\": update_shape, \"dnums\": dnums}\n for dtype in float_dtypes\n for arg_shape, idxs, update_shape, dnums in [\n ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(\n update_window_dims=(), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(),\n scatter_dims_to_operand_dims=(0,))),\n ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ]))\n def testScatterMax(self, arg_shape, dtype, idxs, update_shape, dnums):\n rng = jtu.rand_default(self.rng())\n rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))\n rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),\n rng(update_shape, dtype)]\n fun = partial(lax.scatter_max, dimension_numbers=dnums)\n self._CompileAndCheck(fun, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_idxs={}_update={}_dnums={}\".format(\n jtu.format_shape_dtype_string(arg_shape, dtype),\n idxs, update_shape, dnums),\n \"arg_shape\": arg_shape, \"dtype\": dtype, \"idxs\": idxs,\n \"update_shape\": update_shape, \"dnums\": dnums}\n for dtype in float_dtypes\n for arg_shape, idxs, update_shape, dnums in [\n ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(\n update_window_dims=(), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(),\n scatter_dims_to_operand_dims=(0,))),\n ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(\n update_window_dims=(1,), inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,))),\n ]))\n def testScatter(self, arg_shape, dtype, idxs, update_shape, dnums):\n rng = jtu.rand_default(self.rng())\n rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))\n rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n 
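# rng_idx draws integer indices in [0, max(arg_shape)); every dnums case above\n    # uses scatter_dims_to_operand_dims=(0,), so these indices address dimension 0\n    # of the operand.\n    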
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),\n rng(update_shape, dtype)]\n fun = partial(lax.scatter, dimension_numbers=dnums)\n self._CompileAndCheck(fun, args_maker)\n\n # These tests are adapted from the corresponding tests in\n # tensorflow/compiler/xla/service/shape_inference_test.cc with slight\n # variations to account for the implicit setting of index_vector_dim in JAX.\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": f\"_{testcase_name}\", \"operand_shape\": operand_shape,\n \"indices\": indices, \"update_shape\": update_shape,\n \"dimension_numbers\": lax.ScatterDimensionNumbers(\n update_window_dims=update_window_dims,\n inserted_window_dims=inserted_window_dims,\n scatter_dims_to_operand_dims=scatter_dims_to_operand_dims),\n \"msg\": msg}\n for (testcase_name, operand_shape, indices, update_shape,\n update_window_dims, inserted_window_dims,\n scatter_dims_to_operand_dims, msg) in [\n (\"ScatterWithUpdatesBiggerThanInput\", (64, 48), np.zeros((32, 1)),\n (65, 32), (0,), (1,), (1,), \"Bounds of the window dimensions\"),\n (\"ScatterWithUpdatesBiggerThanInputV2\", (64, 48),\n np.zeros((32, 1)), (32, 49), (1,), (0,), (1,),\n \"Bounds of the window dimensions\"),\n (\"ScatterWithUpdatesNotMatchingIndices\", (64, 48),\n np.zeros((32, 1)), (64, 31), (0,), (1,), (1,),\n \"Bounds of the scatter dimensions\"),\n (\"ScatterWithUpdatesNotMatchingIndicesV2\", (64, 48),\n np.zeros((32, 1)), (31, 48), (1,), (0,), (1,),\n \"Bounds of the scatter dimensions\"),\n (\"ScatterNdWithUpdatesBiggerThanInput\", (64, 48),\n np.zeros((10, 9, 8, 7, 1)), (10, 9, 8, 7, 65), (4,), (1,),\n (0,), \"Bounds of the window dimensions\"),\n (\"ScatterNdWithUpdatesNotMatchingIndices\", (64, 48),\n np.zeros((10, 9, 8, 7, 1)), (9, 9, 8, 7, 64), (4,), (1,), (0,),\n \"Bounds of the scatter dimensions\"),\n (\"InvalidUpdates\", (50, 49, 48, 47, 46),\n np.zeros((10, 9, 8, 7, 5)), (10, 9, 8, 7, 3, 2, 4, 1),\n (4, 5, 6), (1, 2), (0, 1, 2, 3, 4),\n \"Updates tensor must be of rank 7; got 8.\"),\n (\"NonAscendingUpdateWindowDims\", (6, 5, 4, 3, 2),\n np.zeros((5, 4, 3, 2, 1)), (10, 9, 8, 7, 6, 5, 4, 3, 2),\n (4, 5, 6, 8, 7), (), (0, 1, 2, 3, 4),\n \"update_window_dims in scatter op must be sorted\"),\n (\"RepeatedUpdateWindowDims\", (6, 5, 4, 3, 2),\n np.zeros((5, 4, 3, 2, 1)), (10, 9, 8, 7, 6, 5, 4, 3, 2),\n (4, 5, 6, 7, 7), (), (0, 1, 2, 3, 4),\n \"update_window_dims in scatter op must not repeat\"),\n (\"OutOfBoundsUpdateWindowDims\", (6, 5, 4, 3, 2),\n np.zeros((5, 4, 3, 2, 1)), (10, 9, 8, 7, 6, 5, 4, 3, 2),\n (4, 5, 6, 7, 9), (), (0, 1, 2, 3, 4),\n \"Invalid update_window_dims set in scatter op\"),\n (\"NonAscendingInsertedWindowDims\", (50, 49, 48, 47, 46),\n np.zeros((10, 9, 8, 7, 5)), (10, 9, 8, 7, 3, 2, 4),\n (4, 5, 6), (2, 1), (0, 1, 2, 3, 4),\n \"inserted_window_dims in scatter op must be sorted\"),\n (\"RepeatedInsertedWindowDims\", (50, 49, 48, 47, 46),\n np.zeros((10, 9, 8, 7, 5)), (10, 9, 8, 7, 3, 2, 4),\n (4, 5, 6), (1, 1), (0, 1, 2, 3, 4),\n \"inserted_window_dims in scatter op must not repeat\"),\n (\"OutOfBoundsInsertedWindowDims\", (50, 49, 48, 47, 46),\n np.zeros((10, 9, 8, 7, 5)), (10, 9, 8, 7, 3, 2, 4),\n (4, 5, 6), (1, 5), (0, 1, 2, 3, 4),\n \"Invalid inserted_window_dims set in scatter op\"),\n (\"MismatchingScatterDimsToOperandDims\", (50, 49, 48, 47, 46),\n np.zeros((10, 9, 8, 7, 5)), (10, 9, 8, 7, 3, 2, 4),\n (4, 5, 6), (1, 2), (0, 1, 2, 3),\n (\"Scatter op has 4 elements in scatter_dims_to_operand_dims and \"\n \"the bound of dimension index_vector_dim=4 
of indices \"\n \"is 5. These two numbers must be equal\")),\n (\"OutOfBoundsScatterDimsToOperandDims\", (50, 49, 48, 47, 46),\n np.zeros((10, 9, 8, 7, 5)), (10, 9, 8, 7, 3, 2, 4),\n (4, 5, 6), (1, 2), (0, 1, 2, 3, 10),\n \"Invalid scatter_dims_to_operand_dims mapping\"),\n (\"RepeatedValuesInScatterDimsToOperandDims\", (50, 49, 48, 47, 46),\n np.zeros((10, 9, 8, 7, 5)), (10, 9, 8, 7, 3, 2, 4),\n (4, 5, 6), (1, 2), (0, 1, 2, 2, 3),\n \"scatter_dims_to_operand_dims in scatter op must not repeat\"),\n (\"InsufficientWindowDims\", (50, 49, 48, 47, 46),\n np.zeros((10, 9, 8, 7, 5)), (10, 9, 8, 7, 3, 2, 4),\n (4, 5, 6), (1,), (0, 1, 2, 3),\n (\"Scatter op has window of size 4; doesn't match operand of \"\n \"rank 5.\"))\n ]\n ))\n def testScatterShapeCheckingRule(self, operand_shape, indices,\n update_shape, dimension_numbers, msg):\n\n def f(x, y):\n operand = lax.broadcast(x, operand_shape)\n updates = lax.broadcast(y, update_shape)\n return lax.scatter(operand, indices, updates, dimension_numbers)\n with self.assertRaisesRegex(TypeError, msg):\n jax.eval_shape(f, np.int32(1), np.int32(1))\n\n def testIssue831(self):\n # Tests the DeviceTuple constant handler\n def f(x):\n g = lambda *args: args[1]\n return jax.jit(lax.fori_loop, static_argnums=(2,))( 0, 10, g, x)\n\n jax.jit(f)(1.) # doesn't crash\n\n def testReshapeWithUnusualShapes(self):\n ans = lax.reshape(np.ones((3,), np.float32), (lax.add(1, 2), 1))\n self.assertAllClose(ans, np.ones((3, 1), np.float32))\n\n self.assertRaisesRegex(\n TypeError,\n \"Shapes must be 1D sequences of concrete values of integer type.*\",\n lambda: lax.reshape(np.ones(3,), (np.array([3, 1]),)))\n\n self.assertRaisesRegex(\n TypeError,\n \"Shapes must be 1D sequences of concrete values of integer type.*\",\n lambda: lax.reshape(np.ones(3,), (1.5, 2.0)))\n\n def testDynamicSliceTypeErrors(self):\n self.assertRaisesRegex(\n TypeError,\n \"index arguments to dynamic_slice must be integers of the same type\",\n lambda: lax.dynamic_slice(np.ones((3, 4), dtype=np.float32),\n (np.int32(1), np.int16(2)), (2, 2)))\n\n def testDynamicUpdateSliceTypeErrors(self):\n self.assertRaisesRegex(\n TypeError,\n \"index arguments to dynamic_update_slice must be integers of the same \"\n \"type\",\n lambda: lax.dynamic_update_slice(np.ones((3, 4), dtype=np.float32),\n np.zeros((2, 2), dtype=np.float32),\n (np.int32(1), np.int16(2))))\n\n def test_tie_in_error(self):\n raise SkipTest(\"test no longer needed after trivializing tie_in\")\n # with core.skipping_checks():\n # with self.assertRaisesRegex(\n # TypeError, \".* of type .*tuple.* is not a valid JAX type\"):\n # jax.make_jaxpr(lambda x: lax.tie_in((x, x), 1))(1.)\n\n def test_primitive_jaxtype_error(self):\n with jax.enable_checks(False):\n with self.assertRaisesRegex(\n TypeError, \"Argument .* of type .* is not a valid JAX type\"):\n lax.add(1, 'hi')\n\n def test_reduction_with_repeated_axes_error(self):\n with self.assertRaisesRegex(ValueError, \"duplicate value in 'axes' .*\"):\n lax.reduce(np.arange(3), 0, lax.add, (0, 0))\n\n def test_population_count_booleans_not_supported(self):\n # https://github.com/google/jax/issues/3886\n msg = \"population_count does not accept dtype bool\"\n with self.assertRaisesRegex(TypeError, msg):\n lax.population_count(True)\n\n def test_conv_general_dilated_different_input_ranks_error(self):\n # https://github.com/google/jax/issues/4316\n msg = (\"conv_general_dilated lhs and rhs must have the same number of \"\n \"dimensions\")\n dimension_numbers = 
lax.ConvDimensionNumbers(lhs_spec=(0, 1, 2),\n rhs_spec=(0, 1, 2),\n out_spec=(0, 1, 2))\n kwargs = { 'window_strides': (1,)\n , 'padding': ((0, 0),)\n , 'lhs_dilation': (1,)\n , 'rhs_dilation': (1,)\n , 'dimension_numbers': dimension_numbers\n , 'feature_group_count': 1\n , 'batch_group_count': 1\n , 'precision': None\n }\n lhs, rhs = np.ones((1, 1, 1)), np.ones((1, 1, 1, 1))\n with self.assertRaisesRegex(ValueError, msg):\n lax.conv_general_dilated(lhs, rhs, **kwargs)\n\n def test_window_strides_dimension_shape_rule(self):\n # https://github.com/google/jax/issues/5087\n msg = (\"conv_general_dilated window and window_strides must have \"\n \"the same number of dimensions\")\n lhs = jax.numpy.zeros((1, 1, 3, 3))\n rhs = np.zeros((1, 1, 1, 1))\n with self.assertRaisesRegex(ValueError, msg):\n jax.lax.conv(lhs, rhs, [1], 'SAME')\n\n def test_reduce_window_scalar_init_value_shape_rule(self):\n # https://github.com/google/jax/issues/4574\n args = { \"operand\": np.ones((4, 4), dtype=np.int32)\n , \"init_value\": np.zeros((1,), dtype=np.int32)\n , \"computation\": lax.max\n , \"window_dimensions\": (2, 2)\n , \"window_strides\": (2, 2)\n , \"padding\": \"VALID\"\n , \"base_dilation\": (1, 1)\n , \"window_dilation\": (1, 1)\n }\n\n msg = (r\"reduce_window expected init_values to be scalars but init_values \"\n r\"have shapes \\[\\(1,\\)\\].\")\n with self.assertRaisesRegex(TypeError, msg):\n lax.reduce_window(**args)\n\n def test_reduce_correctly_works_with_pytrees(self):\n operands = {'x': [np.ones(5), np.arange(5)]}\n init_values = {'x': [0., 0]}\n result = lax.reduce(operands, init_values,\n lambda x, y: tree_util.tree_multimap(lax.add, x, y),\n [0])\n self.assertDictEqual(result, {'x': [5., 10.]})\n\n def test_reduce_with_mismatched_pytrees_errors(self):\n operands = {'x': np.ones(5)}\n bad_init_values = {'y': 0.}\n\n with self.assertRaisesRegex(ValueError, 'Operands must have the same '\n 'tree structure as init_values'):\n lax.reduce(operands, bad_init_values,\n lambda x, y: dict(x=x['x'] + y['x']), [0])\n\n def test_reduce_with_nonscalar_inits_errors(self):\n operands = {'x': np.ones(5)}\n bad_init_values = {'x': np.ones(5)}\n\n with self.assertRaisesRegex(ValueError,\n 'reduce found non-scalar initial value'):\n lax.reduce(operands, bad_init_values,\n lambda x, y: dict(x=x['x'] + y['x']), [0])\n\n def test_select_jvp_complexity(self):\n jaxpr = jax.make_jaxpr(lambda x: jax.jvp(lambda x: lax.select(True, x, x),\n (x,), (1.,)))(1.)\n self.assertLen(jaxpr.jaxpr.eqns, 2)\n\n def testRngBitGenerator(self):\n # This test covers the original behavior of lax.rng_bit_generator, which\n # required x64=True, and only checks shapes and jit invariance.\n if not config.x64_enabled:\n raise SkipTest(\"RngBitGenerator requires 64bit key\")\n\n key = np.array((1, 2)).astype(np.uint64)\n def fn(k):\n return lax.rng_bit_generator(\n k, shape=(5, 7), algorithm=lax.RandomAlgorithm.RNG_THREE_FRY)\n\n out = fn(key)\n out_jit = jax.jit(fn)(key)\n self.assertEqual(out[0].shape, (2,))\n self.assertEqual(out[1].shape, (5, 7))\n self.assertArraysEqual(out[0], out_jit[0])\n self.assertArraysEqual(out[1], out_jit[1])\n\n @jtu.skip_on_devices(\"tpu\")\n def testRngBitGeneratorReturnedKey(self):\n # This test ensures that the key bit-packing/unpacking operations used in\n # the translation rule for rng_bit_generator, on older jaxlibs and at time\n # of writing on GPU, are inverses of one another.\n key = np.array([3, 1, 4, 2], dtype=np.dtype('uint32'))\n new_key, _ = lax.rng_bit_generator(key, (0,))\n 
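# A zero-sized shape request yields no random bits, so the key should come back\n    # unchanged through the pack/unpack round trip verified below.\n    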
self.assertAllClose(key, new_key)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dtype={}_weak_type={}\".format(dtype.__name__, weak_type),\n \"dtype\": dtype, \"weak_type\": weak_type}\n for dtype in all_dtypes + python_scalar_types\n for weak_type in [True, False]))\n def test_const(self, dtype, weak_type):\n if dtype in set(python_scalar_types):\n val = dtype(0)\n else:\n val = lax._convert_element_type(0, dtype, weak_type=weak_type)\n\n const = lax._const(val, 0)\n self.assertEqual(dtypes.dtype(val, canonicalize=True),\n dtypes.dtype(const, canonicalize=True))\n\n\n def testIgammaSpecial(self):\n self.assertEqual(lax.igamma(1., np.inf), 1.)\n self.assertEqual(lax.igammac(1., np.inf), 0.)\n\n def testRegressionIssue5728(self):\n # The computation in this test gave garbage data on CPU due to an LLVM bug.\n @jax.jit\n def f(inputs):\n out_action_2 = lax.slice_in_dim(inputs, 0, 15, axis=-1)\n mask = lax.slice_in_dim(inputs, 7, 22, axis=-1)\n out_action_2 = lax.select(lax.eq(mask, np.float32(0)),\n lax.broadcast(np.float32(42), (1, 15)),\n out_action_2)\n return lax.pad(out_action_2, np.float32(42), [(0, 0, 0), (0, 15, 0)])\n self.assertArraysEqual(np.full((1, 30), np.float32(42)),\n f(np.zeros((1, 24), dtype=np.float32)))\n\n def testDynamicSliceU8Index(self):\n # Regression test for u8 index in dynamic-slice (#6122)\n # TODO(b/183216273): enable this test for CPU & GPU when possible.\n if jtu.device_under_test() == \"cpu\":\n raise unittest.SkipTest(\"DynamicSliceU8Index test is a known failure on CPU.\")\n if jtu.device_under_test() == \"gpu\":\n raise unittest.SkipTest(\"DynamicSliceU8Index test is a known failure on GPU.\")\n x = np.arange(200)\n np.testing.assert_equal(\n np.array(lax.dynamic_slice(x, np.uint8([128]), (1,))), [128])\n\n\nclass LazyConstantTest(jtu.JaxTestCase):\n def _Check(self, make_const, expected):\n # check casting to ndarray works\n asarray_result = np.asarray(make_const())\n\n # check passing as an argument works (should hit constant handler)\n zero = np.array(0, expected.dtype)\n argument_result = lax.add(zero, make_const())\n\n # check looping into a compiled computation works\n jit_result = jax.jit(lambda x: lax.add(x, make_const()))(zero)\n\n # ensure they're all the same\n self.assertAllClose(asarray_result, expected)\n self.assertAllClose(argument_result, expected)\n self.assertAllClose(jit_result, expected)\n\n # ensure repr doesn't crash\n repr(make_const())\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_fill={}\".format(\n jtu.format_shape_dtype_string(shape, dtype) if dtype else shape,\n fill_value),\n \"shape\": shape, \"dtype\": dtype, \"fill_value\": fill_value}\n for dtype in itertools.chain(default_dtypes, [None])\n for shape in [(), (3,), (2, 3), (2, 3, 4), (1001, 1001)]\n for fill_value in [0, 1, np.pi]))\n def testFilledConstant(self, shape, fill_value, dtype):\n make_const = lambda: lax.full(shape, fill_value, dtype)\n expected = np.full(shape, fill_value,\n dtype or dtypes.dtype(fill_value))\n self._Check(make_const, expected)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_dim={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), dimension),\n \"shape\": shape, \"dtype\": dtype, \"dimension\": dimension}\n for dtype in default_dtypes\n for shape in [(), (3,), (2, 3), (2, 3, 4),\n # TODO(mattjj): re-enable\n # (1001, 1001), (101, 101, 101),\n ]\n for dimension in range(len(shape))))\n def testIotaConstant(self, dtype, shape, 
dimension):\n make_const = lambda: lax.broadcasted_iota(dtype, shape, dimension)\n\n arr = np.arange(shape[dimension], dtype=dtypes.canonicalize_dtype(dtype))\n singleton_shape = [1] * len(shape)\n singleton_shape[dimension] = shape[dimension]\n expected = np.broadcast_to(arr.reshape(singleton_shape), shape)\n\n self._Check(make_const, expected)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}_axes={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), axes),\n \"shape\": shape, \"dtype\": dtype, \"axes\": axes}\n for dtype in default_dtypes\n for shape, axes in [\n [(2, 3), (0, 1)],\n [(2, 3, 4), (0, 1)],\n [(2, 3, 4), (0, 2)],\n [(2, 3, 4), (1, 2)],\n [(2, 3, 4), (0, 1, 2)],\n [(2, 3, 4, 2), (0, 1, 2)],\n [(2, 3, 4, 2), (0, 2, 3)],\n [(1001, 1001), (0, 1)],\n ]))\n def testDeltaConstant(self, dtype, shape, axes):\n make_const = lambda: lax._delta(dtype, shape, axes)\n # don't check the asarray case, just assume it's right\n expected = np.asarray(make_const())\n self._Check(make_const, expected)\n\n def testBroadcastInDim(self):\n arr = lax.full((2, 1), 1.) + 1.\n arr_np = np.full((2, 1), 1.) + 1.\n expected = lax_reference.broadcast_in_dim(arr_np, (2, 1, 3), (0, 2))\n make_const = lambda: lax.broadcast_in_dim(arr, (2, 1, 3), (0, 2))\n self._Check(make_const, expected)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_input_type={}_dtype={}_value={}_jit={}\".format(\n input_type.__name__, dtype.__name__, value, jit),\n \"input_type\": input_type, \"dtype\": dtype, \"value\": value, \"jit\": jit}\n for input_type in [int, float, np.int32, np.float32, np.array]\n for dtype in [np.int32, np.float32]\n for jit in [True, False]\n for value in [0, 1]))\n def testConvertElementReturnType(self, input_type, dtype, value, jit):\n op = lambda x: lax.convert_element_type(x, dtype)\n if jit:\n op = jax.jit(op)\n result = op(input_type(value))\n assert isinstance(result, jnp.DeviceArray)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_dtype_in={}_dtype_out={}\".format(\n dtype_in.__name__, dtype_out.__name__),\n \"dtype_in\": dtype_in, \"dtype_out\": dtype_out}\n for dtype_in in all_dtypes for dtype_out in all_dtypes))\n @jtu.ignore_warning(category=np.ComplexWarning)\n def testConvertElementTypeAvoidsCopies(self, dtype_in, dtype_out):\n x = _device_put_raw(np.zeros(5, dtype_in))\n self.assertEqual(x.dtype, dtype_in)\n y = lax.convert_element_type(x, dtype_out)\n self.assertEqual(y.dtype, dtype_out)\n if np.dtype(dtype_in) == np.dtype(dtype_out):\n self.assertIs(x.device_buffer, y.device_buffer)\n else:\n self.assertFalse(x.device_buffer is y.device_buffer)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_fn={}_indexdtype={}\"\n .format(jax_fn.__name__, np.dtype(index_dtype).name),\n \"index_dtype\": index_dtype, \"jax_fn\": jax_fn}\n for index_dtype in jtu.dtypes.all_inexact + jtu.dtypes.boolean\n for jax_fn in [lax.argmin, lax.argmax]))\n def testArgMinMaxIndexDtypeError(self, jax_fn, index_dtype):\n with self.assertRaisesRegex(TypeError,\n \"index_dtype must be an integer type\"):\n jax_fn(np.ones((2, 2)), axis=0, index_dtype=index_dtype)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_fn={}\".format(jax_fn.__name__),\n \"jax_fn\": jax_fn}\n for jax_fn in [lax.argmin, lax.argmax]))\n def testArgMinMaxEmptyError(self, jax_fn):\n with self.assertRaisesRegex(ValueError,\n \"require non-empty reduced dimension\"):\n 
jax_fn(np.ones((0, 2)), axis=0, index_dtype=np.int32)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_fn={}\".format(jax_fn.__name__),\n \"jax_fn\": jax_fn}\n for jax_fn in [lax.argmin, lax.argmax]))\n def testArgMinMaxInvalidAxisError(self, jax_fn):\n with self.assertRaisesRegex(ValueError,\n \"Invalid axis -1 for operand\"):\n jax_fn(np.ones((2, 3)), axis=-1, index_dtype=np.int32)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_fn={}_weaktype={}\".format(jax_fn.__name__, weak_type),\n \"jax_fn\": jax_fn, \"weak_type\": weak_type}\n for jax_fn in [lax.argmin, lax.argmax]\n for weak_type in [True, False]))\n def testArgMinMaxWeakType(self, jax_fn, weak_type):\n op = lambda x: jax_fn(x, axis=0, index_dtype=np.int32)\n x_in = lax._convert_element_type(np.ones((2, 2)), weak_type=weak_type)\n self.assertEqual(dtypes.is_weakly_typed(x_in), weak_type)\n x_out = op(x_in)\n self.assertEqual(dtypes.is_weakly_typed(x_out), False)\n x_out_jit = jax.jit(op)(x_in)\n self.assertEqual(dtypes.is_weakly_typed(x_out_jit), False)\n\n def testArgMaxOfNanChoosesNaN(self):\n self.assertEqual(lax.argmax(np.array([0., np.nan]), axis=0,\n index_dtype=np.int32), 1)\n\n unary_op_types = {}\n for r in LAX_OPS:\n if r.nargs == 1:\n unary_op_types[r.op] = (unary_op_types.get(r.op, set()) |\n set(np.dtype(t) for t in r.dtypes))\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(op), \"op_name\": op, \"rec_dtypes\": dtypes}\n for op, dtypes in unary_op_types.items()))\n def testUnaryWeakTypes(self, op_name, rec_dtypes):\n \"\"\"Test that all lax unary ops propagate weak_type information appropriately.\"\"\"\n # Find a valid dtype for the function.\n for dtype in [np.float_, np.int_, np.complex_, np.bool_]:\n dtype = dtypes.canonicalize_dtype(dtype)\n if dtype in rec_dtypes:\n py_val = dtype.type(1).item()\n lax_val = lax.full((), py_val, dtype)\n break\n else:\n raise ValueError(f\"no available dtypes in {rec_dtypes}\")\n\n op = getattr(lax, op_name)\n py_op = op(py_val)\n lax_op = op(lax_val)\n\n self.assertAllClose(py_op, lax_op, check_dtypes=True)\n self.assertTrue(py_op.aval.weak_type)\n self.assertFalse(lax_op.aval.weak_type)\n\n def testCumsumLengthOne(self):\n # regression test for issue 4672\n x = lax.full((1,), 1)\n out = lax.cumsum(x)\n self.assertArraysEqual(out, x)\n\n def testLog1pNearOne(self):\n np.testing.assert_array_almost_equal_nulp(\n np.log1p(np.float32(1e-5)), lax.log1p(np.float32(1e-5)))\n np.testing.assert_array_almost_equal_nulp(\n np.log1p(np.float32(1e-5)), lax.log1p(np.complex64(1e-5)))\n\n\nclass LaxNamedShapeTest(jtu.JaxTestCase):\n\n def test_abstract_eval(self):\n aval1 = core.ShapedArray((2, 3), np.float32, False, {'i': 10})\n out = lax.sin_p.abstract_eval(aval1)\n self.assertEqual(out, aval1)\n\n aval1 = core.ShapedArray((2, 3), np.float32, False, {'i': 10})\n aval2 = core.ShapedArray((2, 3), np.float32, False, {'j': 5})\n expected = core.ShapedArray((2, 3), np.float32, False, {'i': 10, 'j': 5})\n out = lax.add_p.abstract_eval(aval1, aval2)\n self.assertEqual(out, expected)\n\n def test_abstract_eval_collective(self):\n with core.extend_axis_env('i', 10, None):\n aval1 = core.ShapedArray((2, 3), np.float32, False, {'i': 10, 'j': 5})\n expected = core.ShapedArray((2, 3), np.float32, False, {'j': 5})\n out, = lax.psum_p.abstract_eval(aval1, axes=('i',), axis_index_groups=None)\n self.assertEqual(out, expected)\n\nif __name__ == '__main__':\n 
absltest.main(testLoader=jtu.JaxTestLoader())\n"
] | [
[
"numpy.take",
"numpy.asarray",
"numpy.issubdtype",
"numpy.dtype",
"numpy.complex64",
"numpy.swapaxes",
"numpy.ones_like",
"numpy.pad",
"numpy.arange",
"numpy.uint8",
"numpy.lexsort",
"numpy.full",
"numpy.float32",
"numpy.zeros",
"numpy.array",
"numpy.flip",
"numpy.int32",
"numpy.ones",
"numpy.int16",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
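A minimal, self-contained sketch of the pytree reduction exercised in test_reduce_correctly_works_with_pytrees above, assuming a recent jax where jax.tree_util.tree_map accepts multiple trees; the operands and init_values share one tree structure and the reducer is applied leaf-wise along the given axes:

    import numpy as np
    import jax
    from jax import lax

    # Same inputs as the test: one float leaf and one integer leaf.
    operands = {'x': [np.ones(5), np.arange(5)]}
    init_values = {'x': [0., 0]}

    result = lax.reduce(
        operands, init_values,
        # The reducer receives two pytrees and must return one of the same
        # structure; tree_map applies lax.add leaf by leaf.
        lambda acc, val: jax.tree_util.tree_map(lax.add, acc, val),
        [0])
    print(result)  # expected, as in the test: {'x': [5., 10.]}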
kellywzhang/OpenNMT-py | [
"2a38fb035fed0597f22694f4580303d1490cbb39"
] | [
"onmt/Optim.py"
] | [
"import torch.optim as optim\nfrom torch.nn.utils import clip_grad_norm\n\n\nclass Optim(object):\n \"\"\"\n Controller class for optimization. Mostly a thin\n wrapper for `optim`, but also useful for implementing\n rate scheduling beyond what is currently available.\n Also implements necessary methods for training RNNs such\n as grad manipulations.\n\n Args:\n method (:obj:`str`): one of [sgd, adagrad, adadelta, adam]\n lr (float): learning rate\n lr_decay (float, optional): learning rate decay multiplier\n start_decay_at (int, optional): epoch to start learning rate decay\n beta1, beta2 (float, optional): parameters for adam\n adagrad_accum (float, optional): initialization parameter for adagrad\n decay_method (str, option): custom decay options\n warmup_steps (int, option): parameter for `noam` decay\n model_size (int, option): parameter for `noam` decay\n \"\"\"\n # We use the default parameters for Adam that are suggested by\n # the original paper https://arxiv.org/pdf/1412.6980.pdf\n # These values are also used by other established implementations,\n # e.g. https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer\n # https://keras.io/optimizers/\n # Recently there are slightly different values used in the paper\n # \"Attention is all you need\"\n # https://arxiv.org/pdf/1706.03762.pdf, particularly the value beta2=0.98\n # was used there however, beta2=0.999 is still arguably the more\n # established value, so we use that here as well\n def __init__(self, method, lr, max_grad_norm,\n lr_decay=1, start_decay_at=None,\n beta1=0.9, beta2=0.999,\n adagrad_accum=0.0,\n decay_method=None,\n warmup_steps=4000,\n model_size=None,\n patience=0):\n self.last_ppl = None\n self.lr = lr\n self.original_lr = lr\n self.max_grad_norm = max_grad_norm\n self.method = method\n self.lr_decay = lr_decay\n self.start_decay_at = start_decay_at\n self.start_decay = False\n self._step = 0\n self.betas = [beta1, beta2]\n self.adagrad_accum = adagrad_accum\n self.decay_method = decay_method\n self.warmup_steps = warmup_steps\n self.model_size = model_size\n self.patience = patience\n self.patience_cnt = 0\n\n def set_parameters(self, params):\n self.params = [p for p in params if p.requires_grad]\n if self.method == 'sgd':\n self.optimizer = optim.SGD(self.params, lr=self.lr)\n elif self.method == 'adagrad':\n self.optimizer = optim.Adagrad(self.params, lr=self.lr)\n for group in self.optimizer.param_groups:\n for p in group['params']:\n self.optimizer.state[p]['sum'] = self.optimizer\\\n .state[p]['sum'].fill_(self.adagrad_accum)\n elif self.method == 'adadelta':\n self.optimizer = optim.Adadelta(self.params, lr=self.lr)\n elif self.method == 'adam':\n self.optimizer = optim.Adam(self.params, lr=self.lr,\n betas=self.betas, eps=1e-9)\n else:\n raise RuntimeError(\"Invalid optim method: \" + self.method)\n\n def _set_rate(self, lr):\n self.lr = lr\n self.optimizer.param_groups[0]['lr'] = self.lr\n\n def step(self):\n \"\"\"Update the model parameters based on current gradients.\n\n Optionally, will employ gradient modification or update learning\n rate.\n \"\"\"\n self._step += 1\n\n # Decay method used in tensor2tensor.\n if self.decay_method == \"noam\":\n self._set_rate(\n self.original_lr *\n (self.model_size ** (-0.5) *\n min(self._step ** (-0.5),\n self._step * self.warmup_steps**(-1.5))))\n\n if self.max_grad_norm:\n clip_grad_norm(self.params, self.max_grad_norm)\n self.optimizer.step()\n\n def update_learning_rate(self, ppl, epoch):\n \"\"\"\n Decay learning rate if val perf does not 
improve\n or we hit the start_decay_at limit.\n \"\"\"\n\n if self.start_decay_at is not None and epoch >= self.start_decay_at:\n self.start_decay = True\n # Change from original OpenNMT option\n if self.last_ppl is not None and ppl > self.last_ppl:\n self.patience_cnt += 1\n if self.patience_cnt > self.patience:\n self.start_decay = True\n else:\n self.start_decay = False\n self.patience_cnt = 0\n\n if self.start_decay:\n self.lr = self.lr * self.lr_decay\n print(\"Decaying learning rate to %g\" % self.lr)\n\n self.last_ppl = ppl\n self.optimizer.param_groups[0]['lr'] = self.lr\n"
] | [
[
"torch.optim.Adam",
"torch.optim.Adagrad",
"torch.nn.utils.clip_grad_norm",
"torch.optim.SGD",
"torch.optim.Adadelta"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
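The "noam" branch of Optim.step() above implements the Transformer warmup-then-decay schedule; a minimal sketch with an illustrative helper name (noam_lr is not part of the OpenNMT-py API):

    def noam_lr(step, original_lr=1.0, model_size=512, warmup_steps=4000):
        # Linear warmup for the first warmup_steps, then 1/sqrt(step) decay,
        # both scaled by model_size ** -0.5 as in Optim.step().
        return (original_lr
                * model_size ** (-0.5)
                * min(step ** (-0.5), step * warmup_steps ** (-1.5)))

    for s in (1, 1000, 4000, 16000):
        print(s, noam_lr(s))  # rises until step 4000, then decays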
fierval/retina | [
"2bc50b3354e5e37f1cfd34f11fbe4b0a4178b7ac",
"2bc50b3354e5e37f1cfd34f11fbe4b0a4178b7ac"
] | [
"DiabeticRetinopathy/Refactoring/kobra/tr_utils.py",
"plot_compare_methods.py"
] | [
"import numpy as np\nfrom time import gmtime, strftime, localtime\nimport csv\nimport os\nfrom os import path\nimport shutil\nimport pandas as pd\nfrom pandas.io.parsers import csv\n\ndef prep_out_path(out_path):\n if path.exists(out_path):\n shutil.rmtree(out_path)\n os.makedirs(out_path)\n\ndef append_to_arr(arr, a, axis = 0):\n '''\n Append a to a numpy array arr. a - scalar, list or numpy array\n '''\n if isinstance(a, list) or isinstance(a, np.ndarray):\n a = np.array(a)\n\n if arr.shape[0] == 0:\n arr = a.reshape(1, a.shape[0])\n else:\n arr = np.append(arr, a.reshape(1, a.shape[0]), axis = axis)\n else:\n if arr.size == 0:\n arr = np.array([a]) # make sure it is a 1-dimensional array\n else:\n arr = np.append(arr, a)\n return arr\n\ndef time_now_str():\n return strftime(\"%d %b %Y %H:%M:%S\", localtime())\n\ndef merge_two_dicts(x, y):\n '''Given two dicts, merge them into a new dict.\n '''\n z = x.copy()\n z.update(y)\n return z\n\ndef vote(proba_list, weight_list):\n '''\n Given a list of probability arrays and a list of weights,\n Compute the final array by summiing probabilities and multiplying by their weights\n '''\n wts = np.array(weight_list)\n if wts[wts == 1].shape[0] == wts.shape[0]:\n proba = np.array([x for x in proba_list])\n return proba.mean(0)\n else:\n proba = np.array([x[0] * x[1] for x in zip(proba_list, weight_list)])\n return proba.sum(0)\n \ndef vote_reduce(arrs, weights):\n '''\n Given two arrays and a list of two weights, apply the voting rule as in vote(), unless\n a 0 or a 1 is encountered. In the former case pick the unweighted non-zero element, in the latter - the element\n with value of 1.\n '''\n def func (x, y):\n w2 = y[1]; y = y[0]\n for i, k in enumerate(np.nditer(y, ['c_index'])):\n if x[i] == 0 or y[i] == 1.0:\n x[i] = y[i]\n elif x[i] != 1 and y[i] != 0:\n x[i] = x[i] + y[i] * w2\n return x\n\n def init(x):\n return np.array([x * weights[0] if x != 1.0 else x for x in np.nditer(x, ['c_index'])])\n\n res = np.array([])\n probs = np.array(arrs)\n\n for i in range(0, probs.shape[1]):\n samples = probs[:, i, :].reshape(probs.shape[0], probs.shape[2])\n cur_proba = reduce(func, zip(samples[1:, :], np.array(weights)[1:]), init(samples[0]))\n res = append_to_arr(res, cur_proba) \n return res\n\ndef isEmpty(arr):\n return len(arr) == 0\n\ndef write_to_csv(task_labels, labels, probs, out_file):\n predict_columns = [\"Prediction{:1d}\".format(i) for i in range(1, 10) ]\n\n existing_rows = pd.read_csv(task_labels, header=0, quoting=csv.QUOTE_NONNUMERIC)\n file_names = pd.DataFrame(labels, columns= [\"Id\"])\n probas = pd.DataFrame(probs, columns = predict_columns)\n out = pd.concat([file_names, probas], axis=1)\n out = pd.concat([existing_rows, out])\n\n out.to_csv(out_file, index=False, quoting=csv.QUOTE_NONNUMERIC)",
"\"\"\"\n=========================================\n Comparison of Manifold Learning methods\n=========================================\n\nAn illustration of dimensionality reduction on the S-curve dataset\nwith various manifold learning methods.\n\nFor a discussion and comparison of these algorithms, see the\n:ref:`manifold module page <manifold>`\n\nFor a similar example, where the methods are applied to a\nsphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`\n\nNote that the purpose of the MDS is to find a low-dimensional\nrepresentation of the data (here 2D) in which the distances respect well\nthe distances in the original high-dimensional space, unlike other\nmanifold-learning algorithms, it does not seeks an isotropic\nrepresentation of the data in the low-dimensional space.\n\"\"\"\n\n# Author: Jake Vanderplas -- <[email protected]>\n\nprint(__doc__)\n\nfrom time import time\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.ticker import NullFormatter\n\nfrom sklearn import manifold, datasets\n\n# Next line to silence pyflakes. This import is needed.\nAxes3D\n\nn_points = 1000\nX, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)\nn_neighbors = 10\nn_components = 2\n\nfig = plt.figure(figsize=(15, 8))\nplt.suptitle(\"Manifold Learning with %i points, %i neighbors\"\n % (1000, n_neighbors), fontsize=14)\n\ntry:\n # compatibility matplotlib < 1.0\n ax = fig.add_subplot(251, projection='3d')\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)\n ax.view_init(4, -72)\nexcept:\n ax = fig.add_subplot(251, projection='3d')\n plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)\n\nmethods = ['standard', 'ltsa', 'hessian', 'modified']\nlabels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']\n\nfor i, method in enumerate(methods):\n t0 = time()\n Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,\n eigen_solver='auto',\n method=method).fit_transform(X)\n t1 = time()\n print(\"%s: %.2g sec\" % (methods[i], t1 - t0))\n\n ax = fig.add_subplot(252 + i)\n plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)\n plt.title(\"%s (%.2g sec)\" % (labels[i], t1 - t0))\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_major_formatter(NullFormatter())\n plt.axis('tight')\n\nt0 = time()\nY = manifold.Isomap(n_neighbors, n_components).fit_transform(X)\nt1 = time()\nprint(\"Isomap: %.2g sec\" % (t1 - t0))\nax = fig.add_subplot(257)\nplt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)\nplt.title(\"Isomap (%.2g sec)\" % (t1 - t0))\nax.xaxis.set_major_formatter(NullFormatter())\nax.yaxis.set_major_formatter(NullFormatter())\nplt.axis('tight')\n\n\nt0 = time()\nmds = manifold.MDS(n_components, max_iter=100, n_init=1)\nY = mds.fit_transform(X)\nt1 = time()\nprint(\"MDS: %.2g sec\" % (t1 - t0))\nax = fig.add_subplot(258)\nplt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)\nplt.title(\"MDS (%.2g sec)\" % (t1 - t0))\nax.xaxis.set_major_formatter(NullFormatter())\nax.yaxis.set_major_formatter(NullFormatter())\nplt.axis('tight')\n\n\nt0 = time()\nse = manifold.SpectralEmbedding(n_components=n_components,\n n_neighbors=n_neighbors)\nY = se.fit_transform(X)\nt1 = time()\nprint(\"SpectralEmbedding: %.2g sec\" % (t1 - t0))\nax = fig.add_subplot(259)\nplt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)\nplt.title(\"SpectralEmbedding (%.2g sec)\" % (t1 - 
t0))\nax.xaxis.set_major_formatter(NullFormatter())\nax.yaxis.set_major_formatter(NullFormatter())\nplt.axis('tight')\n\nt0 = time()\ntsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)\nY = tsne.fit_transform(X)\nt1 = time()\nprint(\"t-SNE: %.2g sec\" % (t1 - t0))\nax = fig.add_subplot(2, 5, 10)\nplt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)\nplt.title(\"t-SNE (%.2g sec)\" % (t1 - t0))\nax.xaxis.set_major_formatter(NullFormatter())\nax.yaxis.set_major_formatter(NullFormatter())\nplt.axis('tight')\n\nplt.show()\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"numpy.nditer",
"pandas.DataFrame",
"numpy.append",
"numpy.array"
],
[
"sklearn.manifold.SpectralEmbedding",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"sklearn.manifold.Isomap",
"sklearn.manifold.LocallyLinearEmbedding",
"sklearn.manifold.MDS",
"sklearn.manifold.TSNE",
"matplotlib.ticker.NullFormatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.suptitle",
"sklearn.datasets.samples_generator.make_s_curve",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
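A minimal numpy-only sketch of the weighted voting rule in kobra/tr_utils.py above: with all-ones weights vote() averages the probability arrays, otherwise each array is scaled by its weight and the scaled arrays are summed (the example arrays and weights are made up):

    import numpy as np

    proba_list = [np.array([[0.2, 0.8]]), np.array([[0.6, 0.4]])]
    weights = [0.75, 0.25]  # illustrative weights, not from the repo

    # Weighted branch of vote(): scale each classifier's probabilities, then sum.
    weighted = np.array([p * w for p, w in zip(proba_list, weights)]).sum(0)
    print(weighted)  # [[0.3 0.7]]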
Chocomunk/caltech-ee148-spring2020-hw01 | [
"f4a5a1450560018c6098f706fca27138cedf55a0"
] | [
"strategies.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage.exposure import equalize_adapthist\n\nfrom util import hsv2rgb, rgb2hsv, histogram_equalization, CLAHE, correlate, \\\n black_tophat\n\n\ndef threshold_strategy(I, r_b=85, r_t=145, g_b=0, g_t=0, b_b=75, b_t=130):\n img = black_tophat(I, 11)\n img = rgb2hsv(img)\n img = black_tophat(img, 11)\n return (r_b <= img[:,:,0]) & (img[:,:,0] <= r_t) & \\\n (g_b <= img[:,:,1]) & (img[:,:,1] <= g_t) & \\\n (b_b <= img[:,:,2]) & (img[:,:,2] <= b_t)\n\n\ndef correlate_strategy(I, filters):\n # Using scipy's equalize_adapthist is preferred for better balancing\n img = rgb2hsv(I)\n # img[:,:,2] = CLAHE(img[:,:,2], tile_size=(16,16))\n # img[:,:,2] = equalize_adapthist(img[:,:,2], clip_limit=0.03) * 255\n img[:,:,2] = histogram_equalization(img[:,:,2])\n img = hsv2rgb(img)\n\n # Find cossim against original image\n output = np.zeros(img.shape[:2], dtype=np.float32)\n for filt in filters:\n corr = correlate(img, filt, step=2)\n output = np.maximum(output, corr)\n return output, img"
] | [
[
"numpy.maximum",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
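A minimal numpy sketch of the per-channel band test used by threshold_strategy above, keeping only the red and blue bounds from its defaults (the random input image is made up for illustration):

    import numpy as np

    img = np.random.randint(0, 256, size=(4, 4, 3))  # placeholder HxWx3 image
    r_b, r_t, b_b, b_t = 85, 145, 75, 130            # defaults from threshold_strategy

    # Boolean mask of pixels whose red and blue channels fall inside the bands.
    mask = ((r_b <= img[:, :, 0]) & (img[:, :, 0] <= r_t) &
            (b_b <= img[:, :, 2]) & (img[:, :, 2] <= b_t))
    print(mask.shape, mask.dtype)  # (4, 4) bool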
dzwallkilled/pytorch-cifar10 | [
"fee201da5a3b516a57104e0b6338e05008079b8b"
] | [
"models/VGG.py"
] | [
"import torch.nn as nn\n\n\ncfg = {\n 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\nclass VGG(nn.Module):\n def __init__(self, vgg_name):\n super(VGG, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n self.classifier = nn.Linear(512, 10)\n\n def forward(self, x):\n out = self.features(x)\n out = out.view(out.size(0), -1)\n out = self.classifier(out)\n return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\ndef VGG11():\n return VGG('VGG11')\n\n\ndef VGG13():\n return VGG('VGG13')\n\n\ndef VGG16():\n return VGG('VGG16')\n\n\ndef VGG19():\n return VGG('VGG19')\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
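A minimal usage sketch for the VGG models above, assuming the repository root is on sys.path so that models/VGG.py is importable as models.VGG; the five 'M' poolings reduce a 32x32 CIFAR-10 input to 1x1, leaving a 512-vector for the Linear(512, 10) classifier:

    import torch
    from models.VGG import VGG16  # assumes 'models' is an importable package

    net = VGG16()
    x = torch.randn(2, 3, 32, 32)   # a CIFAR-10 sized batch
    logits = net(x)
    print(logits.shape)             # torch.Size([2, 10])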
asnt/moderngl | [
"b39cedd8cf216c34e43371b4aec822f6084f0f79",
"b39cedd8cf216c34e43371b4aec822f6084f0f79"
] | [
"docs/the_guide/first.3.py",
"examples/matplotlib_as_texture.py"
] | [
"import moderngl\nimport numpy as np\n\nctx = moderngl.create_standalone_context()\n\nprog = ctx.program(\n vertex_shader='''\n #version 330\n\n in vec2 in_vert;\n in vec3 in_color;\n\n out vec3 v_color;\n\n void main() {\n v_color = in_color;\n gl_Position = vec4(in_vert, 0.0, 1.0);\n }\n ''',\n fragment_shader='''\n #version 330\n\n in vec3 v_color;\n\n out vec3 f_color;\n\n void main() {\n f_color = v_color;\n }\n ''',\n)\n\nx = np.linspace(-1.0, 1.0, 50)\ny = np.random.rand(50) - 0.5\nr = np.ones(50)\ng = np.zeros(50)\nb = np.zeros(50)\n\nvertices = np.dstack([x, y, r, g, b])\n\nvbo = ctx.buffer(vertices.astype('f4').tobytes())\nvao = ctx.simple_vertex_array(prog, vbo, 'in_vert', 'in_color')\n",
"import io\n\nimport numpy as np\nfrom PIL import Image\n\nfrom basic_colors_and_texture import ColorsAndTexture\n\nimport matplotlib\nmatplotlib.use('svg')\nimport matplotlib.pyplot as plt\n\n\nclass MatplotlibTexture(ColorsAndTexture):\n title = \"Matplotlib as Texture\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n figure_size = (640, 360)\n\n temp = io.BytesIO()\n plt.figure(0, figsize=(figure_size[0] / 72, figure_size[1] / 72))\n\n mu, sigma = 100, 15\n x = mu + sigma * np.random.randn(10000)\n n, bins, patches = plt.hist(x, 50, normed=1, facecolor='r', alpha=0.75)\n\n plt.axis([40, 160, 0, 0.03])\n plt.grid(True)\n plt.show()\n\n plt.savefig(temp, format='raw', dpi=72)\n temp.seek(0)\n\n img = Image.frombytes('RGBA', figure_size, temp.read()).transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')\n self.texture = self.ctx.texture(img.size, 3, img.tobytes())\n self.texture.build_mipmaps()\n\n\nif __name__ == '__main__':\n MatplotlibTexture.run()\n"
] | [
[
"numpy.linspace",
"numpy.dstack",
"numpy.ones",
"numpy.random.rand",
"numpy.zeros"
],
[
"matplotlib.use",
"matplotlib.pyplot.savefig",
"numpy.random.randn",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
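A minimal numpy-only sketch of the vertex packing done in docs/the_guide/first.3.py above: per-vertex position (x, y) and colour (r, g, b) are interleaved with np.dstack and serialised as float32 bytes, which is the payload handed to ctx.buffer() (no GL context is needed for this part):

    import numpy as np

    x = np.linspace(-1.0, 1.0, 50)
    y = np.random.rand(50) - 0.5
    r, g, b = np.ones(50), np.zeros(50), np.zeros(50)

    vertices = np.dstack([x, y, r, g, b])       # shape (1, 50, 5): 5 floats per vertex
    payload = vertices.astype('f4').tobytes()   # what ctx.buffer() receives
    print(vertices.shape, len(payload))         # (1, 50, 5) 1000  (50 * 5 * 4 bytes)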
CV-IP/interfacegan | [
"5a556b8e693f6e1888f769f653aaafaaccca5dc2"
] | [
"models/pggan_tf_official/dataset_tool.py"
] | [
"# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\nimport os\nimport sys\nimport glob\nimport argparse\nimport threading\nimport six.moves.queue as Queue\nimport traceback\nimport numpy as np\nimport tensorflow as tf\nimport PIL.Image\n\nimport tfutil\nimport dataset\n\n#----------------------------------------------------------------------------\n\ndef error(msg):\n print('Error: ' + msg)\n exit(1)\n\n#----------------------------------------------------------------------------\n\nclass TFRecordExporter:\n def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):\n self.tfrecord_dir = tfrecord_dir\n self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))\n self.expected_images = expected_images\n self.cur_images = 0\n self.shape = None\n self.resolution_log2 = None\n self.tfr_writers = []\n self.print_progress = print_progress\n self.progress_interval = progress_interval\n if self.print_progress:\n print('Creating dataset \"%s\"' % tfrecord_dir)\n if not os.path.isdir(self.tfrecord_dir):\n os.makedirs(self.tfrecord_dir)\n assert(os.path.isdir(self.tfrecord_dir))\n \n def close(self):\n if self.print_progress:\n print('%-40s\\r' % 'Flushing data...', end='', flush=True)\n for tfr_writer in self.tfr_writers:\n tfr_writer.close()\n self.tfr_writers = []\n if self.print_progress:\n print('%-40s\\r' % '', end='', flush=True)\n print('Added %d images.' % self.cur_images)\n\n def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.\n order = np.arange(self.expected_images)\n np.random.RandomState(123).shuffle(order)\n return order\n\n def add_image(self, img):\n if self.print_progress and self.cur_images % self.progress_interval == 0:\n print('%d / %d\\r' % (self.cur_images, self.expected_images), end='', flush=True)\n if self.shape is None:\n self.shape = img.shape\n self.resolution_log2 = int(np.log2(self.shape[1]))\n assert self.shape[0] in [1, 3]\n assert self.shape[1] == self.shape[2]\n assert self.shape[1] == 2**self.resolution_log2\n tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)\n for lod in range(self.resolution_log2 - 1):\n tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)\n self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))\n assert img.shape == self.shape\n for lod, tfr_writer in enumerate(self.tfr_writers):\n if lod:\n img = img.astype(np.float32)\n img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25\n quant = np.rint(img).clip(0, 255).astype(np.uint8)\n ex = tf.train.Example(features=tf.train.Features(feature={\n 'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),\n 'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))\n tfr_writer.write(ex.SerializeToString())\n self.cur_images += 1\n\n def add_labels(self, labels):\n if self.print_progress:\n print('%-40s\\r' % 'Saving labels...', end='', flush=True)\n assert labels.shape[0] == self.cur_images\n with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:\n np.save(f, labels.astype(np.float32))\n \n def __enter__(self):\n return self\n \n def __exit__(self, 
*args):\n self.close()\n\n#----------------------------------------------------------------------------\n\nclass ExceptionInfo(object):\n def __init__(self):\n self.value = sys.exc_info()[1]\n self.traceback = traceback.format_exc()\n\n#----------------------------------------------------------------------------\n\nclass WorkerThread(threading.Thread):\n def __init__(self, task_queue):\n threading.Thread.__init__(self)\n self.task_queue = task_queue\n\n def run(self):\n while True:\n func, args, result_queue = self.task_queue.get()\n if func is None:\n break\n try:\n result = func(*args)\n except:\n result = ExceptionInfo()\n result_queue.put((result, args))\n\n#----------------------------------------------------------------------------\n\nclass ThreadPool(object):\n def __init__(self, num_threads):\n assert num_threads >= 1\n self.task_queue = Queue.Queue()\n self.result_queues = dict()\n self.num_threads = num_threads\n for idx in range(self.num_threads):\n thread = WorkerThread(self.task_queue)\n thread.daemon = True\n thread.start()\n\n def add_task(self, func, args=()):\n assert hasattr(func, '__call__') # must be a function\n if func not in self.result_queues:\n self.result_queues[func] = Queue.Queue()\n self.task_queue.put((func, args, self.result_queues[func]))\n\n def get_result(self, func): # returns (result, args)\n result, args = self.result_queues[func].get()\n if isinstance(result, ExceptionInfo):\n print('\\n\\nWorker thread caught an exception:\\n' + result.traceback)\n raise result.value\n return result, args\n\n def finish(self):\n for idx in range(self.num_threads):\n self.task_queue.put((None, (), None))\n\n def __enter__(self): # for 'with' statement\n return self\n\n def __exit__(self, *excinfo):\n self.finish()\n\n def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None):\n if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4\n assert max_items_in_flight >= 1\n results = []\n retire_idx = [0]\n\n def task_func(prepared, idx):\n return process_func(prepared)\n \n def retire_result():\n processed, (prepared, idx) = self.get_result(task_func)\n results[idx] = processed\n while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:\n yield post_func(results[retire_idx[0]])\n results[retire_idx[0]] = None\n retire_idx[0] += 1\n \n for idx, item in enumerate(item_iterator):\n prepared = pre_func(item)\n results.append(None)\n self.add_task(func=task_func, args=(prepared, idx))\n while retire_idx[0] < idx - max_items_in_flight + 2:\n for res in retire_result(): yield res\n while retire_idx[0] < len(results):\n for res in retire_result(): yield res\n\n#----------------------------------------------------------------------------\n\ndef display(tfrecord_dir):\n print('Loading dataset \"%s\"' % tfrecord_dir)\n tfutil.init_tf({'gpu_options.allow_growth': True})\n dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)\n tfutil.init_uninited_vars()\n \n idx = 0\n while True:\n try:\n images, labels = dset.get_minibatch_np(1)\n except tf.errors.OutOfRangeError:\n break\n if idx == 0:\n print('Displaying images')\n import cv2 # pip install opencv-python\n cv2.namedWindow('dataset_tool')\n print('Press SPACE or ENTER to advance, ESC to exit')\n print('\\nidx = %-8d\\nlabel = %s' % (idx, labels[0].tolist()))\n cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR\n idx += 1\n if 
cv2.waitKey() == 27:\n break\n print('\\nDisplayed %d images.' % idx)\n\n#----------------------------------------------------------------------------\n\ndef extract(tfrecord_dir, output_dir):\n print('Loading dataset \"%s\"' % tfrecord_dir)\n tfutil.init_tf({'gpu_options.allow_growth': True})\n dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)\n tfutil.init_uninited_vars()\n \n print('Extracting images to \"%s\"' % output_dir)\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n idx = 0\n while True:\n if idx % 10 == 0:\n print('%d\\r' % idx, end='', flush=True)\n try:\n images, labels = dset.get_minibatch_np(1)\n except tf.errors.OutOfRangeError:\n break\n if images.shape[1] == 1:\n img = PIL.Image.fromarray(images[0][0], 'L')\n else:\n img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')\n img.save(os.path.join(output_dir, 'img%08d.png' % idx))\n idx += 1\n print('Extracted %d images.' % idx)\n\n#----------------------------------------------------------------------------\n\ndef compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels):\n max_label_size = 0 if ignore_labels else 'full'\n print('Loading dataset \"%s\"' % tfrecord_dir_a)\n tfutil.init_tf({'gpu_options.allow_growth': True})\n dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0)\n print('Loading dataset \"%s\"' % tfrecord_dir_b)\n dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0)\n tfutil.init_uninited_vars()\n \n print('Comparing datasets')\n idx = 0\n identical_images = 0\n identical_labels = 0\n while True:\n if idx % 100 == 0:\n print('%d\\r' % idx, end='', flush=True)\n try:\n images_a, labels_a = dset_a.get_minibatch_np(1)\n except tf.errors.OutOfRangeError:\n images_a, labels_a = None, None\n try:\n images_b, labels_b = dset_b.get_minibatch_np(1)\n except tf.errors.OutOfRangeError:\n images_b, labels_b = None, None\n if images_a is None or images_b is None:\n if images_a is not None or images_b is not None:\n print('Datasets contain different number of images')\n break\n if images_a.shape == images_b.shape and np.all(images_a == images_b):\n identical_images += 1\n else:\n print('Image %d is different' % idx)\n if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b):\n identical_labels += 1\n else:\n print('Label %d is different' % idx)\n idx += 1\n print('Identical images: %d / %d' % (identical_images, idx))\n if not ignore_labels:\n print('Identical labels: %d / %d' % (identical_labels, idx))\n\n#----------------------------------------------------------------------------\n\ndef create_mnist(tfrecord_dir, mnist_dir):\n print('Loading MNIST from \"%s\"' % mnist_dir)\n import gzip\n with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:\n images = np.frombuffer(file.read(), np.uint8, offset=16)\n with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:\n labels = np.frombuffer(file.read(), np.uint8, offset=8)\n images = images.reshape(-1, 1, 28, 28)\n images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)\n assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8\n assert labels.shape == (60000,) and labels.dtype == np.uint8\n assert np.min(images) == 0 and np.max(images) == 255\n assert np.min(labels) == 0 and np.max(labels) == 9\n onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)\n onehot[np.arange(labels.size), 
labels] = 1.0\n \n with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:\n order = tfr.choose_shuffled_order()\n for idx in range(order.size):\n tfr.add_image(images[order[idx]])\n tfr.add_labels(onehot[order])\n\n#----------------------------------------------------------------------------\n\ndef create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):\n print('Loading MNIST from \"%s\"' % mnist_dir)\n import gzip\n with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:\n images = np.frombuffer(file.read(), np.uint8, offset=16)\n images = images.reshape(-1, 28, 28)\n images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)\n assert images.shape == (60000, 32, 32) and images.dtype == np.uint8\n assert np.min(images) == 0 and np.max(images) == 255\n \n with TFRecordExporter(tfrecord_dir, num_images) as tfr:\n rnd = np.random.RandomState(random_seed)\n for idx in range(num_images):\n tfr.add_image(images[rnd.randint(images.shape[0], size=3)])\n\n#----------------------------------------------------------------------------\n\ndef create_cifar10(tfrecord_dir, cifar10_dir):\n print('Loading CIFAR-10 from \"%s\"' % cifar10_dir)\n import pickle\n images = []\n labels = []\n for batch in range(1, 6):\n with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file:\n data = pickle.load(file, encoding='latin1')\n images.append(data['data'].reshape(-1, 3, 32, 32))\n labels.append(data['labels'])\n images = np.concatenate(images)\n labels = np.concatenate(labels)\n assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8\n assert labels.shape == (50000,) and labels.dtype == np.int32\n assert np.min(images) == 0 and np.max(images) == 255\n assert np.min(labels) == 0 and np.max(labels) == 9\n onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)\n onehot[np.arange(labels.size), labels] = 1.0\n\n with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:\n order = tfr.choose_shuffled_order()\n for idx in range(order.size):\n tfr.add_image(images[order[idx]])\n tfr.add_labels(onehot[order])\n\n#----------------------------------------------------------------------------\n\ndef create_cifar100(tfrecord_dir, cifar100_dir):\n print('Loading CIFAR-100 from \"%s\"' % cifar100_dir)\n import pickle\n with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:\n data = pickle.load(file, encoding='latin1')\n images = data['data'].reshape(-1, 3, 32, 32)\n labels = np.array(data['fine_labels'])\n assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8\n assert labels.shape == (50000,) and labels.dtype == np.int32\n assert np.min(images) == 0 and np.max(images) == 255\n assert np.min(labels) == 0 and np.max(labels) == 99\n onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)\n onehot[np.arange(labels.size), labels] = 1.0\n\n with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:\n order = tfr.choose_shuffled_order()\n for idx in range(order.size):\n tfr.add_image(images[order[idx]])\n tfr.add_labels(onehot[order])\n\n#----------------------------------------------------------------------------\n\ndef create_svhn(tfrecord_dir, svhn_dir):\n print('Loading SVHN from \"%s\"' % svhn_dir)\n import pickle\n images = []\n labels = []\n for batch in range(1, 4):\n with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file:\n data = pickle.load(file, encoding='latin1')\n images.append(data[0])\n labels.append(data[1])\n images = 
np.concatenate(images)\n labels = np.concatenate(labels)\n assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8\n assert labels.shape == (73257,) and labels.dtype == np.uint8\n assert np.min(images) == 0 and np.max(images) == 255\n assert np.min(labels) == 0 and np.max(labels) == 9\n onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)\n onehot[np.arange(labels.size), labels] = 1.0\n\n with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:\n order = tfr.choose_shuffled_order()\n for idx in range(order.size):\n tfr.add_image(images[order[idx]])\n tfr.add_labels(onehot[order])\n\n#----------------------------------------------------------------------------\n\ndef create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None):\n print('Loading LSUN dataset from \"%s\"' % lmdb_dir)\n import lmdb # pip install lmdb\n import cv2 # pip install opencv-python\n import io\n with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:\n total_images = txn.stat()['entries']\n if max_images is None:\n max_images = total_images\n with TFRecordExporter(tfrecord_dir, max_images) as tfr:\n for idx, (key, value) in enumerate(txn.cursor()):\n try:\n try:\n img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1)\n if img is None:\n raise IOError('cv2.imdecode failed')\n img = img[:, :, ::-1] # BGR => RGB\n except IOError:\n img = np.asarray(PIL.Image.open(io.BytesIO(value)))\n crop = np.min(img.shape[:2])\n img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]\n img = PIL.Image.fromarray(img, 'RGB')\n img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)\n img = np.asarray(img)\n img = img.transpose(2, 0, 1) # HWC => CHW\n tfr.add_image(img)\n except:\n print(sys.exc_info()[1])\n if tfr.cur_images == max_images:\n break\n \n#----------------------------------------------------------------------------\n\ndef create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):\n print('Loading CelebA from \"%s\"' % celeba_dir)\n glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')\n image_filenames = sorted(glob.glob(glob_pattern))\n expected_images = 202599\n if len(image_filenames) != expected_images:\n error('Expected to find %d images' % expected_images)\n \n with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:\n order = tfr.choose_shuffled_order()\n for idx in range(order.size):\n img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))\n assert img.shape == (218, 178, 3)\n img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]\n img = img.transpose(2, 0, 1) # HWC => CHW\n tfr.add_image(img)\n\n#----------------------------------------------------------------------------\n\ndef create_celebahq(tfrecord_dir, celeba_dir, delta_dir, num_threads=4, num_tasks=100):\n print('Loading CelebA from \"%s\"' % celeba_dir)\n expected_images = 202599\n if len(glob.glob(os.path.join(celeba_dir, 'img_celeba', '*.jpg'))) != expected_images:\n error('Expected to find %d images' % expected_images)\n with open(os.path.join(celeba_dir, 'Anno', 'list_landmarks_celeba.txt'), 'rt') as file:\n landmarks = [[float(value) for value in line.split()[1:]] for line in file.readlines()[2:]]\n landmarks = np.float32(landmarks).reshape(-1, 5, 2)\n \n print('Loading CelebA-HQ deltas from \"%s\"' % delta_dir)\n import scipy.ndimage\n import hashlib\n import bz2\n import zipfile\n import base64\n import cryptography.hazmat.primitives.hashes\n import cryptography.hazmat.backends\n import 
cryptography.hazmat.primitives.kdf.pbkdf2\n import cryptography.fernet\n expected_zips = 30\n if len(glob.glob(os.path.join(delta_dir, 'delta*.zip'))) != expected_zips:\n error('Expected to find %d zips' % expected_zips)\n with open(os.path.join(delta_dir, 'image_list.txt'), 'rt') as file:\n lines = [line.split() for line in file]\n fields = dict()\n for idx, field in enumerate(lines[0]):\n type = int if field.endswith('idx') else str\n fields[field] = [type(line[idx]) for line in lines[1:]]\n indices = np.array(fields['idx'])\n\n # Must use pillow version 3.1.1 for everything to work correctly.\n if getattr(PIL, 'PILLOW_VERSION', '') != '3.1.1':\n error('create_celebahq requires pillow version 3.1.1') # conda install pillow=3.1.1\n \n # Must use libjpeg version 8d for everything to work correctly.\n img = np.array(PIL.Image.open(os.path.join(celeba_dir, 'img_celeba', '000001.jpg')))\n md5 = hashlib.md5()\n md5.update(img.tobytes())\n if md5.hexdigest() != '9cad8178d6cb0196b36f7b34bc5eb6d3':\n error('create_celebahq requires libjpeg version 8d') # conda install jpeg=8d\n\n def rot90(v):\n return np.array([-v[1], v[0]])\n\n def process_func(idx):\n # Load original image.\n orig_idx = fields['orig_idx'][idx]\n orig_file = fields['orig_file'][idx]\n orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)\n img = PIL.Image.open(orig_path)\n\n # Choose oriented crop rectangle.\n lm = landmarks[orig_idx]\n eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5\n mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5\n eye_to_eye = lm[1] - lm[0]\n eye_to_mouth = mouth_avg - eye_avg\n x = eye_to_eye - rot90(eye_to_mouth)\n x /= np.hypot(*x)\n x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)\n y = rot90(x)\n c = eye_avg + eye_to_mouth * 0.1\n quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])\n zoom = 1024 / (np.hypot(*x) * 2)\n\n # Shrink.\n shrink = int(np.floor(0.5 / zoom))\n if shrink > 1:\n size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink)))\n img = img.resize(size, PIL.Image.ANTIALIAS)\n quad /= shrink\n zoom *= shrink\n\n # Crop.\n border = max(int(np.round(1024 * 0.1 / zoom)), 3)\n crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))\n crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))\n if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:\n img = img.crop(crop)\n quad -= crop[0:2]\n\n # Simulate super-resolution.\n superres = int(np.exp2(np.ceil(np.log2(zoom))))\n if superres > 1:\n img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS)\n quad *= superres\n zoom /= superres\n\n # Pad.\n pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))\n pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))\n if max(pad) > border - 4:\n pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))\n img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')\n h, w, _ = img.shape\n y, x, _ = np.mgrid[:h, :w, :1]\n mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum(np.float32(w-1-x) / pad[2], np.float32(h-1-y) / pad[3]))\n blur = 1024 * 0.02 / zoom\n img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask 
* 3.0 + 1.0, 0.0, 1.0)\n img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)\n img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')\n quad += pad[0:2]\n \n # Transform.\n img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)\n img = img.resize((1024, 1024), PIL.Image.ANTIALIAS)\n img = np.asarray(img).transpose(2, 0, 1)\n \n # Verify MD5.\n md5 = hashlib.md5()\n md5.update(img.tobytes())\n assert md5.hexdigest() == fields['proc_md5'][idx]\n \n # Load delta image and original JPG.\n with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip:\n delta_bytes = zip.read('delta%05d.dat' % idx)\n with open(orig_path, 'rb') as file:\n orig_bytes = file.read()\n \n # Decrypt delta image, using original JPG data as decryption key.\n algorithm = cryptography.hazmat.primitives.hashes.SHA256()\n backend = cryptography.hazmat.backends.default_backend()\n salt = bytes(orig_file, 'ascii')\n kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend)\n key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))\n delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024)\n \n # Apply delta image.\n img = img + delta\n \n # Verify MD5.\n md5 = hashlib.md5()\n md5.update(img.tobytes())\n assert md5.hexdigest() == fields['final_md5'][idx]\n return img\n\n with TFRecordExporter(tfrecord_dir, indices.size) as tfr:\n order = tfr.choose_shuffled_order()\n with ThreadPool(num_threads) as pool:\n for img in pool.process_items_concurrently(indices[order].tolist(), process_func=process_func, max_items_in_flight=num_tasks):\n tfr.add_image(img)\n\n#----------------------------------------------------------------------------\n\ndef create_from_images(tfrecord_dir, image_dir, shuffle):\n print('Loading images from \"%s\"' % image_dir)\n image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))\n if len(image_filenames) == 0:\n error('No input images found')\n \n img = np.asarray(PIL.Image.open(image_filenames[0]))\n resolution = img.shape[0]\n channels = img.shape[2] if img.ndim == 3 else 1\n if img.shape[1] != resolution:\n error('Input images must have the same width and height')\n if resolution != 2 ** int(np.floor(np.log2(resolution))):\n error('Input image resolution must be a power-of-two')\n if channels not in [1, 3]:\n error('Input images must be stored as RGB or grayscale')\n \n with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:\n order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))\n for idx in range(order.size):\n img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))\n if channels == 1:\n img = img[np.newaxis, :, :] # HW => CHW\n else:\n img = img.transpose(2, 0, 1) # HWC => CHW\n tfr.add_image(img)\n\n#----------------------------------------------------------------------------\n\ndef create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle):\n print('Loading HDF5 archive from \"%s\"' % hdf5_filename)\n import h5py # conda install h5py\n with h5py.File(hdf5_filename, 'r') as hdf5_file:\n hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3])\n with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr:\n order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0])\n for idx in range(order.size):\n 
tfr.add_image(hdf5_data[order[idx]])\n npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy'\n if os.path.isfile(npy_filename):\n tfr.add_labels(np.load(npy_filename)[order])\n\n#----------------------------------------------------------------------------\n\ndef execute_cmdline(argv):\n prog = argv[0]\n parser = argparse.ArgumentParser(\n prog = prog,\n description = 'Tool for creating, extracting, and visualizing Progressive GAN datasets.',\n epilog = 'Type \"%s <command> -h\" for more information.' % prog)\n \n subparsers = parser.add_subparsers(dest='command')\n subparsers.required = True\n def add_command(cmd, desc, example=None):\n epilog = 'Example: %s %s' % (prog, example) if example is not None else None\n return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)\n\n p = add_command( 'display', 'Display images in dataset.',\n 'display datasets/mnist')\n p.add_argument( 'tfrecord_dir', help='Directory containing dataset')\n \n p = add_command( 'extract', 'Extract images from dataset.',\n 'extract datasets/mnist mnist-images')\n p.add_argument( 'tfrecord_dir', help='Directory containing dataset')\n p.add_argument( 'output_dir', help='Directory to extract the images into')\n\n p = add_command( 'compare', 'Compare two datasets.',\n 'compare datasets/mydataset datasets/mnist')\n p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset')\n p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset')\n p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0)\n\n p = add_command( 'create_mnist', 'Create dataset for MNIST.',\n 'create_mnist datasets/mnist ~/downloads/mnist')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'mnist_dir', help='Directory containing MNIST')\n\n p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.',\n 'create_mnistrgb datasets/mnistrgb ~/downloads/mnist')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'mnist_dir', help='Directory containing MNIST')\n p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000)\n p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123)\n\n p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.',\n 'create_cifar10 datasets/cifar10 ~/downloads/cifar10')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'cifar10_dir', help='Directory containing CIFAR-10')\n\n p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.',\n 'create_cifar100 datasets/cifar100 ~/downloads/cifar100')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100')\n\n p = add_command( 'create_svhn', 'Create dataset for SVHN.',\n 'create_svhn datasets/svhn ~/downloads/svhn')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'svhn_dir', help='Directory containing SVHN')\n\n p = add_command( 'create_lsun', 'Create dataset for single LSUN category.',\n 'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'lmdb_dir', help='Directory containing LMDB database')\n p.add_argument( '--resolution', help='Output resolution (default: 
256)', type=int, default=256)\n p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None)\n\n p = add_command( 'create_celeba', 'Create dataset for CelebA.',\n 'create_celeba datasets/celeba ~/downloads/celeba')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'celeba_dir', help='Directory containing CelebA')\n p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89)\n p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121)\n\n p = add_command( 'create_celebahq', 'Create dataset for CelebA-HQ.',\n 'create_celebahq datasets/celebahq ~/downloads/celeba ~/downloads/celeba-hq-deltas')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'celeba_dir', help='Directory containing CelebA')\n p.add_argument( 'delta_dir', help='Directory containing CelebA-HQ deltas')\n p.add_argument( '--num_threads', help='Number of concurrent threads (default: 4)', type=int, default=4)\n p.add_argument( '--num_tasks', help='Number of concurrent processing tasks (default: 100)', type=int, default=100)\n\n p = add_command( 'create_from_images', 'Create dataset from a directory full of images.',\n 'create_from_images datasets/mydataset myimagedir')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'image_dir', help='Directory containing the images')\n p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)\n\n p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.',\n 'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5')\n p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')\n p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images')\n p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)\n\n args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])\n func = globals()[args.command]\n del args.command\n func(**vars(args))\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n execute_cmdline(sys.argv)\n\n#----------------------------------------------------------------------------\n"
] | [
[
"numpy.asarray",
"numpy.concatenate",
"numpy.all",
"numpy.max",
"numpy.round",
"numpy.hypot",
"tensorflow.python_io.TFRecordOptions",
"tensorflow.train.Int64List",
"numpy.pad",
"numpy.clip",
"numpy.arange",
"numpy.stack",
"tensorflow.python_io.TFRecordWriter",
"numpy.float32",
"numpy.load",
"numpy.min",
"numpy.median",
"numpy.rint",
"numpy.floor",
"numpy.array",
"numpy.random.RandomState",
"numpy.log2",
"numpy.fromstring"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
BAMresearch/ctsimu-toolbox | [
"2329fe0bba8a89061430649c043c70c58835a435"
] | [
"ctsimu/image.py"
] | [
"# -*- coding: UTF-8 -*-\r\n\"\"\"\r\nThis module provides classes for the virtual processing of images.\r\n\r\n* `Image` reads, stores, writes and handles image data.\r\n* `ImageFile` gathers information about an image file: file name, data type,\r\n byte order. It is used to instruct the `Image.read()` and `Image.save()`\r\n routines.\r\n* `ImageStack` represents a stack of images in the file system. It can be used\r\n in combination with a processing pipeline (see `ctsimu.processing`).\r\n* `ImageROI` defines a pixel region of interest in an image.\r\n\r\nImages\r\n------\r\nTo import a single image, you can specify its file name in the constructor\r\nand then use the `Image.read()` function to import it into the internal memory.\r\nIt will be stored in `Image.px` as a float64 NumPy array. When writing an\r\nimage using `Image.save()`, you have to specify the data type for the new file.\r\n\r\n from ctsimu.image import Image\r\n \r\n myImage = Image(\"example.tif\")\r\n myImage.read()\r\n \r\n # Mirror horizontally:\r\n myImage.flipHorizontal()\r\n \r\n myImage.save(\"example_mirrored.raw\", dataType=\"float32\")\r\n\r\n\r\nRAW File Handling\r\n-----------------\r\nTo read raw image data, its dimensions, data type, byte order and header size\r\nmust be specified:\r\n\r\n from ctsimu.image import Image\r\n\r\n myImage = Image(\"example_mirrored.raw\")\r\n myImage.read(width=501,\r\n height=501,\r\n dataType=\"float32\",\r\n byteOrder=\"little\",\r\n fileHeaderSize=0)\r\n\r\n # Export as big endian, uint16:\r\n myImage.save(\"example_converted.raw\",\r\n dataType=\"uint16\",\r\n byteOrder=\"big\")\r\n\r\n\"\"\"\r\n\r\nimport numpy\r\nimport os # File and path handling\r\nimport sys # To get native byte order ('little' or 'big' endian?)\r\nimport math\r\nimport copy\r\nfrom numpy.random import default_rng\r\n\r\n# Scipy:\r\n# 'ndimage' class for image processing\r\n# 'optimize' class for intensity fit\r\n# 'signal' class for drift analysis using FFT Convolution\r\nfrom scipy import ndimage, optimize, stats, signal, fft\r\n\r\nfrom .helpers import *\r\nfrom .primitives import * # Vectors and Polygons\r\nfrom .tiffy import tiff\r\n\r\n# pixelHalfDiagonal: longest distance a pixel center can have from a line\r\n# while still touching the line with a corner point:\r\npixelHalfDiagonal = 1.0/math.sqrt(2.0)\r\n\r\ndef isTIFF(filename: str) -> bool:\r\n \"\"\"Check if file name signifies a TIFF image.\"\"\"\r\n if filename is not None:\r\n if(filename.casefold().endswith('.tif') or filename.casefold().endswith('.tiff')):\r\n return True\r\n \r\n return False\r\n\r\ndef createImageStack(stack):\r\n \"\"\" Return an ImageStack object, if string is given. 
\"\"\"\r\n if isinstance(stack, ImageStack):\r\n return stack\r\n elif isinstance(stack, str):\r\n return ImageStack(stack)\r\n elif stack is None:\r\n return None\r\n else:\r\n raise Exception(\"Not a valid image file stack definition: {}\".format(stack))\r\n\r\nclass ImageFile:\r\n \"\"\"Fundamental image file properties used for input and output.\"\"\"\r\n\r\n def __init__(self, filename=None, dataType=None, byteOrder=None, flipByteOrder=False):\r\n self.filename = None\r\n self.dataType = None\r\n self.byteOrder = None # 'little' or 'big' endian\r\n self.flipByteOrder = False\r\n\r\n self.setFilename(filename)\r\n self.setDataType(dataType)\r\n self.setByteOrder(byteOrder)\r\n self.setFlipByteOrder(flipByteOrder)\r\n\r\n def setFilename(self, filename):\r\n self.filename = filename\r\n\r\n def getFilename(self) -> str:\r\n return self.filename\r\n\r\n def getFileBasename(self) -> str:\r\n return os.path.basename(self.filename)\r\n\r\n def getDataType(self) -> str:\r\n return self.dataType\r\n\r\n def getByteOrder(self) -> str:\r\n return self.byteOrder\r\n\r\n def doFlipByteOrder(self) -> bool:\r\n return self.flipByteOrder\r\n\r\n def setDataType(self, dataType: str):\r\n \"\"\" Set data type, either from numpy.dtype object or string. \"\"\"\r\n if isinstance(dataType, numpy.dtype):\r\n self.dataType = dataType\r\n elif dataType is None:\r\n self.dataType = None\r\n elif isinstance(dataType, str): # from string\r\n dt = numpy.dtype(dataType)\r\n self.setDataType(dt)\r\n else:\r\n raise Exception(\"{} is generally not a valid data type.\".format(dataType))\r\n\r\n def setByteOrder(self, byteOrder: str):\r\n \"\"\" Set endianness, do sanity check before. \"\"\"\r\n if byteOrder=='little' or byteOrder=='big' or byteOrder==None:\r\n self.byteOrder = byteOrder\r\n else:\r\n raise Exception(\"{} is not a valid byte order. Must be 'little' or 'big'.\".format(byteOrder))\r\n\r\n def setFlipByteOrder(self, flipByteOrder: bool):\r\n self.flipByteOrder = flipByteOrder\r\n\r\n def isInt(self) -> bool:\r\n \"\"\" True if data type is supported int data type. \"\"\"\r\n return numpy.issubdtype(self.dataType, numpy.integer)\r\n\r\n def isFloat(self) -> bool:\r\n \"\"\" True if data type is supported float data type. \"\"\"\r\n return numpy.issubdtype(self.dataType, numpy.floating)\r\n\r\nclass ImageROI:\r\n \"\"\" Defines a region of interest: upper left and lower right corner. \"\"\"\r\n\r\n def __init__(self, x0, y0, x1, y1):\r\n self.x0 = 0\r\n self.y0 = 0\r\n self.x1 = 0\r\n self.y1 = 0\r\n self.set(x0, y0, x1, y1)\r\n\r\n def __str__(self):\r\n return \"({x0}, {y0}) -- ({x1}, {y1})\".format(x0=self.x0, y0=self.y0, x1=self.x1, y1=self.y1)\r\n\r\n def set(self, x0, y0, x1, y1):\r\n if x1 < x0:\r\n x0, x1 = x1, x0\r\n\r\n if y1 < y0:\r\n y0, y1 = y1, y0\r\n\r\n self.x0 = int(x0)\r\n self.y0 = int(y0)\r\n self.x1 = int(x1)\r\n self.y1 = int(y1)\r\n\r\n def width(self):\r\n return self.x1 - self.x0\r\n\r\n def height(self):\r\n return self.y1 - self.y0\r\n\r\n def area(self):\r\n return self.width()*self.height()\r\n\r\n def grow(self, amount):\r\n amount = int(amount)\r\n self.set(self.x0-amount, self.y0-amount, self.x1+amount, self.y1+amount)\r\n\r\n\r\nclass Image:\r\n \"\"\" Stores pixel data, provides image processing routines. 
\"\"\"\r\n\r\n def __init__(self, inputFile=None, outputFile=None):\r\n self.inputFile = None # type ImageFile or string\r\n self.outputFile = None # type ImageFile or string\r\n self.px = 0 # 2D numpy array that contains the pixel values.\r\n self.height = 0 # Image height in px.\r\n self.width = 0 # Image width in px.\r\n self.index = 0 # Slice number in a 3D volume.\r\n\r\n self.rotation = None\r\n self.flipHorz = False\r\n self.flipVert = False\r\n\r\n self.n_accumulations = 0 # Counts number of accumulated pictures for averaging (mean)\r\n self.boundingBoxX0 = 0 # After cropping: bounding box offset relative to original image.\r\n self.boundingBoxY0 = 0\r\n self.resolution = 1 # After binning: new resolution relative to original image.\r\n\r\n self.setInputFile(inputFile)\r\n self.setOutputFile(outputFile)\r\n\r\n def __add__(self, other):\r\n if self.dimensionsMatch(other):\r\n result = copy.deepcopy(self)\r\n result.px += other.px\r\n return result\r\n else:\r\n raise Exception(\"Cannot add images of different dimensions.\")\r\n\r\n def __sub__(self, other):\r\n if self.dimensionsMatch(other):\r\n result = copy.deepcopy(self)\r\n result.px -= other.px\r\n return result\r\n else:\r\n raise Exception(\"Cannot subtract images of different dimensions.\")\r\n\r\n def __mul__(self, other):\r\n if self.dimensionsMatch(other):\r\n result = copy.deepcopy(self)\r\n result.px *= other.px\r\n return result\r\n else:\r\n raise Exception(\"Cannot multiply images of different dimensions.\")\r\n\r\n def __truediv__(self, other):\r\n if self.dimensionsMatch(other):\r\n result = copy.deepcopy(self)\r\n result.px[numpy.nonzero(other.px)] /= other.px[numpy.nonzero(other.px)]\r\n result.px = numpy.where(other.px==0, 0, result.px)\r\n return result\r\n else:\r\n raise Exception(\"Cannot divide images of different dimensions.\")\r\n\r\n def __floordiv__(self, other):\r\n if self.dimensionsMatch(other):\r\n result = copy.deepcopy(self)\r\n result.px[numpy.nonzero(other.px)] //= other.px[numpy.nonzero(other.px)]\r\n result = numpy.where(other.px==0, 0, result.px)\r\n return result\r\n else:\r\n raise Exception(\"Cannot divide images of different dimensions.\")\r\n\r\n def __del__(self):\r\n \"\"\" Delete pixel map upon object destruction. \"\"\"\r\n self.px =0\r\n\r\n def setInputFile(self, inputFile):\r\n \"\"\" Set input file properties from ImageFile object or string. \"\"\"\r\n if isinstance(inputFile, ImageFile) or (inputFile is None):\r\n self.inputFile = inputFile\r\n elif isinstance(inputFile, str): # string given\r\n self.inputFile = ImageFile(inputFile)\r\n else:\r\n raise Exception(\"{} is not a valid file identifier.\")\r\n\r\n def setOutputFile(self, outputFile):\r\n \"\"\" Set output file properties from ImageFile object or string. \"\"\"\r\n if isinstance(outputFile, ImageFile) or (outputFile is None):\r\n self.outputFile = outputFile\r\n elif isinstance(outputFile, str): # string given\r\n self.outputFile = ImageFile(outputFile)\r\n else:\r\n raise Exception(\"{} is not a valid file identifier.\")\r\n\r\n def setHeight(self, height):\r\n \"\"\" Set image height in px. \"\"\"\r\n self.height = height\r\n\r\n def setWidth(self, width):\r\n \"\"\" Set image width in px. \"\"\"\r\n self.width = width\r\n\r\n def setIndex(self, index):\r\n \"\"\" Set image index position in 3D stack (in px). \"\"\"\r\n self.index = index\r\n\r\n def shape(self, width, height, index=0, dataType=None, value=0):\r\n \"\"\" Re-format image to given dimensions and data type. 
\"\"\"\r\n self.setWidth(width)\r\n self.setHeight(height)\r\n self.setIndex(index)\r\n\r\n if dataType is None:\r\n dataType = self.getInternalDataType()\r\n\r\n self.erase(value=0, dataType=dataType)\r\n\r\n def shapeLike(self, otherImg, dataType=None):\r\n self.setWidth(otherImg.getWidth())\r\n self.setHeight(otherImg.getHeight())\r\n self.setIndex(otherImg.getIndex())\r\n\r\n if dataType is None:\r\n dataType = otherImg.getInternalDataType()\r\n\r\n self.erase(value=0, dataType=dataType)\r\n\r\n def erase(self, value=0, dataType=None):\r\n \"\"\" Set all pixels to 'value'. \"\"\"\r\n w = self.getWidth()\r\n h = self.getHeight()\r\n\r\n if dataType is None:\r\n dataType = self.getInternalDataType()\r\n\r\n self.px = 0\r\n self.px = numpy.full((h, w), fill_value=value, dtype=dataType)\r\n \r\n def getPixelMap(self):\r\n return self.px\r\n\r\n def setPixelMap(self, px):\r\n self.px = px\r\n\r\n def setPixel(self, x, y, value):\r\n self.px[y][x] = value\r\n\r\n def getPixel(self, x, y):\r\n return self.px[y][x]\r\n\r\n def isSet(self):\r\n \"\"\" Check if image has a valid width and height. \"\"\"\r\n if(self.getHeight() > 0):\r\n if(self.getWidth() > 0):\r\n return True\r\n\r\n return False\r\n\r\n def contains(self, x, y):\r\n \"\"\" Check if (x, y) is within image dimensions. \"\"\"\r\n if x >= 0:\r\n if y >= 0:\r\n if x < self.getWidth():\r\n if y < self.getHeight():\r\n return True\r\n\r\n return False\r\n\r\n def getWidth(self):\r\n return self.width\r\n\r\n def getHeight(self):\r\n return self.height\r\n\r\n def getNPixels(self):\r\n \"\"\" Calculate number of pixels in image. \"\"\"\r\n return (self.getWidth() * self.getHeight())\r\n\r\n def getIndex(self):\r\n return self.index\r\n\r\n def getBoundingBoxX0(self):\r\n return self.boundingBoxX0\r\n\r\n def getBoundingBoxY0(self):\r\n return self.boundingBoxY0\r\n\r\n def getResolution(self):\r\n return self.resolution\r\n\r\n def getFileByteOrder(self):\r\n return self.fileByteOrder\r\n\r\n def max(self, ROI=None):\r\n \"\"\" Return maximum intensity in image. \"\"\"\r\n\r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n return numpy.amax(self.px)\r\n\r\n return numpy.amax(self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1])\r\n\r\n def min(self, ROI=None):\r\n \"\"\" Return minimum intensity in image. \"\"\"\r\n\r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n return numpy.amin(self.px)\r\n\r\n return numpy.amin(self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1])\r\n\r\n def mean(self, ROI=None):\r\n \"\"\" Return arithmetic mean of the image grey values. \"\"\"\r\n \r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n return numpy.mean(self.px)\r\n\r\n return numpy.mean(self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1])\r\n\r\n def stdDev(self, ROI=None):\r\n \"\"\" Return the standard deviation of the image grey values. 
\"\"\"\r\n\r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n return numpy.std(self.px)\r\n\r\n return numpy.std(self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1])\r\n\r\n def centerOfMass(self):\r\n return ndimage.center_of_mass(self.px)\r\n\r\n def setRotation(self, rotation):\r\n self.rotation = rotation\r\n\r\n def getRotation(self):\r\n return self.rotation\r\n\r\n def rot90(self):\r\n if self.isSet():\r\n self.px = numpy.require(numpy.rot90(self.px, k=1), requirements=['C_CONTIGUOUS'])\r\n self.width, self.height = self.height, self.width\r\n\r\n def rot180(self):\r\n if self.isSet():\r\n self.px = numpy.require(numpy.rot90(self.px, k=2), requirements=['C_CONTIGUOUS'])\r\n\r\n def rot270(self):\r\n if self.isSet():\r\n self.px = numpy.require(numpy.rot90(self.px, k=-1), requirements=['C_CONTIGUOUS'])\r\n self.width, self.height = self.height, self.width\r\n\r\n def rotate(self, rotation):\r\n if rotation is None:\r\n rotation = self.rotation\r\n else:\r\n self.setRotation(rotation)\r\n\r\n if rotation == \"90\":\r\n self.rot90()\r\n elif rotation == \"180\":\r\n self.rot180()\r\n elif rotation == \"270\":\r\n self.rot270()\r\n\r\n def flipHorizontal(self):\r\n self.flipHorz = not self.flipHorz\r\n if self.isSet():\r\n self.px = numpy.require(numpy.fliplr(self.px), requirements=['C_CONTIGUOUS'])\r\n\r\n def flipVertical(self):\r\n self.flipVert = not self.flipVert\r\n if self.isSet():\r\n self.px = numpy.require(numpy.flipud(self.px), requirements=['C_CONTIGUOUS'])\r\n\r\n def setFlip(self, horz=False, vert=False):\r\n self.flipHorz = horz\r\n self.flipVert = vert\r\n\r\n def getHorizontalFlip(self):\r\n return self.flipHorz\r\n\r\n def getVerticalFlip(self):\r\n return self.flipVert\r\n\r\n def flip(self, horizontal=False, vertical=False):\r\n if horizontal:\r\n self.flipHorizontal()\r\n if vertical:\r\n self.flipVertical()\r\n\r\n def getInternalDataType(self):\r\n \"\"\" Data type used internally for all image data. \"\"\"\r\n return numpy.dtype('float64')\r\n\r\n def containsPixelValue(self, value):\r\n \"\"\" Check if image contains a certain grey value. \"\"\"\r\n return numpy.any(self.px == value)\r\n\r\n def dimensionsMatch(self, img):\r\n \"\"\" Check if image dimensions match with another image. \"\"\"\r\n if self.isSet() and img.isSet():\r\n if(self.getHeight() == img.getHeight()):\r\n if(self.getWidth() == img.getWidth()):\r\n return True\r\n\r\n raise Exception(\"Pixel dimensions do not match: {}x{} vs. {}x{}\".format(self.getWidth(), self.getHeight(), img.getWidth(), img.getHeight()))\r\n \r\n return False\r\n\r\n def read(self, filename=None, width=None, height=None, index=0, dataType=None, byteOrder=None, fileHeaderSize=0, imageHeaderSize=0):\r\n \"\"\" Read TIFF or RAW, decide by file name. \"\"\"\r\n if filename is None:\r\n filename = self.inputFile.getFilename()\r\n else:\r\n self.setInputFile(filename)\r\n\r\n # If no internal file name is specified, do nothing.\r\n if filename is None:\r\n return\r\n\r\n if isTIFF(self.inputFile.getFilename()):\r\n self.readTIFF(self.inputFile.doFlipByteOrder())\r\n else:\r\n self.readRAW(width=width, height=height, index=index, dataType=dataType, byteOrder=byteOrder, fileHeaderSize=fileHeaderSize, imageHeaderSize=imageHeaderSize)\r\n\r\n def readTIFF(self, flipByteOrder=False, obeyOrientation=True):\r\n \"\"\" Import TIFF file. 
\"\"\"\r\n if os.path.isfile(self.inputFile.getFilename()):\r\n basename = self.inputFile.getFileBasename()\r\n \r\n tiffimg = tiff()\r\n tiffimg.read(self.inputFile.getFilename())\r\n img = tiffimg.imageData(subfile=0, channel=0, obeyOrientation=obeyOrientation) # get a greyscale image from TIFF subfile 0\r\n width = tiffimg.getWidth(subfile=0)\r\n height = tiffimg.getHeight(subfile=0)\r\n\r\n self.inputFile.setDataType(img.dtype) \r\n\r\n if flipByteOrder:\r\n img.byteswap(inplace=True)\r\n\r\n # Convert to internal data type for either int or float:\r\n self.px = img.astype(self.getInternalDataType())\r\n\r\n # Check if array in memory has the dimensions stated in the TIFF file:\r\n if((height == len(self.px)) and (width == len(self.px[0]))):\r\n self.setHeight(height)\r\n self.setWidth(width)\r\n else:\r\n raise Exception(\"Width ({}px) and height ({}px) from the TIFF header do not match the data width ({}px) and height ({}px) that has been read.\".format(width, height, len(self.px[0]), len(self.px)))\r\n else:\r\n raise Exception(\"Can't find \" + self.inputFile.getFilename())\r\n\r\n def readRAW(self, width, height, index=0, dataType=None, byteOrder=None, fileHeaderSize=0, imageHeaderSize=0):\r\n \"\"\" Import RAW image file. \"\"\"\r\n if not isinstance(self.inputFile, ImageFile):\r\n raise Exception(\"No valid input file defined.\")\r\n\r\n if dataType is None:\r\n dataType = self.inputFile.getDataType()\r\n else:\r\n self.inputFile.setDataType(dataType)\r\n\r\n if byteOrder is None:\r\n byteOrder = self.inputFile.getByteOrder()\r\n if byteOrder is None:\r\n byteOrder = sys.byteorder\r\n\r\n self.inputFile.setByteOrder(byteOrder)\r\n\r\n if os.path.isfile(self.inputFile.getFilename()):\r\n self.shape(width, height, index, self.inputFile.getDataType())\r\n\r\n basename = self.inputFile.getFileBasename()\r\n #log(\"Reading RAW file {}...\".format(basename))\r\n\r\n byteOffset = fileHeaderSize + (index+1)*imageHeaderSize + index*(self.getNPixels() * self.inputFile.getDataType().itemsize)\r\n\r\n with open(self.inputFile.getFilename(), 'rb') as f:\r\n f.seek(byteOffset)\r\n self.px = numpy.fromfile(f, dtype=self.inputFile.getDataType(), count=self.getNPixels(), sep=\"\")\r\n\r\n if len(self.px) > 0:\r\n # Treat endianness. 
If the native byte order of the system is different\r\n # than the given file byte order, the bytes are swapped in memory\r\n # so that it matches the native byte order.\r\n nativeEndian = sys.byteorder\r\n if nativeEndian == 'little':\r\n if byteOrder == 'big':\r\n self.px.byteswap(inplace=True)\r\n elif nativeEndian == 'big':\r\n if byteOrder == 'little':\r\n self.px.byteswap(inplace=True)\r\n\r\n # Convert to internal data type:\r\n self.px = self.px.astype(self.getInternalDataType())\r\n\r\n # Reshape to 2D array:\r\n self.px = numpy.reshape(self.px, (height, width))\r\n else:\r\n raise Exception(\"Error reading RAW file {f}.\\nGot no data for index {idx}.\".format(f=self.inputFile.getFilename(), idx=index))\r\n\r\n else:\r\n raise Exception(\"Can't find \" + self.inputFile.getFilename())\r\n\r\n def getDataTypeClippingBoundaries(self, dataType):\r\n # Get clipping boundaries if grey values have to be\r\n # clipped to the interval supported by the int image type:\r\n clipMin = 0\r\n clipMax = 1\r\n if numpy.issubdtype(dataType, numpy.integer):\r\n intInfo = numpy.iinfo(dataType)\r\n clipMin = intInfo.min\r\n clipMax = intInfo.max\r\n elif numpy.issubdtype(dataType, numpy.floating):\r\n floatInfo = numpy.finfo(dataType)\r\n clipMin = floatInfo.min\r\n clipMax = floatInfo.max\r\n\r\n return clipMin, clipMax\r\n\r\n def touchFolder(self, filename):\r\n \"\"\" Check if folder exists. Otherwise, create. \"\"\"\r\n folder = os.path.dirname(filename)\r\n if folder == \"\" or folder is None:\r\n folder = \".\"\r\n if not os.path.exists(folder):\r\n os.makedirs(folder)\r\n\r\n def save(self, filename=None, dataType=None, byteOrder=None, appendChunk=False, clipValues=True):\r\n \"\"\" Save image as TIFF or RAW. \"\"\"\r\n if not isinstance(self.outputFile, ImageFile):\r\n self.outputFile = ImageFile()\r\n\r\n if (filename is None) or (filename == \"\"):\r\n filename = self.outputFile.getFilename()\r\n if (filename is None) or (filename == \"\"):\r\n raise Exception(\"No output file name specified.\")\r\n else:\r\n self.outputFile.setFilename(filename)\r\n\r\n if dataType is None:\r\n dataType = self.outputFile.getDataType()\r\n if dataType is None:\r\n if isinstance(self.inputFile, ImageFile):\r\n dataType = self.inputFile.getDataType()\r\n if(dataType != None):\r\n self.outputFile.setDataType(dataType)\r\n else:\r\n raise Exception(\"Please specify a data type for the output file: {filename}\".format(filename=filename))\r\n else:\r\n raise Exception(\"Please specify a data type for the output file: {filename}\".format(filename=filename))\r\n else:\r\n self.outputFile.setDataType(dataType)\r\n\r\n if byteOrder is None:\r\n byteOrder = self.outputFile.getByteOrder()\r\n if byteOrder is None:\r\n if isinstance(self.inputFile, ImageFile):\r\n byteOrder = self.inputFile.getByteOrder()\r\n self.outputFile.setByteOrder(byteOrder)\r\n\r\n if byteOrder is None:\r\n byteOrder = \"little\"\r\n\r\n self.outputFile.setByteOrder(byteOrder)\r\n\r\n if isTIFF(filename):\r\n self.saveTIFF(filename, dataType, clipValues)\r\n else:\r\n self.saveRAW(filename, dataType, byteOrder, appendChunk, clipValues, addInfo=False)\r\n\r\n def saveTIFF(self, filename=None, dataType=None, clipValues=True):\r\n if (filename != None) and (len(filename) > 0):\r\n fileBaseName = os.path.basename(filename)\r\n if (fileBaseName == \"\") or (fileBaseName is None):\r\n raise Exception(\"No output file name specified for the image to be saved.\")\r\n\r\n if dataType != None:\r\n if not isTIFF(filename):\r\n filename += \".tif\"\r\n\r\n 
self.touchFolder(filename)\r\n \r\n tiffdata = None\r\n if clipValues: # Clipping\r\n clipMin, clipMax = self.getDataTypeClippingBoundaries(dataType)\r\n tiffdata = numpy.clip(self.px, clipMin, clipMax).astype(dataType)\r\n else: # No clipping or float\r\n tiffdata = self.px.astype(dataType)\r\n\r\n tiffimg = tiff()\r\n tiffimg.set(tiffdata)\r\n tiffimg.save(filename=filename, endian='little')\r\n else:\r\n raise Exception(\"Please specify a data type for the output file: {filename}\".format(filename=filename))\r\n else:\r\n raise Exception(\"No output file name specified for the image to be saved.\")\r\n \r\n def saveRAW(self, filename=None, dataType=None, byteOrder=None, appendChunk=False, clipValues=True, addInfo=False):\r\n if (filename != None) and (len(filename) > 0):\r\n fileBaseName = os.path.basename(filename)\r\n if (fileBaseName == \"\") or (fileBaseName is None):\r\n raise Exception(\"No output file name specified for the image to be saved.\")\r\n\r\n if dataType != None:\r\n if byteOrder is None:\r\n byteOrder = \"little\"\r\n\r\n # Reshape to 1D array and convert to file data type (from internal 64bit data type)\r\n outBytes = numpy.reshape(self.px, int(self.width)*int(self.height))\r\n\r\n if clipValues: # Clipping\r\n clipMin, clipMax = self.getDataTypeClippingBoundaries(dataType)\r\n outBytes = numpy.clip(outBytes, clipMin, clipMax)\r\n\r\n outBytes = outBytes.astype(dataType)\r\n\r\n # Treat endianness. If the native byte order of the system is different\r\n # than the desired file byte order, the bytes are swapped in memory\r\n # before writing to disk.\r\n nativeEndian = sys.byteorder\r\n if nativeEndian == 'little':\r\n if byteOrder == 'big':\r\n outBytes.byteswap(inplace=True)\r\n elif nativeEndian == 'big':\r\n if byteOrder == 'little':\r\n outBytes.byteswap(inplace=True)\r\n\r\n if addInfo:\r\n shortEndian = \"LE\"\r\n if byteOrder == \"big\":\r\n shortEndian = \"BE\"\r\n\r\n infoString = \"_{width}x{height}_{dataType}_{endian}\".format(width=self.width, height=self.height, dataType=dataType, endian=shortEndian)\r\n\r\n basename, extension = os.path.splitext(filename)\r\n filename = basename + infoString + extension\r\n\r\n self.touchFolder(filename)\r\n if not appendChunk: # save as single raw file\r\n with open(filename, 'w+b') as file:\r\n file.write(outBytes)\r\n file.close()\r\n #outBytes.tofile(filename, sep=\"\")\r\n else: # append to the bytes of the chunk file\r\n with open(filename, 'a+b') as file:\r\n file.write(outBytes)\r\n file.close()\r\n else:\r\n raise Exception(\"Please specify a data type for the output file: {filename}\".format(filename=filename))\r\n else:\r\n raise Exception(\"No output file name specified for the image to be saved.\")\r\n\r\n def calcRelativeShift(self, referenceImage):\r\n if self.dimensionsMatch(referenceImage):\r\n # Convolution of this pixmap with the vertically and horizontally mirrored reference pixmap\r\n img1 = self.px - int(numpy.mean(self.px))\r\n img2 = referenceImage.getPixelMap() - numpy.mean(referenceImage.getPixelMap())\r\n\r\n convolution = signal.fftconvolve(img1, img2[::-1,::-1], mode='same')\r\n\r\n maximum = numpy.unravel_index(numpy.argmax(convolution), convolution.shape)\r\n\r\n return (maximum[1] - self.getWidth()/2, maximum[0] - self.getHeight()/2)\r\n else:\r\n raise Exception(\"Dimensions of image ({}, {}) and reference image ({}, {}) must match for convolution.\".format(self.getWidth(), self.getHeight(), referenceImage.getWidth(), referenceImage.getHeight()))\r\n\r\n def getShiftedPixmap(self, 
xShift, yShift):\r\n return ndimage.interpolation.shift(self.px, (int(xShift), int(yShift)), mode='nearest')\r\n\r\n def accumulate(self, addImg, compensateShift=False, roiX0=None, roiY0=None, roiX1=None, roiY1=None):\r\n if (compensateShift == True) and (self.n_accumulations > 0):\r\n shift = (0, 0)\r\n\r\n if (roiX0 is None) or (roiY0 is None) or (roiX1 is None) or (roiY1 is None):\r\n shift = self.calcRelativeShift(addImg)\r\n else:\r\n # Crop image to drift ROI,\r\n croppedRef = copy.deepcopy(self)\r\n croppedRef.crop(x0=roiX0, y0=roiY0, x1=roiX1, y1=roiY1)\r\n\r\n croppedImg = copy.deepcopy(addImg)\r\n croppedImg.crop(x0=roiX0, y0=roiY0, x1=roiX1, y1=roiY1)\r\n\r\n shift = croppedImg.calcRelativeShift(croppedRef)\r\n\r\n log(\"Shift: {}\".format(shift))\r\n shiftedPixMap = addImg.getShiftedPixmap(shift[1], shift[0])\r\n addImg.setPixelMap(shiftedPixMap)\r\n\r\n if self.n_accumulations == 0:\r\n self.setPixelMap(addImg.getPixelMap())\r\n else:\r\n if (self.dimensionsMatch(addImg)):\r\n self.px += addImg.getPixelMap()\r\n else:\r\n raise Exception(\"Current pixel dimensions ({currentX}x{currentY}) don't match dimensions of new file ({newX}x{newY}): {filename}\".format(currentX=self.getWidth(), currentY=self.getHeight(), newX=addImg.getWidth(), newY=addImg.getHeight(), filename=addImg.inputFile.getFilename()))\r\n\r\n self.n_accumulations += 1\r\n\r\n def resetAccumulations(self):\r\n self.n_accumulations = 0\r\n\r\n def averageAccumulations(self):\r\n if self.n_accumulations > 1:\r\n self.px = self.px / self.n_accumulations\r\n log(\"Accumulated and averaged {} images.\".format(self.n_accumulations))\r\n self.n_accumulations = 1\r\n\r\n def applyDark(self, dark):\r\n \"\"\" Apply dark image correction (offset). \"\"\"\r\n if self.dimensionsMatch(dark):\r\n self.px = self.px - dark.getPixelMap()\r\n else:\r\n raise Exception(\"The dimensions of the image do not match the dimensions of the dark image for offset correction.\")\r\n\r\n def applyFlatfield(self, ref, rescaleFactor=1):\r\n \"\"\" Apply flat field correction (free beam white image / gain correction). 
\"\"\"\r\n if self.dimensionsMatch(ref):\r\n if(not ref.containsPixelValue(0)): # avoid division by zero\r\n self.px = (self.px / ref.getPixelMap()) * float(rescaleFactor)\r\n else: # avoid division by zero\r\n self.px = (self.px / numpy.clip(ref.getPixelMap(), 0.1, None)) * float(rescaleFactor)\r\n else:\r\n raise Exception(\"The dimensions of the image do not match the dimensions of the flat image for flat field correction.\")\r\n\r\n def verticalProfile(self, xPos):\r\n if xPos < self.getWidth():\r\n return numpy.ravel(self.px[:,xPos])\r\n else:\r\n raise Exception(\"Requested position for vertical profile is out of bounds: x={} in an image that has {} rows.\".format(xPos, self.getWidth()))\r\n\r\n def verticalROIProfile(self, ROI):\r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n ROI = ImageROI(0, 0, self.getWidth(), self.getHeight())\r\n\r\n slc = self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1]\r\n\r\n profile = slc.mean(axis=1)\r\n return numpy.ravel(profile)\r\n\r\n def horizontalProfile(self, yPos):\r\n if yPos < self.getHeight():\r\n return self.px[yPos]\r\n else:\r\n raise Exception(\"Requested position for horizontal profile is out of bounds: y={} in an image that has {} rows.\".format(yPos, self.getHeight()))\r\n\r\n def horizontalROIProfile(self, ROI):\r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n ROI = ImageROI(0, 0, self.getWidth(), self.getHeight())\r\n\r\n slc = self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1]\r\n\r\n profile = slc.mean(axis=0)\r\n return profile\r\n\r\n def pixelsInShape(self, shape, seedPoint=None, mode='center', calculateWeights=False):\r\n \"\"\" Returns all pixels in the given shape (of class Polygon). \r\n\r\n mode:\r\n 'center' : a pixel's center must be within the shape to be accepted.\r\n 'full' : all corner points of a pixel must be within the shape to be accepted.\r\n 'partial' : only one corner point of a pixel must be within the shape to be accepted.\r\n\r\n calculateWeights:\r\n True : includes weights in returned pixel coordinate tuples,\r\n False : does not include weights in returned pixel coordinate tuples.\r\n \"\"\"\r\n\r\n if seedPoint != None:\r\n seedX = int(round(seedPoint.x))\r\n seedY = int(round(seedPoint.y))\r\n else:\r\n # Start at point p1 of shape:\r\n seedX = int(shape.points[0].x)\r\n seedY = int(shape.points[0].y)\r\n\r\n # Make a map of visited pixels. A visited pixel will get value 1:\r\n visited = numpy.zeros_like(a=self.px, dtype=numpy.dtype('uint8'))\r\n\r\n # Collect all points that belong to the shape in a list:\r\n contributions = []\r\n\r\n stack = [] # stack of pixels to visit\r\n stack.append((seedX, seedY))\r\n\r\n # Add seed's neighors to the stack as well:\r\n for offsetX in [-1, 0, 1]:\r\n for offsetY in [-1, 0, 1]:\r\n if not (offsetX==0 and offsetY==0):\r\n nx = seedX+offsetX\r\n ny = seedY+offsetY\r\n stack.append((nx, ny))\r\n\r\n while len(stack) > 0:\r\n pixel = stack.pop()\r\n x = pixel[0]\r\n y = pixel[1]\r\n\r\n if self.contains(x, y):\r\n if visited[y][x] == 0:\r\n visited[y][x] = 1\r\n\r\n # The pixel coordinate system is shifted by -0.5px against the shape coordinate system. Upper left pixel corner is its coordinate in the shape coordinate system. 
\r\n inside = False\r\n\r\n # Reserve names but set them up later only when they are needed.\r\n center = None\r\n upperLeft = None\r\n upperRight = None\r\n lowerLeft = None\r\n lowerRight = None\r\n\r\n center = Vector(x+0.5, y+0.5, 0)\r\n\r\n if mode == 'center':\r\n inside = shape.isInside2D(center)\r\n else:\r\n upperLeft = Vector(x, y, 0)\r\n upperRight = Vector(x+1, y, 0)\r\n lowerLeft = Vector(x, y+1, 0)\r\n lowerRight = Vector(x+1, y+1, 0)\r\n\r\n if mode == 'full':\r\n inside = shape.isInside2D(upperLeft) and shape.isInside2D(upperRight) and shape.isInside2D(lowerLeft) and shape.isInside2D(lowerRight)\r\n elif mode == 'partial':\r\n inside = True\r\n calculateWeights = True\r\n \r\n if inside:\r\n if calculateWeights:\r\n # Calculate pixel weight from the area of the clipped pixel:\r\n pixelPolygon = Polygon(upperLeft, upperRight, lowerRight, lowerLeft) # Clockwise order because pixel CS is y-flipped.\r\n\r\n clippedPixel = pixelPolygon.clip(shape)\r\n\r\n weight = clippedPixel.area()\r\n\r\n if weight > 0:\r\n contributions.append((x, y, weight))\r\n else:\r\n continue\r\n else:\r\n contributions.append((x, y, 0))\r\n\r\n # Now add neighbors to the stack:\r\n for offsetX in [-1, 0, 1]:\r\n for offsetY in [-1, 0, 1]:\r\n if not (offsetX==0 and offsetY==0):\r\n nx = x+offsetX\r\n ny = y+offsetY\r\n stack.append((nx, ny))\r\n\r\n return contributions\r\n\r\n @staticmethod\r\n def getPixelWeight(x, y, clipPolygon):\r\n # Calculate pixel weight from the area of the clipped pixel:\r\n upperLeft = Vector2D(x, y)\r\n upperRight = Vector2D(x+1, y)\r\n lowerLeft = Vector2D(x, y+1)\r\n lowerRight = Vector2D(x+1, y+1)\r\n pixelPolygon = Polygon(upperLeft, upperRight, lowerRight, lowerLeft) # Clockwise order because pixel CS is y-flipped.\r\n\r\n clippedPixel = pixelPolygon.clip(clipPolygon)\r\n weight = clippedPixel.area()\r\n\r\n return weight\r\n\r\n def meanGVinBin_polygonClipping(self, binCenter, sUnit, tUnit, sBoundary, tBoundary, binShape, weightFunction):\r\n \"\"\" Returns all pixels in the bin on the given vector s.\r\n\r\n binCenter: center of bin in world CS\r\n s: unit vector along profile axis\r\n t: unit vector along width axis\r\n \"\"\"\r\n\r\n roi_x0, roi_y0, roi_x1, roi_y1 = binShape.getBoundingBox()\r\n\r\n # Create a map with pixels' distances to the bin:\r\n # (measured parallel to s vector):\r\n roi_height = roi_y1 - roi_y0\r\n roi_width = roi_x1 - roi_x0\r\n\r\n roi_xaxis = numpy.linspace(start=roi_x0, stop=roi_x1, num=roi_width+1, endpoint=True, dtype=numpy.dtype('float64'))\r\n roi_yaxis = numpy.linspace(start=roi_y0, stop=roi_y1, num=roi_height+1, endpoint=True, dtype=numpy.dtype('float64'))\r\n\r\n roi_gridx, roi_gridy = numpy.meshgrid(roi_xaxis, roi_yaxis)\r\n\r\n # Shift by half a pixel, because they must represent\r\n # pixel centers in shape coordinate system. 
Also,\r\n # origin should be the bin center:\r\n roi_gridx = roi_gridx + 0.5 - binCenter.x\r\n roi_gridy = roi_gridy + 0.5 - binCenter.y\r\n\r\n # Transform coordinates into bin coordinate system (s and t axes):\r\n bin_grid_dist_s = numpy.abs(roi_gridx*sUnit.x + roi_gridy*sUnit.y)\r\n #bin_grid_dist_t = numpy.abs(roi_gridx*tUnit.x + roi_gridy*tUnit.y)\r\n\r\n # Set those that are too far from bin center in s and t direction to zero:\r\n bin_grid_dist_s = numpy.where(bin_grid_dist_s < sBoundary, bin_grid_dist_s, 0)\r\n #bin_grid_dist_t = numpy.where(bin_grid_dist_t < tBoundary, bin_grid_dist_t, 0)\r\n #bin_grid_dist_mul = bin_grid_dist_s * bin_grid_dist_t\r\n #pixel_indices = numpy.nonzero(bin_grid_dist_mul)\r\n pixel_indices = numpy.nonzero(bin_grid_dist_s)\r\n pixels_x = pixel_indices[1] + roi_x0\r\n pixels_y = pixel_indices[0] + roi_y0\r\n\r\n weights = weightFunction(pixels_x, pixels_y, binShape) # vectorized getPixelWeight()\r\n\r\n gvWeighted = self.px[pixels_y,pixels_x] * weights\r\n weightSum = numpy.sum(weights)\r\n meanGV = 0\r\n if weightSum > 0:\r\n meanGV = numpy.sum(gvWeighted) / weightSum\r\n\r\n return meanGV\r\n\r\n def meanGVinBin(self, binCenter, sUnit, tUnit, sBoundary, tBoundary, binShape, weightFunction):\r\n \"\"\" Returns all pixels in the bin on the given vector s.\r\n\r\n binCenter: center of bin in world CS\r\n s: unit vector along profile axis\r\n t: unit vector along width axis\r\n \"\"\"\r\n\r\n roi_x0, roi_y0, roi_x1, roi_y1 = binShape.getBoundingBox()\r\n\r\n # Create a map with pixels' distances to the bin:\r\n # (measured parallel to s vector):\r\n roi_height = roi_y1 - roi_y0\r\n roi_width = roi_x1 - roi_x0\r\n\r\n roi_xaxis = numpy.linspace(start=roi_x0, stop=roi_x1, num=roi_width+1, endpoint=True, dtype=numpy.dtype('float64'))\r\n roi_yaxis = numpy.linspace(start=roi_y0, stop=roi_y1, num=roi_height+1, endpoint=True, dtype=numpy.dtype('float64'))\r\n\r\n roi_gridx, roi_gridy = numpy.meshgrid(roi_xaxis, roi_yaxis)\r\n\r\n # Shift by half a pixel, because they must represent\r\n # pixel centers in shape coordinate system. 
Also,\r\n # origin should be the bin center:\r\n roi_gridx = roi_gridx + 0.5 - binCenter.x\r\n roi_gridy = roi_gridy + 0.5 - binCenter.y\r\n\r\n # Transform coordinates into bin coordinate system (s and t axes):\r\n bin_grid_dist_s = numpy.abs(roi_gridx*sUnit.x + roi_gridy*sUnit.y)\r\n #bin_grid_dist_t = numpy.abs(roi_gridx*tUnit.x + roi_gridy*tUnit.y)\r\n\r\n # Set those that are too far from bin center in s and t direction to zero:\r\n #bin_grid_dist_s = numpy.where(bin_grid_dist_s < sBoundary, bin_grid_dist_s, 0)\r\n #bin_grid_dist_t = numpy.where(bin_grid_dist_t < tBoundary, bin_grid_dist_t, 0)\r\n #bin_grid_dist_mul = bin_grid_dist_s * bin_grid_dist_t\r\n #pixel_indices = numpy.nonzero(bin_grid_dist_mul)\r\n\r\n pixel_indices = numpy.nonzero(bin_grid_dist_s < sBoundary)\r\n weights = bin_grid_dist_s[pixel_indices]\r\n pixels_x = pixel_indices[1] + roi_x0\r\n pixels_y = pixel_indices[0] + roi_y0\r\n\r\n weights = weightFunction(pixels_x, pixels_y, binShape) # vectorized getPixelWeight()\r\n\r\n gvWeighted = self.px[pixels_y,pixels_x] * weights\r\n weightSum = numpy.sum(weights)\r\n meanGV = 0\r\n if weightSum > 0:\r\n meanGV = numpy.sum(gvWeighted) / weightSum\r\n\r\n return meanGV\r\n\r\n \"\"\"\r\n def lineProfile_projectPixelsIntoProfileBins(self, x0, y0, x1, y1, width=1, resolution=1):\r\n # Vector pointing in direction of the requested line:\r\n s = Vector(x1-x0+1, y1-y0+1, 0) # +1 to fully include pixel (x1, y1)\r\n\r\n # Calculate vector t, perpendicular to s: t = s x z\r\n z = Vector(0, 0, 1) \r\n t = s.cross(z)\r\n t.makeUnitVector()\r\n t.scale(0.5*width)\r\n\r\n # Define a rectangle along the line and its width, separated into two triangles.\r\n origin = Vector(x0, y0, 0)\r\n A = origin - t\r\n B = origin + s - t\r\n C = origin + s + t\r\n D = origin + t\r\n\r\n rect = Polygon(A, B, C, D)\r\n\r\n print(\"s: {}\".format(s))\r\n print(\"t: {}\".format(t))\r\n\r\n print(rect)\r\n\r\n ceilLength = math.ceil(s.length())\r\n\r\n nSamples = int( ceilLength / resolution ) + 1 # +1 for endpoint\r\n\r\n # Set a seed point at the center of the rectangle:\r\n t.scale(0.5)\r\n s.scale(0.5)\r\n seed = A + t + s\r\n\r\n # Make a list of unique pixel coordinates within this rectangle:\r\n pixelsInRect = self.pixelsInShape(shape=rect, seedPoint=seed)\r\n\r\n # Create a histogram:\r\n sPositions, sStepSize = numpy.linspace(start=0, stop=ceilLength, num=nSamples, endpoint=True, retstep=True)\r\n sCounts = numpy.zeros_like(a=sPositions, dtype=numpy.dtype('float64')) # Number of contributions, for correct re-normalization, same datatype for efficiency during division later on...\r\n sSum = numpy.zeros_like(a=sPositions, dtype=numpy.dtype('float64')) # The sum of all grey value contributions\r\n\r\n # Make s a unit vector to correctly calculate projections using the dot product:\r\n s.makeUnitVector()\r\n\r\n # print(\"shape of positions: {}\".format(numpy.shape(sPositions)))\r\n\r\n print(\"{} pixels in rect.\".format(len(pixelsInRect)))\r\n\r\n offset = Vector(0.5, 0.5, 0)\r\n\r\n for pixel in pixelsInRect:\r\n # Project this pixel onto the s vector (pointing in direction of the line):\r\n\r\n # Move to line origin:\r\n p = pixel - origin + offset\r\n\r\n # Position on s axis:\r\n sPos = p.dot(s)\r\n\r\n # Find bin where this grey value should be counted:\r\n binPos = int(math.floor(sPos / sStepSize))\r\n\r\n #print(\"({x}, {y}): sPos: {spos}, binPos: {binpos}\".format(x=p.x, y=p.y, spos=sPos, binpos=binPos))\r\n\r\n sCounts[binPos] += 1\r\n sSum[binPos] += self.getPixel(int(pixel.x), 
int(pixel.y))\r\n\r\n # Replace zero counts by 1 to avoid div by zero:\r\n sCounts[sCounts==0] = 1\r\n\r\n sProfile = sSum / sCounts\r\n\r\n return sProfile, sPositions, sStepSize\r\n \"\"\"\r\n\r\n def lineProfile(self, x0, y0, x1, y1, width=1, resolution=1):\r\n \"\"\" Find line profile by adding weighted contributions of pixel grey values\r\n into bins of size (width x resolution).\r\n\r\n We always work in the 'shape coordinate system' with its origin\r\n at (0, 0) in the upper left corner.\r\n Center of pixel (0, 0) has shape CS coordinates (0.5, 0.5).\r\n\r\n x0, y0, x1 and y1 are shape coordinates.\r\n\r\n Returned 'sPositions' array contains bin center positions.\r\n \"\"\"\r\n\r\n # Vector pointing in direction of the requested line:\r\n s = Vector(x1-x0, y1-y0, 0)\r\n\r\n # Calculate vector t, perpendicular to s: t = s x z\r\n z = Vector(0, 0, 1) \r\n t = s.cross(z)\r\n t.makeUnitVector()\r\n\r\n # Convert to 2D vectors:\r\n s = Vector2D(s.x, s.y)\r\n t = Vector2D(t.x, t.y)\r\n\r\n tUnit = copy.deepcopy(t)\r\n\r\n t.scale(0.5*width) # t points from line origin half way in direction of width\r\n\r\n # Define a rectangle along the line and its width.\r\n origin = Vector2D(x0, y0)\r\n\r\n nSamples = math.ceil( s.length() / resolution ) #+ 1 # +1 for endpoint\r\n ceilLength = nSamples * resolution\r\n\r\n # Create a histogram:\r\n sPositions, sStepSize = numpy.linspace(start=0, stop=ceilLength, num=nSamples, endpoint=False, retstep=True)\r\n sProfile = numpy.zeros_like(a=sPositions, dtype=numpy.dtype('float64')) # Grey value profile\r\n\r\n # Create a unit vector in s direction:\r\n sUnit = copy.deepcopy(s)\r\n sUnit.makeUnitVector()\r\n\r\n # Half a unit vector:\r\n binUnitHalf = copy.deepcopy(sUnit)\r\n binUnitHalf.scale(0.5*resolution)\r\n\r\n # Make s the length of a bin step (i.e. resolution unit)\r\n s.makeUnitVector()\r\n s.scale(resolution)\r\n\r\n rectPos = Vector2D(0, 0)\r\n\r\n # A pixel center can be this far from the binPos (bin center)\r\n # in s and t direction to still be accepted:\r\n sBoundary = (resolution/2) + pixelHalfDiagonal\r\n tBoundary = (width/2) + pixelHalfDiagonal\r\n\r\n # Vectorize the pixel weight function:\r\n weightFunction = numpy.vectorize(self.getPixelWeight, otypes=[numpy.float64])\r\n\r\n i = 0\r\n for b in range(nSamples):\r\n print(\"\\rCalculating line profile... {:.1f}%\".format(100.0*i/nSamples), end=\"\")\r\n i += 1\r\n # Bin position on s axis:\r\n sPos = resolution*b\r\n\r\n # Construct a vector to the left point of the bin on the s axis:\r\n rectPos.setx(sUnit.x)\r\n rectPos.sety(sUnit.y)\r\n rectPos.scale(sPos)\r\n rectPos.add(origin)\r\n\r\n binPos = rectPos + binUnitHalf\r\n\r\n # Construct a rectangle that contains the area of this bin:\r\n A = rectPos - t\r\n B = rectPos + s - t\r\n C = rectPos + s + t\r\n D = rectPos + t\r\n\r\n binRect = Polygon(D, C, B, A) # Clockwise order because pixel CS is y-flipped.\r\n\r\n # Get all pixels and their relative areas in this bin:\r\n #pixelsInBin = self.pixelsInShape(shape=binRect, seedPoint=rectPos, mode='partial', calculateWeights=True)\r\n\r\n meanGV = self.meanGVinBin(binCenter=binPos, sUnit=sUnit, tUnit=tUnit, sBoundary=sBoundary, tBoundary=tBoundary, binShape=binRect, weightFunction=weightFunction)\r\n\r\n sProfile[b] = meanGV\r\n\r\n # Shift the sPositions by half a bin size so that they represent bin centers:\r\n sPositions += 0.5*resolution\r\n\r\n print(\"\\rCalculating line profile... 
100% \")\r\n return sProfile, sPositions, sStepSize\r\n \r\n def clip(self, lower, upper):\r\n \"\"\" Clip grey values to given boundary interval. \"\"\"\r\n self.px = numpy.clip(self.px, lower, upper)\r\n\r\n def crop(self, x0, y0, x1, y1):\r\n \"\"\" Crop to given box (x0, y0)--(x1, y1). \"\"\"\r\n if x0 > x1:\r\n x0,x1 = x1,x0\r\n\r\n if y0 > y1:\r\n y0,y1 = y1,y0\r\n\r\n if y1 > self.getHeight() or x1 > self.getWidth():\r\n raise Exception(\"Trying to crop beyond image boundaries.\")\r\n\r\n self.boundingBoxX0 += x0\r\n self.boundingBoxY0 += y0\r\n\r\n self.px = self.px[int(y0):int(y1),int(x0):int(x1)] # Array has shape [y][x]\r\n self.width = int(x1 - x0)\r\n self.height = int(y1 - y0)\r\n\r\n def cropBorder(self, top=0, bottom=0, left=0, right=0):\r\n \"\"\" Crop away given border around image. \"\"\"\r\n x0 = int(left)\r\n y0 = int(top)\r\n x1 = self.getWidth() - int(right)\r\n y1 = self.getHeight() - int(bottom)\r\n\r\n self.crop(x0, y0, x1, y1)\r\n\r\n def cropROIaroundPoint(self, centerX, centerY, roiWidth, roiHeight):\r\n \"\"\" Crop a region of interest, centerd around given point. \"\"\"\r\n\r\n if roiWidth < 0:\r\n roiWidth = abs(roiWidth)\r\n if roiHeight < 0:\r\n roiHeight = abs(roiHeight)\r\n if roiWidth == 0 or roiHeight == 0:\r\n raise Exception(\"The region of interest should not be a square of size 0.\")\r\n\r\n x0 = int(math.floor(centerX - roiWidth/2))\r\n x1 = int(math.ceil(centerX + roiWidth/2))\r\n y0 = int(math.floor(centerY - roiHeight/2))\r\n y1 = int(math.ceil(centerY + roiHeight/2))\r\n\r\n if x1<0 or y1<0:\r\n raise Exception(\"Right or lower boundary for ROI (x1 or y1) cannot be below zero.\")\r\n\r\n if roiWidth>self.getWidth() or roiHeight>self.getHeight():\r\n raise Exception(\"Size of the ROI is bigger than the image size. ROI: \" + str(roiWidth) + \" x \" + str(roiHeight) + \". Image: \" + str(self.getWidth()) + \" x \" + str(self.getHeight())) \r\n if x0 < 0:\r\n x1 += abs(x0)\r\n x0 = 0\r\n\r\n if y0 < 0:\r\n y1 += abs(y0)\r\n y0 = 0\r\n\r\n if x1 >= self.getWidth():\r\n x1 = self.getWidth()\r\n x0 = x1 - roiWidth\r\n\r\n if y1 >= self.getHeight():\r\n y1 = self.getHeight()\r\n y0 = y1 - roiHeight\r\n\r\n # These should match roiWidth and roiHeight...\r\n roiDimX = x1 - x0\r\n roiDimY = y1 - y0\r\n\r\n self.crop(x0, y0, x1, y1)\r\n return x0, x1, y0, y1\r\n\r\n def bin(self, binSizeX, binSizeY, operation=\"mean\"):\r\n \"\"\" Decrease image size by merging pixels using specified operation.\r\n Valid operations: mean, max, min, sum. \"\"\"\r\n\r\n if binSizeX is None:\r\n binSizeX = 1\r\n\r\n if binSizeY is None:\r\n binSizeY = 1\r\n\r\n if (binSizeX > 1) or (binSizeY > 1):\r\n # Picture dimensions must be integer multiple of binning factor. 
If not, crop:\r\n overhangX = math.fmod(int(self.getWidth()), binSizeX)\r\n overhangY = math.fmod(int(self.getHeight()), binSizeY)\r\n if (overhangX > 0) or (overhangY > 0):\r\n #log(\"Cropping before binning because of nonzero overhang: (\" + str(overhangX) + \", \" + str(overhangY) + \")\")\r\n self.crop(0, 0, self.getWidth()-int(overhangX), self.getHeight()-int(overhangY))\r\n\r\n newWidth = self.width // binSizeX\r\n newHeight = self.height // binSizeY\r\n\r\n # Shift pixel values that need to be binned together into additional axes:\r\n binshape = (newHeight, binSizeY, newWidth, binSizeX)\r\n self.px = self.px.reshape(binshape)\r\n \r\n # Perform binning operation along binning axes (axis #3 and #1).\r\n # These axes will be collapsed to contain only the result\r\n # of the binning operation.\r\n if operation == \"mean\":\r\n self.px = self.px.mean(axis=(3, 1))\r\n elif operation == \"sum\":\r\n self.px = self.px.sum(axis=(3, 1))\r\n elif operation == \"max\":\r\n self.px = self.px.max(axis=(3, 1))\r\n elif operation == \"min\":\r\n self.px = self.px.min(axis=(3, 1))\r\n elif operation is None:\r\n raise Exception(\"No binning operation specified.\")\r\n else:\r\n raise Exception(\"Invalid binning operation: {}.\".format(operation))\r\n\r\n self.setWidth(newWidth)\r\n self.setHeight(newHeight)\r\n\r\n # Resolution assumes isotropic pixels...\r\n self.resolution *= binSizeX\r\n\r\n def addImage(self, other):\r\n \"\"\" Add pixel values from another image to this image. \"\"\"\r\n if self.dimensionsMatch(other):\r\n self.px = self.px + other.getPixelMap()\r\n\r\n def subtractImage(self, other):\r\n \"\"\" Subtract pixel values of another image from this image. \"\"\"\r\n if self.dimensionsMatch(other):\r\n self.px = self.px - other.getPixelMap()\r\n\r\n def multiplyImage(self, other):\r\n \"\"\" Multiply pixel values from another image to this image. \"\"\"\r\n if self.dimensionsMatch(other):\r\n self.px = self.px * other.getPixelMap()\r\n\r\n def divideImage(self, other):\r\n \"\"\" Multiply pixel values by another image. \"\"\"\r\n if self.dimensionsMatch(other):\r\n self.px = self.px / other.getPixelMap()\r\n\r\n def square(self):\r\n self.px *= self.px\r\n\r\n def sqrt(self):\r\n self.px = numpy.sqrt(self.px)\r\n\r\n def add(self, value):\r\n self.px += value\r\n\r\n def subtract(self, value):\r\n self.px -= value\r\n\r\n def multiply(self, value):\r\n self.px *= value\r\n\r\n def divide(self, value):\r\n \"\"\" Divide all pixels values by given scalar value. 
\"\"\"\r\n self.px = self.px / float(value)\r\n\r\n def invert(self, min=0, maximum=65535):\r\n self.px = maximum - self.px\r\n\r\n def renormalize(self, newMin=0, newMax=1, currentMin=None, currentMax=None, ROI=None):\r\n \"\"\"Renormalization of grey values from (currentMin, Max) to (newMin, Max) \"\"\"\r\n\r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n ROI = ImageROI(0, 0, self.getWidth(), self.getHeight())\r\n\r\n slc = self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1]\r\n\r\n if currentMin is None:\r\n currentMin = slc.min()\r\n\r\n if currentMax is None:\r\n currentMax = slc.max()\r\n\r\n if(currentMax != currentMin):\r\n slc = (slc-currentMin)*(newMax-newMin)/(currentMax-currentMin)+newMin\r\n self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1] = slc\r\n else:\r\n slc = slc*0\r\n self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1] = slc\r\n #raise Exception(\"Division by zero upon renormalization: currentMax=currentMin={}\".format(currentMax))\r\n\r\n def map_lookup(self, gv, gv_from, gv_to):\r\n \"\"\" Return new grey value for given grey value 'gv'. Helper function for self.map().\"\"\"\r\n\r\n if gv in gv_from:\r\n # Given grey value is defined in 'from' list:\r\n return gv_to[numpy.where(gv_from==gv)]\r\n else:\r\n # Linear interpolation:\r\n a = 0 # left index of interpolation region\r\n if len(gv_from) > 2:\r\n for i in range(len(gv_from)-2):\r\n if gv_from[i+1] > gv:\r\n break\r\n\r\n a += 1\r\n\r\n b = a + 1 # right index of interpolation region\r\n\r\n xa = gv_from[a]\r\n xb = gv_from[b]\r\n ya = gv_to[a]\r\n yb = gv_to[b] \r\n\r\n # Slope of linear function:\r\n m = (yb-ya) / (xb-xa)\r\n\r\n # y axis intersection point (\"offset\"):\r\n n = yb - m*xb\r\n\r\n # newly assigned grey value:\r\n return (m*gv + n)\r\n\r\n\r\n def map(self, gv_from, gv_to, bins=1000):\r\n \"\"\" Applies a lookup table (LUT map) to convert image grey values\r\n according to given assignment tables (two numpy lists).\r\n\r\n gv_from: numpy array of given grey values (in current image)\r\n gv_to: numpy array of assigned grey values (for converted image)\r\n\r\n Linear interpolation will take place for gaps in lookup table.\r\n \"\"\"\r\n\r\n if len(gv_from) == len(gv_to):\r\n if len(gv_from) > 1:\r\n gvMin = self.min()\r\n gvMax = self.max()\r\n\r\n # Left position of each bin:\r\n positions, gvStepsize = numpy.linspace(start=gvMin, stop=gvMax, num=bins+1, endpoint=True, dtype=numpy.float64, retstep=True)\r\n\r\n # New grey value for each left position:\r\n mappingFunction = numpy.vectorize(pyfunc=self.map_lookup, excluded={1, 2})\r\n newGV = mappingFunction(positions, gv_from, gv_to)\r\n\r\n # Differences in newGV:\r\n deltaGV = numpy.diff(newGV, n=1)\r\n\r\n\r\n # Prepare parameters m (slope) and n (offset) for linear\r\n # interpolation functions of each bin:\r\n slopes = numpy.zeros(bins, dtype=numpy.float64)\r\n offsets = numpy.zeros(bins, dtype=numpy.float64)\r\n\r\n slopes = deltaGV / gvStepsize\r\n\r\n #print(\"newGV: {}\".format(numpy.shape(newGV)))\r\n #print(\"slopes: {}\".format(numpy.shape(slopes)))\r\n #print(\"positions: {}\".format(numpy.shape(positions)))\r\n\r\n offsets = newGV[1:] - slopes*positions[1:]\r\n\r\n inverse_stepsize = 1.0 / gvStepsize\r\n\r\n maxIndices = numpy.full(shape=numpy.shape(self.px), fill_value=bins-1, dtype=numpy.uint32)\r\n bin_indices = numpy.minimum(maxIndices, numpy.floor((self.px - gvMin) * inverse_stepsize).astype(numpy.uint32))\r\n\r\n m_px = slopes[bin_indices]\r\n n_px = offsets[bin_indices]\r\n\r\n self.px = m_px*self.px + n_px\r\n else:\r\n raise Exception(\"image.map(): 
At least two mappings are required in the grey value assignment lists.\")\r\n else:\r\n raise Exception(\"image.map(): gv_from must have same length as gv_to.\")\r\n\r\n def stats(self, ROI=None):\r\n \"\"\" Image or ROI statistics. Mean, Standard Deviation \"\"\"\r\n\r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n ROI = ImageROI(0, 0, self.getWidth(), self.getHeight())\r\n\r\n slc = self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1]\r\n\r\n mean = numpy.mean(slc)\r\n sigma = numpy.std(slc)\r\n snr = 0\r\n if sigma > 0:\r\n snr = mean / sigma\r\n\r\n return {\"mean\": mean, \"stddev\": sigma, \"snr\": snr, \"width\": ROI.width(), \"height\": ROI.height(), \"area\": ROI.area()}\r\n\r\n def noise(self, sigma):\r\n \"\"\" Add noise to image.\r\n\r\n Gaussian noise:\r\n sigma: standard deviation (scalar or array that matches image size)\r\n \"\"\"\r\n\r\n rng = default_rng()\r\n self.px += rng.normal(loc=0, scale=sigma, size=numpy.shape(self.px))\r\n\r\n def smooth_gaussian(self, sigma):\r\n self.px = ndimage.gaussian_filter(input=self.px, sigma=sigma, order=0, )\r\n\r\n def applyMedian(self, kernelSize=1):\r\n if kernelSize > 1:\r\n self.px = ndimage.median_filter(self.px, int(kernelSize))\r\n\r\n def applyThreshold(self, threshold, lower=0, upper=65535):\r\n self.px = numpy.where(self.px > threshold, upper, lower).astype(self.getInternalDataType())\r\n\r\n def renormalizeToMeanAndStdDev(self, mean, stdDev, ROI=None):\r\n \"\"\" Renormalize grey values such that mean=30000, (mean-stdDev)=0, (mean+stdDev)=60000 \"\"\"\r\n\r\n # Take full image if no ROI is given\r\n if ROI==None:\r\n ROI = ImageROI(0, 0, self.getWidth(), self.getHeight())\r\n\r\n self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1] = ((self.px[ROI.y0:ROI.y1, ROI.x0:ROI.x1] - mean)/stdDev)*30000 + 30000\r\n\r\n def edges_sobel(self):\r\n # Sobel edge detection:\r\n edgesX = ndimage.sobel(self.px, axis=0, mode='nearest')\r\n edgesY = ndimage.sobel(self.px, axis=1, mode='nearest')\r\n return numpy.sqrt(edgesX**2 + edgesY**2)\r\n\r\n def edges_canny(self):\r\n # The 'feature' package from scikit-image,\r\n # only needed for Canny edge detection, when used instead of Sobel.\r\n from skimage.feature import canny # Canny edge detection\r\n\r\n # Canny edge detection. Needs 'scikit-image' package. 
from skimage import feature\r\n return canny(self.px)\r\n\r\n def filter_edges(self, mode='sobel'):\r\n if(mode == 'sobel'):\r\n self.px = self.edges_sobel()\r\n elif(mode == 'canny'):\r\n self.px = self.edges_canny()\r\n else:\r\n raise Exception(\"Valid edge detection modes: 'sobel'\")\r\n \r\n # Rescale:\r\n self.px = self.px.astype(self.getInternalDataType())\r\n #self.thresholding(0) # black=0, white=65535\r\n\r\n def cleanPatches(self, min_patch_area=None, max_patch_area=None, remove_border_patches=False, aspect_ratio_tolerance=None):\r\n iterationStructure = ndimage.generate_binary_structure(rank=2, connectivity=2) # apply to rank=2D array, only nearest neihbours (connectivity=1) or next nearest neighbours as well (connectivity=2)\r\n\r\n labelField, nPatches = ndimage.label(self.px, iterationStructure)\r\n nCleaned = 0\r\n nRemaining = 0\r\n patchGeometry = []\r\n\r\n if nPatches == 0:\r\n log(\"Found no structures\")\r\n else:\r\n self.erase()\r\n\r\n areaMin = 0\r\n if(min_patch_area != None):\r\n areaMin = min_patch_area\r\n \r\n areaMax = self.getWidth() * self.getHeight()\r\n if(max_patch_area != None):\r\n areaMax = max_patch_area\r\n\r\n areaMin = areaMin / (self.getResolution()**2)\r\n areaMax = areaMax / (self.getResolution()**2)\r\n\r\n for i in range(1, nPatches+1):\r\n patchCoordinates = numpy.nonzero(labelField==i)\r\n\r\n # Check patch size:\r\n nPatchPixels = len(patchCoordinates[0])\r\n if nPatchPixels < areaMin or nPatchPixels > areaMax: # Black out areas that are too small or too big for a circle\r\n nCleaned += 1\r\n continue\r\n \r\n coordinatesX = patchCoordinates[1]\r\n coordinatesY = patchCoordinates[0]\r\n\r\n left = numpy.amin(coordinatesX)\r\n right = numpy.amax(coordinatesX)\r\n top = numpy.amin(coordinatesY)\r\n bottom= numpy.amax(coordinatesY)\r\n\r\n if remove_border_patches: \r\n if((left==0) or (top==0) or (right==self.getWidth()-1) or (bottom==self.getHeight()-1)):\r\n nCleaned += 1\r\n continue\r\n\r\n # An ideal circle should have an aspect ratio of 1:\r\n if aspect_ratio_tolerance != None:\r\n aspectRatio = 0\r\n if(top != bottom):\r\n aspectRatio = abs(right-left) / abs(bottom-top)\r\n\r\n if abs(1-aspectRatio) > aspect_ratio_tolerance: # This is not a circle\r\n nCleaned += 1\r\n log(\"Aspect ratio {ar:.3f} doesn't meet aspect ratio tolerance |1-AR|={tolerance:.3f}\".format(ar=aspectRatio, tolerance=aspect_ratio_tolerance))\r\n continue\r\n\r\n # Add patch center as its coordinate:\r\n patchGeometry.append(((right+left)/2.0, (bottom+top)/2.0, right-left, bottom-top))\r\n\r\n self.px[patchCoordinates] = 1\r\n nRemaining += 1\r\n\r\n return nPatches, nCleaned, nRemaining, patchGeometry\r\n\r\n def fitCircle(self):\r\n # Linear least squares method by:\r\n # I. D. 
Coope,\r\n # Circle Fitting by Linear and Nonlinear Least Squares,\r\n # Journal of Optimization Theory and Applications, 1993, Volume 76, Issue 2, pp 381-388\r\n # https://doi.org/10.1007/BF00939613\r\n\r\n coordinates = numpy.nonzero(self.px)\r\n circlePixelsX = coordinates[1]\r\n circlePixelsY = coordinates[0]\r\n nPoints = len(circlePixelsX)\r\n circlePixels1 = numpy.ones(nPoints)\r\n\r\n # Create the matrix B for the system of linear equations:\r\n matrixB = numpy.array((circlePixelsX, circlePixelsY, circlePixels1))\r\n matrixB = matrixB.transpose()\r\n\r\n # linear equation to optimize:\r\n # matrix B * result = vector d\r\n d = []\r\n for i in range(nPoints):\r\n d.append(circlePixelsX[i]**2 + circlePixelsY[i]**2)\r\n\r\n vectorD = numpy.array(d)\r\n\r\n results, residuals, rank, s = numpy.linalg.lstsq(matrixB, vectorD, rcond=None)\r\n\r\n centerX = (results[0] / 2.0)\r\n centerY = (results[1] / 2.0)\r\n radius = math.sqrt(results[2] + centerX**2 + centerY**2)\r\n\r\n # Calculate deviation statistics:\r\n differenceSum = 0\r\n minDifference = 99999\r\n maxDifference = 0\r\n for i in range(nPoints):\r\n diff = abs(radius - math.sqrt((centerX - circlePixelsX[i])**2 + (centerY - circlePixelsY[i])**2))\r\n differenceSum += diff\r\n\r\n if minDifference > diff:\r\n minDifference = diff\r\n\r\n if maxDifference < diff:\r\n maxDifference = diff\r\n\r\n meanDifference = differenceSum / nPoints\r\n\r\n return centerX, centerY, radius, meanDifference, minDifference, maxDifference\r\n\r\n def intensityFunction2D(self, x, I0, mu, R, x0): # Lambert-Beer-Law for ball intensity, to fit.\r\n radicand = numpy.power(R,2) - numpy.power((x-x0),2)\r\n \r\n # Avoid root of negative numbers\r\n radicand[radicand < 0] = 0 \r\n\r\n # Huge radicands lead to exp()->0, therefore avoid huge exponentiation:\r\n radicand[radicand > (1400*1400)] = (1400*1400)\r\n\r\n result = I0*numpy.exp(-2.0*mu*numpy.sqrt(radicand))\r\n\r\n return result\r\n\r\n def intensityFunction3D(self, coord, I0, mu, R, x0, y0): # Lambert-Beer-Law for ball intensity, to fit.\r\n if len(coord) == 2:\r\n (x, y) = coord\r\n\r\n radicand = numpy.power(R,2) - numpy.power((x-x0),2) - numpy.power((y-y0),2)\r\n \r\n # Avoid root of negative numbers\r\n radicand[radicand < 0] = 0 \r\n\r\n # Huge radicands lead to exp()->0, therefore avoid huge exponentiation:\r\n radicand[radicand > (1400*1400)] = (1400*1400)\r\n\r\n result = I0 * numpy.exp(-2.0*mu*numpy.sqrt(radicand))\r\n \r\n return result\r\n else:\r\n raise Exception(\"3D Intensity fit function expects a tuple (x,y) for coordinates.\")\r\n\r\n def fitIntensityProfile(self, axis=\"x\", initI0=None, initMu=0.003, initR=250, initX0=None, avgLines=5):\r\n yData = 0\r\n xdata = 0\r\n if initI0 is None:\r\n initI0 = self.max() # Hoping that a median has been applied before.\r\n\r\n if axis == \"x\":\r\n if initX0 is None:\r\n initX0 = self.getWidth() / 2\r\n\r\n startLine = int((self.getHeight() / 2) - math.floor(avgLines/2))\r\n stopLine = int((self.getHeight() / 2) + math.floor(avgLines/2))\r\n\r\n # Accumulate intensity profile along 'avgLines' lines around the center line:\r\n yData = numpy.zeros(self.getWidth(), dtype=self.getInternalDataType())\r\n for l in range(startLine, stopLine+1):\r\n yData += self.px[l,:]\r\n\r\n xData = numpy.linspace(0, self.getWidth()-1, self.getWidth())\r\n\r\n elif axis == \"y\":\r\n if initX0 is None:\r\n initX0 = self.getHeight() / 2\r\n\r\n startLine = int((self.getWidth() / 2) - math.floor(avgLines/2))\r\n stopLine = int((self.getWidth() / 2) + 
math.floor(avgLines/2))\r\n\r\n # Accumulate intensity profile along 'avgLines' lines around the center line:\r\n yData = numpy.zeros(self.getHeight(), dtype=self.getInternalDataType())\r\n for l in range(startLine, stopLine+1):\r\n yData += self.px[:,l]\r\n\r\n xData = numpy.linspace(0, self.getHeight()-1, self.getHeight())\r\n\r\n else:\r\n raise Exception(\"projectionImage::fitIntensityProfile() needs profile direction to be 'x' or 'y'.\")\r\n\r\n yData = yData / int(avgLines) # average intensity profile\r\n firstGuess = (initI0, initMu, initR, initX0)\r\n\r\n try:\r\n optimalParameters, covariances = optimize.curve_fit(self.intensityFunction2D, xData, yData, p0=firstGuess)\r\n except Exception:\r\n optimalParameters = (None, None, None, None)\r\n\r\n\r\n fittedI0 = optimalParameters[0]\r\n fittedMu = optimalParameters[1]\r\n fittedR = optimalParameters[2]\r\n fittedX0 = optimalParameters[3]\r\n\r\n return fittedI0, fittedMu, fittedR, fittedX0\r\n\r\nclass ImageStack:\r\n \"\"\" Specify an image stack from a single file (RAW chunk) or\r\n a collection of single 2D RAW or TIFF files. \"\"\"\r\n\r\n def __init__(self, filePattern=None, width=None, height=None, dataType=None, byteOrder=None, rawFileHeaderSize=0, rawImageHeaderSize=0, slices=None, startNumber=0, flipByteOrder=False):\r\n self.files = ImageFile(filePattern, dataType, byteOrder, flipByteOrder)\r\n\r\n # Has this stack already been built?\r\n self.built = False\r\n\r\n self.width = width\r\n self.height = height\r\n self.nSlices = slices # number of slices in stack\r\n self.startNumber = startNumber\r\n\r\n # A RAW chunk can contain an overall file header, and\r\n # each image in the stack can contain an image header.\r\n self.rawFileHeaderSize = rawFileHeaderSize\r\n self.rawImageHeaderSize = rawImageHeaderSize\r\n\r\n self._isVolumeChunk = False # Is this a volume chunk or is a file list provided?\r\n\r\n self.fileList = []\r\n self.fileNumbers = [] # store original stack number in file name\r\n\r\n def addStack(self, other):\r\n if (self.width == other.width) and (self.height == other.height):\r\n self.nSlices += other.nSlices\r\n self.fileList.extend(other.fileList)\r\n self.fileNumbers.extend(other.fileNumbers)\r\n else:\r\n raise Exception(\"Error adding stack: image dimensions don't match.\")\r\n\r\n def isVolumeChunk(self):\r\n return self._isVolumeChunk\r\n\r\n def setVolumeChunk(self, isVolumeChunk):\r\n self._isVolumeChunk = isVolumeChunk\r\n\r\n def getFileByteOrder(self):\r\n return self.files.getByteOrder()\r\n\r\n def setFileByteOrder(self, byteOrder):\r\n self.files.setByteOrder(byteOrder)\r\n\r\n def getFileDataType(self):\r\n return self.files.getDataType()\r\n\r\n def setFileDataType(self, dataType):\r\n self.files.setDataType(dataType)\r\n\r\n def doFlipByteOrder(self):\r\n return self.files.doFlipByteOrder()\r\n\r\n def setFlipByteOrder(self, flipByteOrder):\r\n self.files.setFlipByteOrder(flipByteOrder)\r\n\r\n def fileStackInfo(self, filenameString):\r\n \"\"\" Split file pattern into lead & trail text, number of expected digits. \"\"\"\r\n if '%' in filenameString:\r\n # A % sign in the provided file pattern indicates an image stack: e.g. 
%04d\r\n percentagePosition = filenameString.find(\"%\")\r\n\r\n numberStart = percentagePosition + 1\r\n numberStop = filenameString.find(\"d\", percentagePosition)\r\n\r\n leadText = \"\"\r\n if(percentagePosition > 0):\r\n leadText = filenameString[:percentagePosition]\r\n\r\n trailText = \"\"\r\n if((numberStop+1) < len(filenameString)):\r\n trailText = filenameString[(numberStop+1):]\r\n\r\n if(numberStop > numberStart):\r\n numberString = filenameString[numberStart:numberStop]\r\n if(numberString.isdigit()):\r\n nDigitsExpected = int(numberString)\r\n return leadText, trailText, nDigitsExpected\r\n else:\r\n raise Exception(\"Image stack pattern is wrong. The wildcard for sequential digits in a filename must be %, followed by number of digits, followed by d, e.g. %04d\")\r\n else:\r\n raise Exception(\"Image stack pattern is wrong. The wildcard for sequential digits in a filename must be %, followed by number of digits, followed by d, e.g. %04d\")\r\n\r\n return filenameString, \"\", 0\r\n\r\n def buildStack(self):\r\n \"\"\" Build list of files that match given file name pattern. \"\"\"\r\n self.fileList = []\r\n self.fileNumbers = []\r\n\r\n # Treat projection files\r\n inFilePattern = self.files.getFilename()\r\n inputFolder = os.path.dirname(inFilePattern)\r\n projBasename = os.path.basename(inFilePattern)\r\n\r\n if inputFolder == \"\" or inputFolder is None:\r\n inputFolder = \".\"\r\n\r\n # Check if an image stack is provided:\r\n if('%' not in inFilePattern):\r\n self.fileList.append(inFilePattern)\r\n\r\n if(isTIFF(inFilePattern)): # treat as single TIFF projection \r\n self._isVolumeChunk = False\r\n testImage = Image(inFilePattern)\r\n testImage.read()\r\n self.width = testImage.getWidth()\r\n self.height = testImage.getHeight()\r\n self.nSlices = 1\r\n self.files.setDataType(testImage.inputFile.getDataType())\r\n else: # treat as raw chunk\r\n if (self.width != None) and (self.height != None):\r\n if (self.files.getDataType() != None):\r\n if os.path.isfile(inFilePattern):\r\n self._isVolumeChunk = True\r\n\r\n if (self.nSlices is None):\r\n # Determine number of slices.\r\n fileSizeInBytes = os.path.getsize(inFilePattern)\r\n dataSizeInBytes = fileSizeInBytes - self.rawFileHeaderSize\r\n bytesPerImage = self.rawImageHeaderSize + self.width * self.height * self.files.getDataType().itemsize\r\n\r\n if (dataSizeInBytes >= bytesPerImage):\r\n if (dataSizeInBytes % bytesPerImage) == 0:\r\n self.nSlices = int(dataSizeInBytes / bytesPerImage)\r\n log(\"{} slices found in raw chunk.\".format(self.nSlices))\r\n else:\r\n raise Exception(\"The raw chunk data size ({} bytes, without general file header) is not divisible by the calculated size of a single image ({} bytes, including image header). Therefore, the number of slices cannot be determined. {}\".format(dataSizeInBytes, bytesPerImage, inFilePattern))\r\n else:\r\n raise Exception(\"The raw chunk data size ({} bytes, without general file header) is smaller than the calculated size of a single image ({} bytes, including image header). {}\".format(dataSizeInBytes, bytesPerImage, inFilePattern))\r\n else:\r\n raise Exception(\"File not found: {}\".format(inFilePattern))\r\n else:\r\n raise Exception(\"Please provide the data type of the raw chunk.\")\r\n else:\r\n raise Exception(\"Please provide width and height (in pixels) of the raw chunk.\")\r\n else:\r\n # A % sign in the provided file pattern indicates an image stack: e.g. 
%04d\r\n leadText, trailText, nDigitsExpected = self.fileStackInfo(projBasename)\r\n\r\n # Get list of files in input folder:\r\n fileList = os.listdir(inputFolder)\r\n fileList.sort()\r\n\r\n nImported = 0\r\n\r\n for f in fileList:\r\n file = inputFolder + \"/\" + f\r\n if os.path.isfile(file):\r\n # Check if filename matches pattern:\r\n if(f.startswith(leadText) and f.endswith(trailText)):\r\n digitText = f[len(leadText):-len(trailText)]\r\n if digitText.isdigit(): # and len(digitText)==nDigitsExpected:\r\n # Pattern matches.\r\n n = int(digitText)\r\n if n >= self.startNumber:\r\n self.fileList.append(file)\r\n self.fileNumbers.append(n)\r\n\r\n nImported += 1\r\n if nImported == self.nSlices:\r\n break\r\n else:\r\n continue\r\n else:\r\n continue\r\n\r\n self.nSlices = len(self.fileList)\r\n\r\n if self.nSlices > 0:\r\n if isTIFF(self.fileList[0]):\r\n testImage = Image(self.fileList[0])\r\n testImage.read()\r\n self.width = testImage.getWidth()\r\n self.height = testImage.getHeight()\r\n self.files.setDataType(testImage.inputFile.getDataType())\r\n\r\n self.built = True\r\n \r\n\r\n def getFilename(self, index=None):\r\n if index != None:\r\n if self._isVolumeChunk:\r\n if len(self.fileList) > 0:\r\n return self.fileList[0]\r\n else:\r\n return None\r\n else:\r\n if len(self.fileList) > index:\r\n return self.fileList[index]\r\n else:\r\n return None\r\n else:\r\n return self.files.getFilename()\r\n\r\n def getFileBasename(self, index=None):\r\n if index != None:\r\n if self._isVolumeChunk:\r\n if len(self.fileList) > 0:\r\n return os.path.basename(self.fileList[0])\r\n else:\r\n return None\r\n else:\r\n if len(self.fileList) > index:\r\n return os.path.basename(self.fileList[index])\r\n else:\r\n return None\r\n else:\r\n return self.files.getFileBasename()\r\n\r\n def setFilename(self, filename):\r\n self.files.setFilename(filename)\r\n\r\n def getImage(self, index, outputFile=None):\r\n \"\"\" Read and return image at position 'index' within the stack. \"\"\"\r\n if index >= 0:\r\n if not self._isVolumeChunk: # read single image file from stack:\r\n if len(self.fileList) > index:\r\n filename = self.fileList[index]\r\n file = ImageFile(filename=filename, dataType=self.getFileDataType(), byteOrder=self.getFileByteOrder(), flipByteOrder=self.doFlipByteOrder())\r\n\r\n img = Image(file, outputFile)\r\n if isTIFF(filename):\r\n img.read()\r\n else:\r\n img.readRAW(self.width, self.height, 0, self.getFileDataType(), self.getFileByteOrder(), self.rawFileHeaderSize, self.rawImageHeaderSize)\r\n return img\r\n else:\r\n raise Exception(\"The requested slice nr. {} is out of bounds, because only {} image files were found.\".format(index, len(self.fileList)))\r\n else: # read slice from volume chunk, obeying start number\r\n if len(self.fileList) > 0:\r\n file = self.fileList[0]\r\n img = Image(file, outputFile)\r\n chunkIndex = index + self.startNumber\r\n if isTIFF(file):\r\n raise Exception(\"Cannot treat 3D TIFFs.\")\r\n else:\r\n img.readRAW(self.width, self.height, chunkIndex, self.getFileDataType(), self.getFileByteOrder(), self.rawFileHeaderSize, self.rawImageHeaderSize)\r\n return img\r\n else:\r\n raise Exception(\"No image file specified to be loaded.\")\r\n else:\r\n raise Exception(\"Negative slice numbers do not exists. {} requested.\".format(index))\r\n\r\n def getMeanImage(self, outputFile=None):\r\n \"\"\" Calculate the mean of all image files. 
\"\"\"\r\n if self.nSlices > 0:\r\n if self.nSlices > 1:\r\n sumImg = self.getImage(0, outputFile)\r\n for i in range(1, self.nSlices):\r\n print(\"\\rMean Image: summing up {i}/{n}\".format(i=(i+1), n=self.nSlices), end='')\r\n sumImg.addImage(self.getImage(i, outputFile))\r\n \r\n\r\n print(\"\")\r\n\r\n sumImg.divide(self.nSlices)\r\n return sumImg\r\n else:\r\n return self.getImage(0, outputFile)\r\n else:\r\n return None\r\n\r\n def getStdDevImage(self, meanImg=None, outputFile=None):\r\n \"\"\" Calculate the pixel-wise RMS of the image files. \"\"\"\r\n if self.nSlices > 0:\r\n if self.nSlices > 1:\r\n if meanImg is None:\r\n meanImg = self.getMeanImage(outputFile)\r\n\r\n sumImg = Image()\r\n sumImg.shapeLike(otherImg=meanImg)\r\n\r\n for i in range(0, self.nSlices):\r\n print(\"\\rRMSD Image: component {i}/{n}\".format(i=i+1, n=self.nSlices), end='')\r\n sqDiffImg = self.getImage(i, outputFile)\r\n sqDiffImg.subtractImage(meanImg)\r\n sqDiffImg.square()\r\n\r\n sumImg.addImage(sqDiffImg)\r\n\r\n sumImg.divide(self.nSlices)\r\n sumImg.sqrt()\r\n\r\n print(\"\")\r\n\r\n return sumImg\r\n else:\r\n return self.getImage(0, outputFile)\r\n else:\r\n return None"
] | [
[
"numpy.amax",
"numpy.sqrt",
"numpy.linspace",
"numpy.issubdtype",
"numpy.flipud",
"scipy.ndimage.sobel",
"numpy.dtype",
"numpy.mean",
"numpy.any",
"numpy.iinfo",
"numpy.where",
"scipy.optimize.curve_fit",
"numpy.random.default_rng",
"numpy.clip",
"numpy.fliplr",
"numpy.reshape",
"scipy.ndimage.generate_binary_structure",
"numpy.full",
"numpy.finfo",
"numpy.std",
"numpy.argmax",
"numpy.diff",
"numpy.ravel",
"numpy.zeros",
"numpy.rot90",
"numpy.nonzero",
"scipy.signal.fftconvolve",
"numpy.amin",
"numpy.power",
"scipy.ndimage.label",
"numpy.linalg.lstsq",
"numpy.floor",
"numpy.meshgrid",
"numpy.array",
"numpy.sum",
"scipy.ndimage.gaussian_filter",
"numpy.abs",
"scipy.ndimage.center_of_mass",
"numpy.ones",
"numpy.vectorize",
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
214929177/pyNastran | [
"73032d6ffd445ef085c124dde6b5e90a516a5b6a",
"73032d6ffd445ef085c124dde6b5e90a516a5b6a"
] | [
"pyNastran/op2/op2_interface/op2_scalar.py",
"pyNastran/converters/nastran/gui/results_helper.py"
] | [
"#pylint: disable=R0913\n\"\"\"\nDefines the sub-OP2 class. This should never be called outisde of the OP2 class.\n\n - OP2_Scalar(debug=False, log=None, debug_file=None)\n\n **Methods**\n - set_subcases(subcases=None)\n - set_transient_times(times)\n - read_op2(op2_filename=None, combine=False)\n - set_additional_generalized_tables_to_read(tables)\n - set_additional_result_tables_to_read(tables)\n - set_additional_matrices_to_read(matrices)\n\n **Attributes**\n - total_effective_mass_matrix\n - effective_mass_matrix\n - rigid_body_mass_matrix\n - modal_effective_mass_fraction\n - modal_participation_factors\n - modal_effective_mass\n - modal_effective_weight\n - set_as_msc()\n - set_as_optistruct()\n\n **Private Methods**\n - _get_table_mapper()\n - _not_available(data, ndata)\n - _table_crasher(data, ndata)\n - _table_passer(data, ndata)\n - _validate_op2_filename(op2_filename)\n - _create_binary_debug()\n - _make_tables()\n - _read_tables(table_name)\n - _skip_table(table_name)\n - _read_table_name(rewind=False, stop_on_failure=True)\n - _update_generalized_tables(tables)\n - _read_cmodext()\n - _read_cmodext_helper(marker_orig, debug=False)\n - _read_geom_table()\n - _finish()\n\n\"\"\"\nimport os\nfrom struct import Struct, unpack\nfrom collections import defaultdict\nfrom typing import List, Tuple, Dict, Union, Any\n\nfrom numpy import array\nimport numpy as np\nfrom cpylog import get_logger\n\nfrom pyNastran import is_release, __version__\nfrom pyNastran.f06.errors import FatalError\nfrom pyNastran.op2.op2_interface.op2_reader import OP2Reader, mapfmt, reshape_bytes_block\nfrom pyNastran.bdf.cards.params import PARAM\n\n#============================\n\nfrom pyNastran.op2.op2_interface.msc_tables import MSC_RESULT_TABLES, MSC_MATRIX_TABLES, MSC_GEOM_TABLES\nfrom pyNastran.op2.op2_interface.nx_tables import NX_RESULT_TABLES, NX_MATRIX_TABLES, NX_GEOM_TABLES\n\nfrom pyNastran.op2.tables.lama_eigenvalues.lama import LAMA\nfrom pyNastran.op2.tables.oee_energy.onr import ONR\nfrom pyNastran.op2.tables.ogf_gridPointForces.ogpf import OGPF\n\nfrom pyNastran.op2.tables.oef_forces.oef import OEF\nfrom pyNastran.op2.tables.oes_stressStrain.oes import OES\n#from pyNastran.op2.tables.oes_stressStrain.oesm import OESM\nfrom pyNastran.op2.tables.ogs_grid_point_stresses.ogs import OGS\n\nfrom pyNastran.op2.tables.opg_appliedLoads.opg import OPG\nfrom pyNastran.op2.tables.oqg_constraintForces.oqg import OQG\nfrom pyNastran.op2.tables.oug.oug import OUG\nfrom pyNastran.op2.tables.ogpwg import OGPWG\nfrom pyNastran.op2.fortran_format import FortranFormat\n\nfrom pyNastran.utils import is_binary_file\n\"\"\"\nftp://161.24.15.247/Nastran2011/seminar/SEC04-DMAP_MODULES.pdf\n\nDatablock\tType\tDescription\nEFMFSMS\tMatrix\t6 x 1 Total Effective mass matrix\nEFMASSS\tMatrix\t6 x 6 Effective mass matrix\nRBMASS\tMatrix\t6 x 6 Rigid body mass matrix\nEFMFACS\tMatrix\t6 X N Modal effective mass fraction matrix\nMPFACS\tMatrix\t6 x N Modal participation factor matrix\nMEFMASS\tMatrix\t6 x N Modal effective mass matrix\nMEFWTS\tMatrix\t6 x N Modal effective weight matrix\nRAFGEN\tMatrix\tN x M Generalized force matrix\nRADEFMP\tMatrix\tN X U2 Effective inertia loads\nBHH\tMatrix\tN x N Viscous damping matrix\nK4HH\tMatrix\tN x N Structural damping matrix\nRADAMPZ\tMatrix\tN x N equivalent viscous damping ratios\nRADAMPG\tMatrix\tN X N equivalent structural damping ratio\n\nLAMA\tLAMA\tEigenvalue summary table\nOGPWG\tOGPWG\tMass properties output\nOQMG1\tOQMG\tModal MPC forces\nRANCONS\tORGY1\tConstraint mode 
element strain energy table\nRANEATC\tORGY1\tAttachment mode element strain energy table\nRAGCONS\tOGPFB\tConstraint mode grid point force table\nRAGEATC\tOGPFB\tAttachment mode grid point force table\nRAPCONS\tOES\tConstraint mode ply stress table\nRAPEATC\tOES\tAttachment mode ply stress table\nRASCONS\tOES\tConstraint mode element stress table\nRAECONS\tOES\tConstraint mode element strain table\nRASEATC\tOES\tAttachment mode element stress table\nRAEEATC\tOES\tAttachment mode element strain table\nOES1C\tOES\tModal Element Stress Table\nOES1X\tOES\tModal Element Stress Table\nOSTR1C\tOES\tModal Element Strain Table\nOSTR1X\tOSTR\tModal Element Strain Table\nRAQCONS\tOUG\tConstraint mode MPC force table\nRADCONS\tOUG\tConstraint mode displacement table\nRADEFFM\tOUG\tEffective inertia displacement table\nRAQEATC\tOUG\tAttachment mode MPC force table\nRADEATC\tOUG\tAttachment mode displacement table\nOUGV1\tOUG\tEigenvector Table\nRAFCONS\tOEF\tConstraint mode element force table\nRAFEATC\tOEF\tAttachment mode element force table\nOEF1X\tOEF\tModal Element Force Table\nOGPFB1\tOGPFB\tModal Grid Point Force Table\nONRGY1\tONRGY1\tModal Element Strain Energy Table\nONRGY2\tONRGY1\n\n#--------------------\n\nRADCONS - Displacement Constraint Mode\nRADDATC - Displacement Distributed Attachment Mode\nRADNATC - Displacement Nodal Attachment Mode\nRADEATC - Displacement Equivalent Inertia Attachment Mode\nRADEFFM - Displacement Effective Inertia Mode\n\nRAECONS - Strain Constraint Mode\nRAEDATC - Strain Distributed Attachment Mode\nRAENATC - Strain Nodal Attachment Mode\nRAEEATC - Strain Equivalent Inertia Attachment Mode\n\nRAFCONS - Element Force Constraint Mode\nRAFDATC - Element Force Distributed Attachment Mode\nRAFNATC - Element Force Nodal Attachment Mode\nRAFEATC - Element Force Equivalent Inertia Attachment Mode\n\nRALDATC - Load Vector Used to Compute the Distributed Attachment M\n\nRANCONS - Strain Energy Constraint Mode\nRANDATC - Strain Energy Distributed Attachment Mode\nRANNATC - Strain Energy Nodal Attachment Mode\nRANEATC - Strain Energy Equivalent Inertia Attachment Mode\n\nRAQCONS - Ply Strains Constraint Mode\nRAQDATC - Ply Strains Distributed Attachment Mode\nRAQNATC - Ply Strains Nodal Attachment Mode\nRAQEATC - Ply Strains Equivalent Inertia Attachment Mode\n\nRARCONS - Reaction Force Constraint Mode\nRARDATC - Reaction Force Distributed Attachment Mode\nRARNATC - Reaction Force Nodal Attachment Mode\nRAREATC - Reaction Force Equivalent Inertia Attachment Mode\n\nRASCONS - Stress Constraint Mode\nRASDATC - Stress Distributed Attachment Mode\nRASNATC - Stress Nodal Attachment Mode\nRASEATC - Stress Equivalent Inertia Attachment Mode\n\nRAPCONS - Ply Stresses Constraint Mode\nRAPDATC - Ply Stresses Distributed Attachment Mode\nRAPNATC - Ply Stresses Nodal Attachment Mode\nRAPEATC - Ply Stresses Equivalent Inertia Attachment Mode\n\nRAGCONS - Grid Point Forces Constraint Mode\nRAGDATC - Grid Point Forces Distributed Attachment Mode\nRAGNATC - Grid Point Forces Nodal Attachment Mode\nRAGEATC - Grid Point Forces Equivalent Inertia Attachment Mode\n\nRADEFMP - Displacement PHA^T * Effective Inertia Mode\n\nRADAMPZ - Viscous Damping Ratio Matrix\nRADAMPG - Structural Damping Ratio Matrix\n\nRAFGEN - Generalized Forces\nBHH - Modal Viscous Damping Matrix\nK4HH - Modal Structural Damping Matrix\n\"\"\"\nGEOM_TABLES = MSC_GEOM_TABLES + NX_GEOM_TABLES\n\nAUTODESK_MATRIX_TABLES = [\n #b'BELM',\n b'KELM',\n #b'MELM',\n] # type: List[bytes]\n# this will be split 
later\nTEST_MATRIX_TABLES = [b'ATB', b'BTA', b'MYDOF']\n\nRESULT_TABLES = NX_RESULT_TABLES + MSC_RESULT_TABLES\nMATRIX_TABLES = NX_MATRIX_TABLES + MSC_MATRIX_TABLES + AUTODESK_MATRIX_TABLES + TEST_MATRIX_TABLES + [b'MEFF']\n\n#GEOM_TABLES = MSC_GEOM_TABLES\n#RESULT_TABLES = MSC_RESULT_TABLES\n#MATRIX_TABLES = MSC_MATRIX_TABLES\n\n# TODO: these are weird...\n# RPOSTS1, MAXRATI, RESCOMP, PDRMSG\nINT_PARAMS_1 = {\n b'POST', b'OPPHIPA', b'OPPHIPB', b'GRDPNT', b'RPOSTS1', b'BAILOUT',\n b'COUPMASS', b'CURV', b'INREL', b'MAXRATI', b'OG',\n b'S1AM', b'S1M', b'DDRMM', b'MAXIT', b'PLTMSG', b'LGDISP', b'NLDISP',\n b'OUNIT2K', b'OUNIT2M', b'RESCOMP', b'PDRMSG', b'LMODES', b'USETPRT',\n b'NOCOMPS', b'OPTEXIT', b'RSOPT', b'GUSTAERO', b'MPTUNIT',\n b'USETSEL', b'NASPRT', b'DESPCH', b'DESPCH1', b'COMPARE', b'DBNBLKS', b'NEWSEQ', b'OLDSEQ',\n b'METHCMRS', b'NOFISR', b'KGGCPCH', b'ERROR', b'DBCDIAG', b'GPECT', b'LSTRN',\n b'DBDROPT', b'SEOP2CV', b'IRES', b'SNORMPRT', b'DBDRNL', b'VMOPT',\n b'OSWPPT', b'KDAMP', b'KDAMPFL', b'MATNL', b'MPCX', b'GEOMPLT', b'NOELOP',\n b'NOGPF', b'PROUT', b'SUPER', b'LGDIS', b'EST', b'SEP1XOVR',\n b'FRSEID', b'HRSEID', b'LRSEID', b'MODACC', b'XFLAG', b'TSTATIC',\n b'NASPDV', b'RMXCRT', b'RMXTRN', b'DBCLEAN', b'LANGLE', b'SEMAPPRT',\n b'FIXEDB', b'AMGOK', b'ASING', b'CNSTRT', b'CURVPLOT', b'CYCIO',\n b'CYCSEQ', b'DBDICT', b'DBINIT', b'DBSET1', b'DBSET2', b'DBSET3', b'DBSET4',\n b'DBSORT', b'DOPT', b'FACTOR', b'ALTSHAPE', b'MODTRK', b'IFTM', b'INRLM',\n b'KINDEX', b'KMIN', b'KMAX', b'LARGEDB', b'LOADINC', b'LOADING', b'LOOP',\n b'LOOPID', b'MODEL', b'MOREK', b'NEWDYN', b'NFECI', b'NINTPTS',\n b'NLAYERS', b'NOELOF', b'NOMSGSTR', b'NONCUP', b'NUMOUT', b'NUMOUT1', b'NUMOUT2',\n b'OPGTKG', b'OPPHIB', b'OUTOPT', b'PKRSP', b'RSPECTRA', b'RSPRINT',\n b'S1G', b'SCRSPEC', b'SEMAPOPT', b'SEQOUT', b'SESEF', b'SKPAMG', b'SKPAMP',\n b'SLOOPID', b'SOLID', b'SPCGEN', b'SRTELTYP', b'SRTOPT', b'START', b'SUBID',\n b'SUBSKP', b'TABID', b'TESTNEG', b'BDMNCON', b'FRUMIN',\n\n # not defined in qrg...\n b'NT', b'PNCHDB', b'DLOAD', b'NLOAD', b'NOAP', b'NOCMPFLD', b'NODATA',\n b'NODJE', b'NOMECH', b'NOSDR1', b'NOSHADE', b'NOSORT1', b'NOTRED',\n b'NSEGS', b'OLDELM', b'OPADOF', b'OUTPUT', b'P1', b'P2', b'P3', b'PCHRESP',\n b'PLOT', b'PLOTSUP', b'PRTPCH', b'RADLIN', b'RESDUAL', b'S1', b'SDATA',\n b'SEFINAL', b'SEMAP1', b'SKPLOAD', b'SKPMTRX', b'SOLID1', b'SSG3',\n b'PEDGEP', b'ACMSPROC', b'ACMSSEID', b'ACOUS', b'ACOUSTIC', b'ADJFLG',\n b'ADJLDF', b'AEDBCP', b'AESRNDM', b'ARCSIGNS', b'ATVUSE', b'BADMESH', b'BCHNG',\n b'BCTABLE', b'ROTCSV', b'ROTGPF', b'BEARDMP', b'BEARFORC', b'BOV', b'OP2FMT',\n\n # ???\n b'CHKSEC', b'CMSMETH', b'CNTNSUB', b'CNTSCL', b'CNTSTPS', b'CONCHG',\n b'DBDRPRJ', b'DBDRVER', b'DDAMRUN', b'DESCONX', b'DESEIG', b'DESFINAL',\n b'DESMAX', b'DESSOLAP', b'DIAGOPT',\n\n b'DOBUCKL', b'DOF123', b'DOMODES', b'DOSTATIC', b'DOTRIP', b'DRESP', b'DSGNOPTX',\n b'DSNOKD', b'DYNAMICX', b'EBULK', b'EIGNFREQ', b'ELOOPID',\n b'FDEPCB', b'FLUIDMP', b'FLUIDSE', b'FMODE', b'FREQDEP', b'FREQDEPS',\n b'GENEL', b'GEOMFLAG', b'GEOMU', b'GKCHNG', b'GLUSET', b'GMCONV', b'GNSTART',\n b'GOODVER', b'GOPH2', b'GRIDFMP', b'GRIDMP', b'HNNLK', b'ICTASET', b'IFPCHNG',\n b'INEP', b'INP2FMT', b'INP4FMT', b'INREL0', b'ITAPE', b'ITODENS', b'ITOITCNT',\n b'ITOMXITR', b'ITONDVAR', b'ITONGHBR', b'ITONOBJF', b'ITOOPITR', b'ITOPALG',\n b'ITOPALLR', b'ITOPCONV', b'ITOPDIAG', b'ITOPOPT', b'ITORMAS', b'ITOSIMP',\n b'ITOSIMP1', b'ITOSIMP2', b'IUNIT', b'K4CHNG', b'KCHNG', b'KREDX', b'LANGLES',\n b'LBEARING', b'LDSTI1', b'LMDYN', 
b'LMODESFL', b'LMSTAT', b'LNUMROT',\n b'LOADGENX', b'LOADREDX', b'LOADU', b'LODCHG', b'LROTOR', b'LTOPOPT',\n b'LUSET', b'LUSETD', b'LUSETS', b'LUSETX', b'MATGENX',\n b'MAXITER', b'MAXRPM', b'MAXSEIDX', b'MBDIFB', b'MBDIFO', b'MBDLMN',\n b'MCHNG', b'MDOF', b'MDTRKFLG', b'MELPG', b'MGRID', b'MLTIMSTR', b'MODESX',\n b'MODETRAK', b'MPIFRHD', b'MPNFLG', b'MREDX', b'MSCOP2', b'NACEXTRA',\n b'NCNOFFST', b'NDISOFP', b'NDVAR', b'NEWSET', b'NGELS', b'NJ', b'NK',\n b'NLBEAR', b'NLCBFOR', b'NMLOOP', b'NMSOL', b'NOA', b'NOASET', b'NOCOMP',\n b'NOFASET', b'NOFGSET', b'NOGENL', b'NOGEOM3', b'NOK4GG', b'NOK4JJ',\n b'NOKGGX', b'NOKJJX', b'NOLSET', b'NOMGG', b'NOMGGX', b'NOMJJX', b'NOQSET',\n b'NORADMAT', b'NORBM', b'NOSE', b'NOSIMP', b'NOSSET', b'NOUE', b'NOUP',\n b'NOYSET', b'NOZSET', b'NQSET', b'NR1OFFST', b'NR2OFFST', b'NR3OFFST',\n b'NROTORS', b'NSE', b'NSKIP0', b'NSOL', b'NSOLF', b'NUMPAN', b'NX',\n b'O2E', b'OADPMAX', b'OALTSHP', b'OBJIN', b'ODESMAX', b'ODSFLG', b'OMAXR',\n b'OP2SE', b'OP4FMT', b'OP4SE', b'OPGEOM', b'OPTIFCS',\n b'OPTII231', b'OPTII408', b'OPTII411', b'OPTII420', b'OPTIIDMP', b'OPTISNS',\n b'OTAPE', b'OUNIT1', b'OUNIT2', b'OUNIT2R', b'OUTFMP', b'OUTSMP', b'PANELMP',\n b'PBCONT', b'PCHNG', b'PITIME', b'PKLLR', b'POSTU', b'PRTMAT', b'PSLGDVX',\n b'PSLOAD', b'PSORT', b'PVALINIT', b'PVALLAST', b'PVALLIST', b'PYCHNG',\n b'REFOPT', b'RESLTOPT', b'RESPSENX', b'RGBEAMA', b'RGBEAME', b'RGLCRIT',\n b'RGSPRGK', b'RMXPANEL', b'ROTPRES', b'ROTPRT', b'RPDFRD', b'RVCHG', b'RVCHG1',\n b'RVCHG2', b'S1AG', b'SAVERSTL', b'SDSRFLAG', b'SEBULK',\n b'SEDMP231', b'SEDMP265', b'SEDMP408', b'SEDMP411', b'SEDMP445', b'SEDMPFLG',\n b'SELDPRS', b'SKIPSE', b'SNDSEIDX', b'SOLFINAL',\n b'SOLNLX', b'SOLNX', b'SOLVSUB', b'SPLINE', b'STOP0', b'STRUCTMP', b'SWEXIST',\n b'TORSIN', b'UACCEL', b'UNIQIDS', b'VOL', b'VOLS', b'VUELJUMP', b'VUENEXT',\n b'VUGJUMP', b'VUGNEXT', b'WGT', b'WGTS', b'WRTMAT',\n b'XSMALLQ',\n b'XNTIPS', b'XRESLTOP', b'XSEMEDIA', b'XSEUNIT', b'XTIPSCOL',\n b'XUPFAC', b'XYUNIT', b'XZCOLLCT', b'Z2XSING',\n b'ZUZRI1', b'ZUZRI2', b'ZUZRI3', b'ZUZRI4', b'ZUZRI5', b'ZUZRI6', b'ZUZRI7', b'ZUZRI8', b'ZUZRI9', b'ZUZRI10',\n b'ZUZRL1', b'ZUZRL2', b'ZUZRL3', b'ZUZRL4', b'ZUZRL5', b'ZUZRL6', b'ZUZRL7', b'ZUZRL8', b'ZUZRL9', b'ZUZRL10',\n b'ZUZRR1', b'ZUZRR2', b'ZUZRR3', b'ZUZRR4', b'ZUZRR5', b'ZUZRR6', b'ZUZRR7', b'ZUZRR8', b'ZUZRR9', b'ZUZRR10',\n\n # no\n #b'SEPS', b'SMALLQ', b'FEPS',\n}\nFLOAT_PARAMS_1 = {\n b'K6ROT', b'WTMASS', b'SNORM', b'PATVER', b'MAXRATIO', b'EPSHT',\n b'SIGMA', b'TABS', b'AUNITS', b'BOLTFACT', b'LMSCAL',\n 'DSZERO', b'G', b'GFL', b'LFREQ', b'HFREQ', b'ADPCON',\n b'W3', b'W4', b'W3FL', b'W4FL', b'PREFDB',\n b'EPZERO', b'DSZERO', b'TINY', b'TOLRSC',\n b'FRSPD', b'HRSPD', b'LRSPD', b'MTRFMAX', b'ROTCMRF', b'MTRRMAX',\n b'LAMLIM', b'BIGER', b'BIGER1', b'BIGER2', b'CLOSE',\n b'EPSBIG', b'EPSMALC', b'EPSMALU', b'HIRES', b'KDIAG', b'MACH', b'VREF',\n b'STIME', b'TESTSE', b'LFREQFL', b'Q', b'ADPCONS', b'AFNORM', b'AFZERO',\n b'GE', b'MASSDENS',\n\n # should this be FLOAT_PARAMS_1???\n b'EPPRT', b'HFREQFL',\n\n # not defined\n b'PRPA', b'PRPHIVZ', b'PRPJ', b'PRRULV', b'RMAX', b'ADJFRQ', b'ARF',\n b'ARS', # b'BSHDAMP',\n b'EPSRC',\n\n # floats - not verified\n b'THRSHOLD', b'SEPS', b'SMALLQ', b'FEPS',\n\n # or integer (not string)\n b'BSHDMP',\n b'BSHDMP4',\n b'CONFAC',\n b'CP',\n b'DBCPAE',\n b'DBCPATH',\n b'DFREQ', b'DFRSPCF', b'DSTSPCF', b'DTRSPCF',\n b'DUCTFMAX',\n b'EXTBEMI', b'EXTBEMO', b'EXTDONE', b'EXTDRUNT', b'EXTUNIT',\n b'FZERO', b'LMFACT', b'MPCZERO',\n b'RESVPGF', b'RESVRAT', 
b'SWPANGLE', b'UPFAC', b'UZROLD',\n}\nFLOAT_PARAMS_2 = {\n b'BETA', b'CB1', b'CB2', b'CK1', b'CK2', b'CK3', b'CK41', b'CK42',\n b'CM1', b'CM2',\n b'G1', b'G2', b'G3', b'G4', b'G5', b'G6', b'G7', b'G8', b'G9', b'G10',\n b'G11', b'G12', b'G13', b'G14', b'G15', b'G16', b'G17', b'G18', b'G19',\n b'ALPHA1', b'ALPHA2',\n b'CA1', b'CA2',\n b'CP1', b'CP2',\n\n\n # should this be FLOAT_PARAMS_1???\n #b'EPPRT',\n}\nINT_PARAMS_2 = {\n b'LOADFACS',\n b'ZUZRC1', b'ZUZRC2', b'ZUZRC3', b'ZUZRC4', b'ZUZRC5', b'ZUZRC6', b'ZUZRC7', b'ZUZRC8', b'ZUZRC9', b'ZUZRC10',\n}\nDOUBLE_PARAMS_1 = [] # b'Q'\nSTR_PARAMS_1 = {\n b'POSTEXT', b'PRTMAXIM', b'AUTOSPC', b'OGEOM', b'PRGPST',\n b'RESVEC', b'RESVINER', b'ALTRED', b'OGPS', b'OIBULK', b'OMACHPR',\n b'UNITSYS', b'F56', b'OUGCORD', b'OGEM', b'EXTSEOUT',\n b'CDIF', b'SUPAERO', b'RSCON', b'AUTOMPC', b'DBCCONV',\n b'AUTOSPRT', b'PBRPROP', b'OMID', b'HEATSTAT', b'SECOMB', b'ELEMITER',\n b'ELITASPC', b'DBCONV', b'SHLDAMP', b'COMPMATT', b'SPCSTR', b'ASCOUP',\n b'PRTRESLT', b'SRCOMPS', b'CHECKOUT', b'SEMAP', b'AESMETH', b'RESVALT',\n b'ROTSYNC', b'SYNCDAMP', b'PRGPOST', b'WMODAL', b'SDAMPUP',\n b'COLPHEXA', b'CHKOUT', b'CTYPE', b'DBNAME', b'VUHEXA', b'VUPENTA', b'VUTETRA',\n b'MESH', b'OPTION', b'PRINT', b'SENAME', b'MECHFIX', b'RMXTRAN', b'FLEXINV',\n b'ADSTAT', b'ACOUT', b'ACSYM', b'ACTYPE', b'ADBX', b'AUTOSEEL',\n b'RDSPARSE',\n b'SPARSEDR',\n b'BSHDAMP',\n b'CORROPT',\n b'DBACOUS',\n b'DBALLNOQ',\n b'DBALLX',\n b'DBAPI',\n b'DBAPP',\n b'DBCNT',\n b'DBCOVWRT',\n b'DBDNOPT',\n b'DBDNR', b'DBDNR1', b'DBDNX', b'DBEXT', b'DBGOA', b'DBMAP',\n b'DBOFP2X', b'DBOFPX', b'DBRCVX', b'DBSCRR', b'DBUPOPT', b'DBUPR',\n b'DBUPX', b'DBXSEDR', b'DBXSEDRR', b'DBZUZR', b'DSOR', b'DSOX',\n b'DVGRDN', b'DYNSPCF', b'EQVSCR', b'EXTDROUT',\n b'FLEXINCR', b'FTL', b'GDAMPF', b'GEOCENT', b'IFPSCR', b'IFPSOPT',\n b'IFPX', b'IFPXOPT', b'MASTER', b'MODEOUT',\n b'NXVER', b'OAPP', b'OCMP', b'OEE', b'OEEX', b'OEF', b'OEFX', b'OEPT',\n b'OES', b'OESE', b'OESX', b'OGPF', b'OMPT', b'OPG', b'OPTIM', b'OQG',\n b'OUG', b'OUMU', b'OUTSCR', b'PANAME', b'QSETREM', b'RESVSE', b'RESVSLI',\n b'RESVSO', b'RSATT', b'SAVEOFP', b'SAVERST', b'SCRATCH', b'SDRPOPT',\n b'SECOMB0', b'SELRNG', b'SERST', b'SOFTEXIT', b'SOLAPPI', b'SOLTYPI',\n b'TDB0', b'TDBX', b'UPDTBSH',\n b'USETSTR1', b'USETSTR2', b'USETSTR3', b'USETSTR4',\n b'VMOPTSET', b'VUBEAM', b'VUQUAD4', b'VUTRIA3', b'WRN', b'XAUTOSPT',\n b'XRESVECA', b'XRESVECO', b'XRESVIRA', b'XRESVIRO',\n b'ZUZRCL1', b'ZUZRCL2', b'ZUZRCL3', b'ZUZRCL4', b'ZUZRCL5', b'ZUZRCL6', b'ZUZRCL7', b'ZUZRCL8', b'ZUZRCL9', b'ZUZRCL10',\n b'ZUZRCH1', b'ZUZRCH2', b'ZUZRCH3', b'ZUZRCH4', b'ZUZRCH5', b'ZUZRCH6', b'ZUZRCH7', b'ZUZRCH8', b'ZUZRCH9', b'ZUZRCH10',\n b'APPI', b'APPF',\n\n # part of param, checkout\n b'PRTBGPDT', b'PRTCSTM', b'PRTEQXIN', b'PRTGPDT',\n b'PRTGPL', b'PRTGPTT', b'PRTMGG', b'PRTPG',\n\n # superelements\n b'EXTOUT', b'SESDAMP',\n\n # TODO: remove these as they're in the matrix test and are user\n # defined PARAMs; arguably all official examples should just work\n # TODO: add an option for custom PARAMs\n b'ADB', b'AEDB', b'MREDUC', b'OUTDRM', b'OUTFORM', b'REDMETH', b'DEBUG',\n b'AEDBX', b'AERO', b'AUTOSUP0', b'AXIOPT',\n}\n\nclass OP2_Scalar(LAMA, ONR, OGPF,\n OEF, OES, OGS, OPG, OQG, OUG, OGPWG, FortranFormat):\n \"\"\"Defines an interface for the Nastran OP2 file.\"\"\"\n @property\n def total_effective_mass_matrix(self):\n \"\"\"6x6 matrix\"\"\"\n return self.matrices['EFMFSMS']\n\n @property\n def effective_mass_matrix(self):\n \"\"\"6x6 matrix\"\"\"\n return 
self.matrices['EFMASSS']\n\n @property\n def rigid_body_mass_matrix(self):\n \"\"\"6x6 matrix\"\"\"\n return self.matrices['RBMASS']\n\n @property\n def modal_effective_mass_fraction(self):\n \"\"\"6xnmodes matrix\"\"\"\n return self.matrices['EFMFACS']#.dataframe\n\n @property\n def modal_participation_factors(self):\n \"\"\"6xnmodes matrix\"\"\"\n return self.matrices['MPFACS']#.dataframe\n\n @property\n def modal_effective_mass(self):\n \"\"\"6xnmodes matrix\"\"\"\n return self.matrices['MEFMASS']#.dataframe\n\n @property\n def modal_effective_weight(self):\n \"\"\"6xnmodes matrix\"\"\"\n return self.matrices['MEFWTS']#.dataframe\n\n @property\n def matrix_tables(self):\n return MATRIX_TABLES\n\n def set_as_nx(self):\n self.is_nx = True\n self.is_msc = False\n self.is_autodesk = False\n self.is_nasa95 = False\n self.is_optistruct = False\n self._nastran_format = 'nx'\n\n def set_as_msc(self):\n self.is_nx = False\n self.is_msc = True\n self.is_autodesk = False\n self.is_nasa95 = False\n self.is_optistruct = False\n self._nastran_format = 'msc'\n\n def set_as_autodesk(self):\n self.is_nx = False\n self.is_msc = False\n self.is_autodesk = True\n self.is_nasa95 = False\n self.is_optistruct = False\n self._nastran_format = 'autodesk'\n\n def set_as_nasa95(self):\n self.is_nx = False\n self.is_msc = False\n self.is_autodesk = False\n self.is_optistruct = False\n self.is_nasa95 = True\n self._nastran_format = 'nasa95'\n self._read_oes1_loads = self._read_oes1_loads_nasa95\n self._read_oef1_loads = self._read_oef1_loads_nasa95\n\n def set_as_optistruct(self):\n self.is_nx = False\n self.is_msc = False\n self.is_autodesk = False\n self.is_nasa95 = False\n self.is_optistruct = True\n self._nastran_format = 'optistruct'\n\n def __init__(self, debug=False, log=None, debug_file=None):\n \"\"\"\n Initializes the OP2_Scalar object\n\n Parameters\n ----------\n debug : bool; default=False\n enables the debug log and sets the debug in the logger\n log : Log()\n a logging object to write debug messages to\n (.. seealso:: import logging)\n debug_file : str; default=None (No debug)\n sets the filename that will be written to\n\n \"\"\"\n assert isinstance(debug, bool), 'debug=%r' % debug\n\n self.log = get_logger(log, 'debug' if debug else 'info')\n self._count = 0\n self.op2_filename = None\n self.bdf_filename = None\n self.f06_filename = None\n self.des_filename = None\n self.h5_filename = None\n self._encoding = 'utf8'\n\n #: should a MATPOOL \"symmetric\" matrix be stored as symmetric\n #: it takes double the RAM, but is easier to use\n self.apply_symmetry = True\n\n LAMA.__init__(self)\n ONR.__init__(self)\n OGPF.__init__(self)\n\n OEF.__init__(self)\n OES.__init__(self)\n #OESM.__init__(self)\n OGS.__init__(self)\n\n OPG.__init__(self)\n OQG.__init__(self)\n OUG.__init__(self)\n OGPWG.__init__(self)\n FortranFormat.__init__(self)\n\n self.is_vectorized = False\n self._close_op2 = True\n\n self.result_names = set()\n\n self.grid_point_weight = {}\n self.words = []\n self.debug = debug\n self._last_comment = None\n #self.debug = True\n #self.debug = False\n #debug_file = None\n if debug_file is None:\n self.debug_file = None\n else:\n assert isinstance(debug_file, str), debug_file\n self.debug_file = debug_file\n\n self.op2_reader = OP2Reader(self)\n\n def set_subcases(self, subcases=None):\n \"\"\"\n Allows you to read only the subcases in the list of isubcases\n\n Parameters\n ----------\n subcases : List[int, ...] 
/ int; default=None->all subcases\n list of [subcase1_ID,subcase2_ID]\n\n \"\"\"\n #: stores the set of all subcases that are in the OP2\n #self.subcases = set()\n if subcases is None or subcases == []:\n #: stores if the user entered [] for isubcases\n self.is_all_subcases = True\n self.valid_subcases = []\n else:\n #: should all the subcases be read (default=True)\n self.is_all_subcases = False\n\n if isinstance(subcases, int):\n subcases = [subcases]\n\n #: the set of valid subcases -> set([1,2,3])\n self.valid_subcases = set(subcases)\n self.log.debug(\"set_subcases - subcases = %s\" % self.valid_subcases)\n\n def set_transient_times(self, times): # TODO this name sucks...\n \"\"\"\n Takes a dictionary of list of times in a transient case and\n gets the output closest to those times.\n\n Examples\n --------\n >>> times = {subcase_id_1: [time1, time2],\n subcase_id_2: [time3, time4]}\n\n .. warning:: I'm not sure this still works...\n\n \"\"\"\n expected_times = {}\n for (isubcase, etimes) in times.items():\n etimes = list(times)\n etimes.sort()\n expected_times[isubcase] = array(etimes)\n self.expected_times = expected_times\n\n def _get_table_mapper(self):\n \"\"\"gets the dictionary of function3 / function4\"\"\"\n\n # MSC table mapper\n table_mapper = {\n # per NX\n b'OESVM1' : [self._read_oes1_3, self._read_oes1_4], # isat_random\n b'OESVM1C' : [self._read_oes1_3, self._read_oes1_4], # isat_random\n b'OSTRVM1' : [self._read_oes1_3, self._read_ostr1_4], # isat_random\n b'OSTRVM1C' : [self._read_oes1_3, self._read_ostr1_4], # isat_random\n\n b'OSTRVM2' : [self._read_oes2_3, self._read_ostr2_4],\n\n b'OESVM2' : [self._read_oes2_3, self._read_oes2_4], # big random\n b'OES2C' : [self._read_oes2_3, self._read_oes2_4],\n b'OSTR2' : [self._read_oes2_3, self._read_ostr2_4], # TODO: disable\n b'OSTR2C' : [self._read_oes2_3, self._read_ostr2_4],\n #b'OES2C' : [self._table_passer, self._table_passer], # stress\n #b'OSTR2' : [self._table_passer, self._table_passer], # TODO: enable\n #b'OSTR2C' : [self._table_passer, self._table_passer],\n\n b'OTEMP1' : [self._read_otemp1_3, self._read_otemp1_4],\n # --------------------------------------------------------------------------\n # MSC TABLES\n # common tables\n\n # unorganized\n b'RADCONS': [self._read_oug1_3, self._read_oug_4], # Displacement Constraint Mode (OUG)\n b'RADEFFM': [self._read_oug1_3, self._read_oug_4], # Displacement Effective Inertia Mode (OUG)\n b'RADEATC': [self._read_oug1_3, self._read_oug_4], # Displacement Equivalent Inertia Attachment mode (OUG)\n\n # broken - isat_launch_100hz.op2 - wrong numwide\n # spc forces\n b'RAQCONS': [self._read_oqg1_3, self._read_oqg_4], # Constraint mode MPC force table (OQG)\n b'RAQEATC': [self._read_oqg1_3, self._read_oqg_4], # Attachment mode MPC force table (OQG)\n #b'RAQCONS': [self._table_passer, self._table_passer], # temporary\n #b'RAQEATC': [self._table_passer, self._table_passer], # temporary\n\n # element forces\n b'RAFCONS': [self._read_oef1_3, self._read_oef1_4], # Element Force Constraint Mode (OEF)\n b'RAFEATC': [self._read_oef1_3, self._read_oef1_4], # Element Force Equivalent Inertia Attachment mode (OEF)\n #b'RAFCONS': [self._table_passer, self._table_passer], # temporary\n #b'RAFEATC': [self._table_passer, self._table_passer], # temporary\n\n # grid point forces\n b'RAGCONS': [self._read_ogpf1_3, self._read_ogpf1_4], # Grid Point Forces Constraint Mode (OGPFB)\n b'RAGEATC': [self._read_ogpf1_3, self._read_ogpf1_4], # Grid Point Forces Equivalent Inertia Attachment mode (OEF)\n 
#b'RAGCONS': [self._table_passer, self._table_passer], # Grid Point Forces Constraint Mode (OGPFB)\n #b'RAGEATC': [self._table_passer, self._table_passer], # Grid Point Forces Equivalent Inertia Attachment mode (OEF)\n\n # stress\n b'RAPCONS': [self._read_oes1_3, self._read_oes1_4], # Constraint mode ply stress table (OES)\n b'RAPEATC': [self._read_oes1_3, self._read_oes1_4], # Attachment mode ply stress table (OES)\n #b'RAPCONS': [self._table_passer, self._table_passer], # Constraint mode ply stress table (OES)\n #b'RAPEATC': [self._table_passer, self._table_passer], # Attachment mode ply stress table (OES)\n\n # stress\n b'RASCONS': [self._read_oes1_3, self._read_oes1_4], # Stress Constraint Mode (OES)\n b'RASEATC': [self._read_oes1_3, self._read_oes1_4], # Stress Equivalent Inertia Attachment mode (OES)\n #b'RASCONS': [self._table_passer, self._table_passer], # temporary\n #b'RASEATC': [self._table_passer, self._table_passer], # temporary\n\n # strain\n b'RAEEATC': [self._read_oes1_3, self._read_ostr1_4], # Strain Equivalent Inertia Attachment mode (OES)\n b'RAECONS': [self._read_oes1_3, self._read_ostr1_4], # Strain Constraint Mode (OSTR)\n #b'RAEEATC': [self._table_passer, self._table_passer], # temporary\n #b'RAECONS': [self._table_passer, self._table_passer], # temporary\n\n # strain energy\n b'RANEATC' : [self._read_onr1_3, self._read_onr1_4], # Strain Energy Equivalent Inertia Attachment mode (ORGY1)\n b'RANCONS': [self._read_onr1_3, self._read_onr1_4], # Constraint mode element strain energy table (ORGY1)\n #b'RANEATC': [self._table_passer, self._table_passer], # Strain Energy Equivalent Inertia Attachment mode (ORGY1)\n #b'RANCONS': [self._table_passer, self._table_passer], # Constraint mode element strain energy table (ORGY1)\n\n\n #b'TOL': [self._table_passer, self._table_passer],\n\n b'MATPOOL': [self._table_passer, self._table_passer], # DMIG bulk data entries\n\n # this comment may refer to CSTM?\n #F:\\work\\pyNastran\\examples\\Dropbox\\pyNastran\\bdf\\cards\\test\\test_mass_01.op2\n #F:\\work\\pyNastran\\examples\\matpool\\gpsc1.op2\n b'AXIC': [self._table_passer, self._table_passer],\n\n b'RSOUGV1': [self._table_passer, self._table_passer],\n b'RESOES1': [self._table_passer, self._table_passer],\n b'RESEF1' : [self._table_passer, self._table_passer],\n b'DESCYC' : [self._table_passer, self._table_passer],\n #b'AEMONPT' : [self._read_aemonpt_3, self._read_aemonpt_4],\n #=======================\n # OEF\n # element forces\n #b'OEFITSTN' : [self._table_passer, self._table_passer], # works\n b'OEFITSTN' : [self._read_oef1_3, self._read_oef1_4],\n b'OEFIT' : [self._read_oef1_3, self._read_oef1_4], # failure indices\n b'OEF1X' : [self._read_oef1_3, self._read_oef1_4], # element forces at intermediate stations\n b'OEF1' : [self._read_oef1_3, self._read_oef1_4], # element forces or heat flux\n b'HOEF1' : [self._read_oef1_3, self._read_oef1_4], # element heat flux\n b'DOEF1' : [self._read_oef1_3, self._read_oef1_4], # scaled response spectra - forces\n\n # off force\n b'OEF2' : [self._read_oef2_3, self._read_oef2_4], # element forces or heat flux\n #=======================\n # OQG\n # spc forces\n # OQG1/OQGV1 - spc forces in the nodal frame\n # OQP1 - scaled response spectra - spc-forces\n b'OQG1' : [self._read_oqg1_3, self._read_oqg_4],\n b'OQG2' : [self._read_oqg2_3, self._read_oqg_4],\n\n b'OQGV1' : [self._read_oqg1_3, self._read_oqg_4],\n b'OQGV2' : [self._read_oqg2_3, self._read_oqg_4],\n\n b'OQP1' : [self._read_oqg1_3, self._read_oqg_4],\n b'OQP2' : [self._read_oqg2_3, 
self._read_oqg_4],\n\n # SPC/MPC tables depending on table_code\n # SPC - NX/MSC\n # MPC - MSC\n b'OQGATO1' : [self._read_oqg1_3, self._read_oqg_4],\n b'OQGCRM1' : [self._read_oqg1_3, self._read_oqg_4],\n b'OQGPSD1' : [self._read_oqg1_3, self._read_oqg_4],\n b'OQGRMS1' : [self._read_oqg1_3, self._read_oqg_4],\n b'OQGNO1' : [self._read_oqg1_3, self._read_oqg_4],\n\n b'OQGATO2' : [self._read_oqg2_3, self._read_oqg_4],\n b'OQGCRM2' : [self._read_oqg2_3, self._read_oqg_4],\n b'OQGPSD2' : [self._read_oqg2_3, self._read_oqg_4],\n b'OQGRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n b'OQGNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n #b'OQGRMS2' : [self._read_oqg2_3, self._read_oqg_4], # buggy on isat random\n #b'OQGNO2' : [self._read_oqg2_3, self._read_oqg_4], # buggy on isat random\n\n b'PSDF' : [self._read_psdf_3, self._read_psdf_4], # MSC NASA/goesr\n\n #=======================\n # MPC Forces\n # these are NX tables\n\n # OQGM1 - mpc forces in the nodal frame\n b'OQMG1' : [self._read_oqg1_3, self._read_oqg_mpc_forces],\n b'OQMATO1' : [self._read_oqg1_3, self._read_oqg_mpc_ato],\n b'OQMCRM1' : [self._read_oqg1_3, self._read_oqg_mpc_crm],\n b'OQMPSD1' : [self._read_oqg1_3, self._read_oqg_mpc_psd],\n b'OQMRMS1' : [self._read_oqg1_3, self._read_oqg_mpc_rms],\n b'OQMNO1' : [self._read_oqg1_3, self._read_oqg_mpc_no],\n\n b'OQMG2' : [self._read_oqg2_3, self._read_oqg_mpc_forces], # big random\n b'OQMATO2' : [self._read_oqg2_3, self._read_oqg_mpc_ato],\n b'OQMCRM2' : [self._read_oqg2_3, self._read_oqg_mpc_crm],\n b'OQMPSD2' : [self._read_oqg2_3, self._read_oqg_mpc_psd],\n b'OQMRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n b'OQMNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n #b'OQMRMS2' : [self._read_oqg2_3, self._read_oqg_mpc_rms], # buggy on isat random\n #b'OQMNO2' : [self._read_oqg2_3, self._read_oqg_mpc_no], # buggy on isat random\n\n #=======================\n # OPG\n # applied loads\n b'OPG1' : [self._read_opg1_3, self._read_opg1_4], # applied loads in the nodal frame\n b'OPGV1' : [self._read_opg1_3, self._read_opg1_4], # solution set applied loads?\n b'OPNL1' : [self._read_opg1_3, self._read_opg1_4], # nonlinear loads\n b'OCRPG' : [self._read_opg1_3, self._read_opg1_4], # post-buckling loads\n\n b'OPG2' : [self._read_opg2_3, self._read_opg1_4], # applied loads in the nodal frame\n b'OPNL2' : [self._read_opg2_3, self._read_opg1_4], # nonlinear loads\n\n b'OPGATO1' : [self._read_opg1_3, self._read_opg1_4],\n b'OPGCRM1' : [self._read_opg1_3, self._read_opg1_4],\n b'OPGPSD1' : [self._read_opg1_3, self._read_opg1_4],\n b'OPGRMS1' : [self._read_opg1_3, self._read_opg1_4],\n b'OPGNO1' : [self._read_opg1_3, self._read_opg1_4],\n\n b'OPGATO2' : [self._read_opg2_3, self._read_opg1_4],\n b'OPGCRM2' : [self._read_opg2_3, self._read_opg1_4],\n b'OPGPSD2' : [self._read_opg2_3, self._read_opg1_4],\n #b'OPGRMS2' : [self._table_passer, self._table_passer],\n #b'OPGNO2' : [self._table_passer, self._table_passer],\n b'OPGRMS2' : [self._read_opg2_3, self._read_opg1_4],\n b'OPGNO2' : [self._read_opg2_3, self._read_opg1_4],\n #=======================\n # OGPFB1\n # grid point forces\n b'OGPFB1' : [self._read_ogpf1_3, self._read_ogpf1_4], # grid point forces\n #b'OGPFB2' : [self._read_ogpf1_3, self._read_ogpf1_4], # grid point forces\n\n #=======================\n # ONR/OEE\n # strain energy density\n b'ONRGY' : [self._read_onr1_3, self._read_onr1_4],\n b'ONRGY1' : [self._read_onr1_3, self._read_onr1_4], # 
strain energy density\n b'ONRGY2': [self._read_onr2_3, self._read_onr1_4],\n #b'ONRGY2': [self._table_passer, self._table_passer],\n #===========================================================\n # OES\n # stress\n # OES1C - Table of composite element stresses or strains in SORT1 format\n # OESRT - Table of composite element ply strength ratio. Output by SDRCOMP\n b'OES1X1' : [self._read_oes1_3, self._read_oes1_4], # stress - nonlinear elements\n b'OES1' : [self._read_oes1_3, self._read_oes1_4], # stress - linear only\n b'OES1X' : [self._read_oes1_3, self._read_oes1_4], # element stresses at intermediate stations & nonlinear stresses\n b'OES1C' : [self._read_oes1_3, self._read_oes1_4], # stress - composite\n b'OESCP' : [self._read_oes1_3, self._read_oes1_4], # stress - nonlinear???\n b'OESRT' : [self._read_oes1_3, self._read_oes1_4], # ply strength ratio\n\n # strain\n b'OSTR1' : [self._read_oes1_3, self._read_ostr1_4], # strain - autodesk/9zk6b5uuo.op2\n b'OSTR1X' : [self._read_oes1_3, self._read_ostr1_4], # strain - isotropic\n b'OSTR1C' : [self._read_oes1_3, self._read_ostr1_4], # strain - composite\n b'OESTRCP' : [self._read_oes1_3, self._read_ostr1_4],\n\n b'OSTR1PL' : [self._table_passer, self._table_passer], # ????\n b'OSTR1THC' : [self._table_passer, self._table_passer], # ????\n b'OSTR1CR' : [self._table_passer, self._table_passer], # ????\n #b'OEFIIP'\n b'XCASECC' : [self._table_passer, self._table_passer], # ????\n\n # special nonlinear tables\n # OESNLBR - Slideline stresses\n # OESNLXD - Nonlinear transient stresses\n # OESNLXR - Nonlinear stress\n # Table of nonlinear element stresses in SORT1 format and appended for all subcases\n\n b'OESNLXR' : [self._read_oes1_3, self._read_oes1_4], # nonlinear stresses\n b'OESNLXD' : [self._read_oes1_3, self._read_oes1_4], # nonlinear transient stresses\n b'OESNLBR' : [self._read_oes1_3, self._read_oes1_4],\n b'OESNL1X' : [self._read_oes1_3, self._read_oes1_4],\n\n b'OESNL2' : [self._read_oes2_3, self._read_oes2_4],\n b'OESNLXR2' : [self._read_oes2_3, self._read_oes2_4],\n b'OESNLBR2' : [self._read_oes2_3, self._read_oes2_4],\n #b'OESNLXR2' : [self._table_passer, self._table_passer],\n #b'OESNLBR2' : [self._table_passer, self._table_passer],\n\n # off stress\n b'OES2' : [self._read_oes2_3, self._read_oes2_4], # stress - linear only - disabled; need better tests\n #b'OES2' : [self._table_passer, self._table_passer], # stress - linear only - disabled; need better tests\n\n b'OESPSD2C' : [self._read_oes2_3, self._read_oes2_4], # isat_random (nx)\n b'OSTPSD2C' : [self._read_oes2_3, self._read_ostr2_4], # isat_random (nx)\n #=======================\n\n # off strain\n b'OSTRATO1' : [self._read_oes1_3, self._read_ostr1_4],\n b'OSTRCRM1' : [self._read_oes1_3, self._read_ostr1_4],\n b'OSTRPSD1' : [self._read_oes1_3, self._read_ostr1_4],\n b'OSTRRMS1' : [self._read_oes1_3, self._read_ostr1_4], # isat_random\n b'OSTRNO1' : [self._read_oes1_3, self._read_ostr1_4], # isat_random\n\n b'OSTRATO2' : [self._read_oes2_3, self._read_ostr2_4],\n b'OSTRCRM2' : [self._read_oes2_3, self._read_ostr2_4],\n b'OSTRPSD2' : [self._read_oes2_3, self._read_ostr2_4],\n b'OSTRRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n b'OSTRNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n #b'OSTRRMS2' : [self._read_oes2_3, self._read_ostr2_4], # buggy on isat random\n #b'OSTRNO2' : [self._read_oes2_3, self._read_ostr2_4], # buggy on isat random\n\n b'OSTRMS1C' : [self._read_oes1_3, self._read_ostr1_4], # isat_random\n b'OSTNO1C' : 
[self._read_oes1_3, self._read_ostr1_4], # isat_random\n\n #=======================\n # OUG\n # displacement/velocity/acceleration/eigenvector/temperature\n b'OUG1' : [self._read_oug1_3, self._read_oug_4], # displacements in nodal frame\n # OVG1?\n b'OAG1' : [self._read_oug1_3, self._read_oug_4], # accelerations in nodal frame\n\n b'OUG1F' : [self._read_oug1_3, self._read_oug_4], # acoustic displacements in ? frame\n\n b'OUGV1' : [self._read_oug1_3, self._read_oug_4], # displacements in nodal frame\n b'BOUGV1' : [self._read_oug1_3, self._read_oug_4], # OUG1 on the boundary???\n b'BOUGF1' : [self._read_oug1_3, self._read_oug_4], # OUG1 on the boundary???\n b'OUGV1PAT': [self._read_oug1_3, self._read_oug_4], # OUG1 + coord ID\n b'OUPV1' : [self._read_oug1_3, self._read_oug_4], # scaled response spectra - displacement\n b'TOUGV1' : [self._read_oug1_3, self._read_oug_4], # grid point temperature\n b'ROUGV1' : [self._read_oug1_3, self._read_oug_4], # relative OUG\n b'OPHSA' : [self._read_oug1_3, self._read_oug_4], # Displacement output table in SORT1\n b'OUXY1' : [self._read_oug1_3, self._read_oug_4], # Displacements in SORT1 format for h-set or d-set.\n b'OUGPC1' : [self._read_ougpc1_3, self._read_ougpc_4], # panel contributions\n b'OUGPC2' : [self._read_ougpc2_3, self._read_ougpc_4], # panel contributions\n b'OUGF1' : [self._read_oug1_3, self._read_oug_4], # Acoustic pressures at microphone points in SORT1 format\n b'OUGF2' : [self._read_oug2_3, self._read_oug_4], # Acoustic pressures at microphone points in SORT1 format\n\n b'OUGV2' : [self._read_oug2_3, self._read_oug_4], # displacements in nodal frame\n b'ROUGV2' : [self._read_oug2_3, self._read_oug_4], # relative OUG\n b'OUXY2' : [self._read_oug2_3, self._read_oug_4], # Displacements in SORT2 format for h-set or d-set.\n\n # modal contribution\n b'OUGMC1' : [self._read_oug1_3, self._read_ougmc_4],\n b'OQGMC1' : [self._read_oqg1_3, self._read_ougmc_4],\n b'OESMC1' : [self._read_oes1_3, self._read_oesmc_4],\n b'OSTRMC1' : [self._read_oes1_3, self._read_oesmc_4],\n\n #F:\\work\\pyNastran\\examples\\Dropbox\\move_tpl\\sbuckl2a.op2\n b'OCRUG' : [self._read_oug1_3, self._read_oug_4], # post-buckling displacement\n\n b'OPHIG' : [self._read_oug1_3, self._read_oug_4], # eigenvectors in basic coordinate system\n b'BOPHIG' : [self._read_oug1_3, self._read_oug_4], # eigenvectors in basic coordinate system\n b'BOPHIGF' : [self._read_oug1_3, self._read_oug_4], # Eigenvectors in the basic coordinate system for the fluid portion of the model.\n b'BOPHIGS' : [self._read_oug1_3, self._read_oug_4], # Eigenvectors in the basic coordinate system for the structural portion of the model.\n\n b'BOPG1' : [self._read_opg1_3, self._read_opg1_4], # applied loads in basic coordinate system\n\n b'OUGATO1' : [self._read_oug1_3, self._read_oug_ato],\n b'OUGCRM1' : [self._read_oug1_3, self._read_oug_crm],\n b'OUGPSD1' : [self._read_oug1_3, self._read_oug_psd],\n b'OUGRMS1' : [self._read_oug1_3, self._read_oug_rms],\n b'OUGNO1' : [self._read_oug1_3, self._read_oug_no],\n\n b'OUGATO2' : [self._read_oug2_3, self._read_oug_ato],\n b'OUGCRM2' : [self._read_oug2_3, self._read_oug_crm],\n b'OUGPSD2' : [self._read_oug2_3, self._read_oug_psd],\n b'OUGRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n b'OUGNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n #b'OUGRMS2' : [self._read_oug2_3, self._read_oug_rms], # buggy on isat random\n #b'OUGNO2' : [self._read_oug2_3, self._read_oug_no], # buggy on isat random\n\n 
#=======================\n # extreme values of the respective table\n b'OUGV1MX' : [self._table_passer, self._table_passer],\n b'OEF1MX' : [self._table_passer, self._table_passer],\n b'OES1MX' : [self._table_passer, self._table_passer],\n\n #=======================\n # contact\n b'OQGCF1' : [self._read_oqg1_3, self._read_oqg_4], # Contact force at grid point.\n b'OQGCF2' : [self._read_oqg2_3, self._read_oqg_4], # Contact force at grid point.\n\n b'OSPDS1' : [self._nx_table_passer, self._table_passer], # Final separation distance.\n b'OSPDS2' : [self._nx_table_passer, self._table_passer],\n\n b'OSPDSI1' : [self._nx_table_passer, self._table_passer], # Initial separation distance.\n b'OSPDSI2' : [self._nx_table_passer, self._table_passer], # Output contact separation distance results.\n\n #b'OBC1' : [self._read_obc1_3, self._read_obc1_4],\n #b'OBC2' : [self._nx_table_passer, self._table_passer], # Contact pressures and tractions at grid points.\n\n #b'OSLIDE1'\n b'OPRPSD2' : [self._nx_table_passer, self._table_passer],\n b'OPRATO2' : [self._nx_table_passer, self._table_passer],\n b'OPRNO1' : [self._nx_table_passer, self._table_passer],\n b'OPRCRM2' : [self._nx_table_passer, self._table_passer],\n\n b'OCPSDFC' : [self._nx_table_passer, self._table_passer],\n b'OCCORFC' : [self._nx_table_passer, self._table_passer],\n\n # Glue normal and tangential tractions at grid point in basic coordinate system\n b'OBG1' : [self._nx_table_passer, self._table_passer],\n b'OBG2' : [self._nx_table_passer, self._table_passer],\n\n b'OQGGF1' : [self._read_oqg1_3, self._read_oqg_4], # Glue forces at grid point in basic coordinate system\n b'OQGGF2' : [self._read_oqg2_3, self._read_oqg_4],\n\n # Table of Euler Angles for transformation from material to basic coordinate system\n # in the undeformed configuration\n b'TRMBU' : [self._nx_table_passer, self._table_passer],\n b'TRMBD' : [self._nx_table_passer, self._table_passer],\n #=======================\n # OGPWG\n # grid point weight\n b'OGPWG' : [self._read_ogpwg_3, self._read_ogpwg_4], # grid point weight\n b'OGPWGM' : [self._read_ogpwg_3, self._read_ogpwg_4], # modal? 
grid point weight\n\n #=======================\n # OGS\n # grid point stresses\n b'OGS1' : [self._read_ogs1_3, self._read_ogs1_4], # grid point stresses\n #b'OGS2' : [self._read_ogs1_3, self._read_ogs1_4], # grid point stresses\n #=======================\n # eigenvalues\n b'BLAMA' : [self._read_buckling_eigenvalue_3, self._read_buckling_eigenvalue_4], # buckling eigenvalues\n b'CLAMA' : [self._read_complex_eigenvalue_3, self._read_complex_eigenvalue_4], # complex eigenvalues\n b'LAMA' : [self._read_real_eigenvalue_3, self._read_real_eigenvalue_4], # eigenvalues\n b'LAMAS' : [self._read_real_eigenvalue_3, self._read_real_eigenvalue_4], # eigenvalues-structure\n b'LAMAF' : [self._read_real_eigenvalue_3, self._read_real_eigenvalue_4], # eigenvalues-fluid\n\n # ===========================geom passers===========================\n # geometry\n b'GEOM1' : [self._table_passer, self._table_passer], # GEOM1-Geometry-related bulk data\n b'GEOM2' : [self._table_passer, self._table_passer], # GEOM2-element connectivity and SPOINT-related data\n b'GEOM3' : [self._table_passer, self._table_passer], # GEOM3-Static and thermal loads\n b'GEOM4' : [self._table_passer, self._table_passer], # GEOM4-constraints, DOF membership entries, MPC, and R-type element data\n\n # superelements\n b'GEOM1S' : [self._table_passer, self._table_passer], # GEOMx + superelement\n b'GEOM2S' : [self._table_passer, self._table_passer],\n b'GEOM3S' : [self._table_passer, self._table_passer],\n b'GEOM4S' : [self._table_passer, self._table_passer],\n\n b'GEOM1VU' : [self._table_passer, self._table_passer],\n b'GEOM2VU' : [self._table_passer, self._table_passer],\n b'BGPDTVU' : [self._table_passer, self._table_passer],\n\n b'GEOM1N' : [self._table_passer, self._table_passer],\n b'GEOM2N' : [self._table_passer, self._table_passer],\n b'GEOM3N' : [self._table_passer, self._table_passer],\n b'GEOM4N' : [self._table_passer, self._table_passer],\n\n b'GEOM1OLD' : [self._table_passer, self._table_passer],\n b'GEOM2OLD' : [self._table_passer, self._table_passer],\n b'GEOM3OLD' : [self._table_passer, self._table_passer],\n b'GEOM4OLD' : [self._table_passer, self._table_passer],\n\n b'EPT' : [self._table_passer, self._table_passer], # elements\n b'EPTS' : [self._table_passer, self._table_passer], # elements - superelements\n b'EPTOLD' : [self._table_passer, self._table_passer],\n\n b'MPT' : [self._table_passer, self._table_passer], # materials\n b'MPTS' : [self._table_passer, self._table_passer], # materials - superelements\n\n b'DYNAMIC' : [self._table_passer, self._table_passer],\n b'DYNAMICS' : [self._table_passer, self._table_passer],\n b'DIT' : [self._table_passer, self._table_passer],\n b'DITS' : [self._table_passer, self._table_passer],\n b'AXIC' : [self._table_passer, self._table_passer],\n # =========================end geom passers=========================\n\n # ===passers===\n #b'EQEXIN': [self._table_passer, self._table_passer],\n #b'EQEXINS': [self._table_passer, self._table_passer],\n\n b'GPDT' : [self._table_passer, self._table_passer], # grid points?\n b'BGPDT' : [self._table_passer, self._table_passer], # basic grid point defintion table\n b'BGPDTS' : [self._table_passer, self._table_passer],\n b'BGPDTOLD' : [self._table_passer, self._table_passer],\n\n b'PVT' : [self._read_pvto_3, self._read_pvto_4], # PVT - Parameter Variable Table\n b'PVTS' : [self._read_pvto_3, self._read_pvto_4], # ???\n b'PVT0' : [self._read_pvto_3, self._read_pvto_4], # user parameter value table\n b'TOLD' : [self._table_passer, self._table_passer],\n 
b'CASECC' : [self._table_passer, self._table_passer], # case control deck\n\n b'STDISP' : [self._table_passer, self._table_passer], # matrix?\n b'AEDISP' : [self._table_passer, self._table_passer], # matrix?\n #b'TOLB2' : [self._table_passer, self._table_passer], # matrix?\n\n # EDT - element deformation, aerodynamics, p-element, divergence analysis,\n # and iterative solver input (includes SET1 entries)\n b'EDT' : [self._table_passer, self._table_passer],\n b'EDTS' : [self._table_passer, self._table_passer],\n\n b'FOL' : [self._table_passer, self._table_passer],\n b'PERF' : [self._table_passer, self._table_passer],\n b'VIEWTB' : [self._table_passer, self._table_passer], # view elements\n\n # DSCMCOL - Correlation table for normalized design sensitivity coefficient matrix.\n # Output by DSTAP2.\n # DBCOPT - Design optimization history table for\n b'CONTACT' : [self._table_passer, self._table_passer],\n b'CONTACTS' : [self._table_passer, self._table_passer],\n b'OEKE1' : [self._table_passer, self._table_passer],\n #b'DSCMCOL' : [self._table_passer, self._table_passer],\n #b'DBCOPT' : [self._table_passer, self._table_passer],\n #b'FRL0': [self._table_passer, self._table_passer], # frequency response list\n\n #==================================\n # modal participation factors\n # OFMPF2M Table of fluid mode participation factors by normal mode.\n b'OFMPF2M' : [self._read_mpf_3, self._read_mpf_4],\n # OLMPF2M Load mode participation factors by normal mode.\n b'OLMPF2M' : [self._read_mpf_3, self._read_mpf_4],\n # OPMPF2M Panel mode participation factors by normal mode.\n b'OPMPF2M' : [self._read_mpf_3, self._read_mpf_4],\n # OPMPF2M Panel mode participation factors by normal mode.\n b'OSMPF2M' : [self._read_mpf_3, self._read_mpf_4],\n # OGMPF2M Grid mode participation factors by normal mode.\n b'OGPMPF2M' : [self._read_mpf_3, self._read_mpf_4],\n\n #OFMPF2E Table of fluid mode participation factors by excitation frequencies.\n #OSMPF2E Table of structure mode participation factors by excitation frequencies.\n #OPMPF2E Table of panel mode participation factors by excitation frequencies.\n #OLMPF2E Table of load mode participation factors by excitation frequencies.\n #OGMPF2E Table of grid mode participation factors by excitation frequencies.\n\n # velocity\n b'OVGATO1' : [self._read_oug1_3, self._read_oug_ato],\n b'OVGCRM1' : [self._read_oug1_3, self._read_oug_crm],\n b'OVGPSD1' : [self._read_oug1_3, self._read_oug_psd],\n b'OVGRMS1' : [self._read_oug1_3, self._read_oug_rms],\n b'OVGNO1' : [self._read_oug1_3, self._read_oug_no],\n\n b'OVGATO2' : [self._read_oug2_3, self._read_oug_ato],\n b'OVGCRM2' : [self._read_oug2_3, self._read_oug_crm],\n b'OVGPSD2' : [self._read_oug2_3, self._read_oug_psd],\n #b'OVGRMS2' : [self._table_passer, self._table_passer],\n #b'OVGNO2' : [self._table_passer, self._table_passer],\n b'OVGRMS2' : [self._read_oug2_3, self._read_oug_rms],\n b'OVGNO2' : [self._read_oug2_3, self._read_oug_no],\n\n #==================================\n #b'GPL': [self._table_passer, self._table_passer],\n #b'OMM2' : [self._table_passer, self._table_passer], # max/min table - kinda useless\n b'ERRORN' : [self._table_passer, self._table_passer], # p-element error summary table\n #==================================\n\n b'EDOM' : [self._table_passer, self._table_passer],\n b'OUG2T' : [self._table_passer, self._table_passer],\n\n # acceleration\n b'OAGATO1' : [self._read_oug1_3, self._read_oug_ato],\n b'OAGCRM1' : [self._read_oug1_3, self._read_oug_crm],\n b'OAGPSD1' : [self._read_oug1_3, 
self._read_oug_psd],\n b'OAGRMS1' : [self._read_oug1_3, self._read_oug_rms],\n b'OAGNO1' : [self._read_oug1_3, self._read_oug_no],\n\n b'OAGATO2' : [self._read_oug2_3, self._read_oug_ato],\n b'OAGCRM2' : [self._read_oug2_3, self._read_oug_crm],\n b'OAGPSD2' : [self._read_oug2_3, self._read_oug_psd],\n #b'OAGRMS2' : [self._table_passer, self._table_passer],\n #b'OAGNO2' : [self._table_passer, self._table_passer],\n b'OAGRMS2' : [self._read_oug2_3, self._read_oug_rms],\n b'OAGNO2' : [self._read_oug2_3, self._read_oug_no],\n\n # stress\n b'OESATO1' : [self._read_oes1_3, self._read_oes1_4],\n b'OESCRM1' : [self._read_oes1_3, self._read_oes1_4],\n b'OESPSD1' : [self._read_oes1_3, self._read_oes1_4],\n b'OESRMS1' : [self._read_oes1_3, self._read_oes1_4],\n b'OESNO1' : [self._read_oes1_3, self._read_oes1_4],\n\n # OESXRM1C : Composite element RMS stresses in SORT1 format for random analysis that includes von Mises stress output.\n b'OESXRMS1' : [self._read_oes1_3, self._read_oes1_4],\n b'OESXRM1C' : [self._read_oes1_3, self._read_oes1_4],\n b'OESXNO1' : [self._read_oes1_3, self._read_oes1_4],\n b'OESXNO1C' : [self._read_oes1_3, self._read_oes1_4],\n\n\n b'OESATO2' : [self._read_oes2_3, self._read_oes2_4],\n b'OESCRM2' : [self._read_oes2_3, self._read_oes2_4],\n b'OESPSD2' : [self._read_oes2_3, self._read_oes2_4],\n #b'OESRMS2' : [self._read_oes1_3, self._read_oes1_4], # buggy on isat random\n #b'OESNO2' : [self._read_oes1_3, self._read_oes1_4], # buggy on isat random\n b'OESRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n b'OESNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n\n # force\n b'OEFATO1' : [self._read_oef1_3, self._read_oef1_4],\n b'OEFCRM1' : [self._read_oef1_3, self._read_oef1_4],\n b'OEFPSD1' : [self._read_oef1_3, self._read_oef1_4],\n b'OEFRMS1' : [self._read_oef1_3, self._read_oef1_4],\n b'OEFNO1' : [self._read_oef1_3, self._read_oef1_4],\n\n b'OEFATO2' : [self._read_oef2_3, self._read_oef2_4],\n b'OEFCRM2' : [self._read_oef2_3, self._read_oef2_4],\n b'OEFPSD2' : [self._read_oef2_3, self._read_oef2_4],\n #b'OEFRMS2' : [self._read_oef2_3, self._read_oef2_4], # buggy on isat random\n }\n if self.is_nx and 0:\n table_mapper2 = {\n #b'OUGRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n #b'OUGNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n b'OUGRMS2' : [self._read_oug2_3, self._read_oug_rms], # buggy on isat random\n b'OUGNO2' : [self._read_oug2_3, self._read_oug_no], # buggy on isat random\n\n #b'OQMRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n #b'OQMNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n b'OQMRMS2' : [self._read_oqg2_3, self._read_oqg_mpc_rms], # buggy on isat random\n b'OQMNO2' : [self._read_oqg2_3, self._read_oqg_mpc_no], # buggy on isat random\n\n #b'OSTRRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n #b'OSTRNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n b'OSTRRMS2' : [self._read_oes2_3, self._read_ostr2_4], # buggy on isat random\n b'OSTRNO2' : [self._read_oes2_3, self._read_ostr2_4], # buggy on isat random\n\n b'OESRMS2' : [self._read_oes1_3, self._read_oes1_4], # buggy on isat random\n b'OESNO2' : [self._read_oes1_3, self._read_oes1_4], # buggy on isat random\n #b'OESRMS2' : [self._table_passer, self._table_passer], # buggy on isat random\n #b'OESNO2' : [self._table_passer, self._table_passer], # buggy on isat random\n\n b'OEFNO2' : [self._read_oef2_3, 
self._read_oef2_4],\n #b'OEFNO2' : [self._table_passer, self._table_passer], # buggy on isat_random_steve2.op2\n }\n for key, value in table_mapper2.items():\n table_mapper[key] = value\n #table_mapper.update(table_mapper2)\n return table_mapper\n\n def _read_mpf_3(self, data, ndata: int) -> int:\n \"\"\"reads table 3 (the header table)\n\n OFMPF2E Table of fluid mode participation factors by excitation frequencies.\n OFMPF2M Table of fluid mode participation factors by normal mode.\n OSMPF2E Table of structure mode participation factors by excitation frequencies.\n OSMPF2M Table of structure mode participation factors by normal mode.\n OPMPF2E Table of panel mode participation factors by excitation frequencies.\n OPMPF2M Table of panel mode participation factors by normal mode.\n OLMPF2E Table of load mode participation factors by excitation frequencies.\n OLMPF2M Table of load mode participation factors by normal mode.\n OGMPF2E Table of grid mode participation factors by excitation frequencies.\n OGMPF2M Table of grid mode participation factors by normal mode.\n \"\"\"\n #self._set_times_dtype()\n self.nonlinear_factor = np.nan\n self.is_table_1 = True\n self.is_table_2 = False\n unused_three = self.parse_approach_code(data)\n self.words = [\n 'approach_code', 'table_code', '???', 'isubcase',\n '???', '???', '???', 'random_code',\n 'format_code', 'num_wide', '???', '???',\n 'acoustic_flag', '???', '???', '???',\n '???', '???', '???', '???',\n '???', '???', 'thermal', '???',\n '???', 'Title', 'subtitle', 'label']\n\n ## random code\n self.random_code = self.add_data_parameter(data, 'random_code', b'i', 8, False)\n\n ## format code\n self.format_code = self.add_data_parameter(data, 'format_code', b'i', 9, False)\n\n ## number of words per entry in record\n self.num_wide = self.add_data_parameter(data, 'num_wide', b'i', 10, False)\n\n ## acoustic pressure flag\n self.acoustic_flag = self.add_data_parameter(data, 'acoustic_flag', b'i', 13, False)\n\n ## thermal flag; 1 for heat transfer, 0 otherwise\n self.thermal = self.add_data_parameter(data, 'thermal', b'i', 23, False)\n\n #if self.analysis_code == 1: # statics / displacement / heat flux\n ## load set number\n #self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5, False)\n #self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])\n #self.setNullNonlinearFactor()\n #elif self.analysis_code == 2: # real eigenvalues\n ## mode number\n #self.mode = self.add_data_parameter(data, 'mode', b'i', 5)\n ## eigenvalue\n #self.eign = self.add_data_parameter(data, 'eign', b'f', 6, False)\n ## mode or cycle .. 
todo:: confused on the type - F1???\n #self.mode_cycle = self.add_data_parameter(data, 'mode_cycle', b'i', 7, False)\n #self.update_mode_cycle('mode_cycle')\n #self.data_names = self.apply_data_code_value('data_names', ['mode', 'eign', 'mode_cycle'])\n #elif self.analysis_code == 3: # differential stiffness\n #self.lsdvmn = self.get_values(data, b'i', 5) ## load set number\n #self.data_code['lsdvmn'] = self.lsdvmn\n #elif self.analysis_code == 4: # differential stiffness\n #self.lsdvmn = self.get_values(data, b'i', 5) ## load set number\n if self.analysis_code == 5: # frequency\n # frequency\n self.node_id = self.add_data_parameter(data, 'node_id', b'i', 5, fix_device_code=True)\n self.data_names = self.apply_data_code_value('data_names', ['node_id'])\n #self.freq = self.add_data_parameter(data, 'freq', b'f', 5)\n #self.data_names = self.apply_data_code_value('data_names', ['freq'])\n #elif self.analysis_code == 6: # transient\n ## time step\n #self.dt = self.add_data_parameter(data, 'dt', b'f', 5)\n #self.data_names = self.apply_data_code_value('data_names', ['dt'])\n #elif self.analysis_code == 7: # pre-buckling\n ## load set number\n #self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5)\n #self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])\n #elif self.analysis_code == 8: # post-buckling\n ## load set number\n #self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5)\n ## real eigenvalue\n #self.eigr = self.add_data_parameter(data, 'eigr', b'f', 6, False)\n #self.data_names = self.apply_data_code_value('data_names', ['lsdvmn', 'eigr'])\n #elif self.analysis_code == 9: # complex eigenvalues\n ## mode number\n #self.mode = self.add_data_parameter(data, 'mode', b'i', 5)\n ## real eigenvalue\n #self.eigr = self.add_data_parameter(data, 'eigr', b'f', 6, False)\n ## imaginary eigenvalue\n #self.eigi = self.add_data_parameter(data, 'eigi', b'f', 7, False)\n #self.data_names = self.apply_data_code_value('data_names', ['mode', 'eigr', 'eigi'])\n #elif self.analysis_code == 10: # nonlinear statics\n ## load step\n #self.lftsfq = self.add_data_parameter(data, 'lftsfq', b'f', 5)\n #self.data_names = self.apply_data_code_value('data_names', ['lftsfq'])\n #elif self.analysis_code == 11: # old geometric nonlinear statics\n ## load set number\n #self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5)\n #self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])\n #elif self.analysis_code == 12: # contran ? 
(may appear as aCode=6) --> straight from DMAP...grrr...\n ## load set number\n #self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', b'i', 5)\n #self.data_names = self.apply_data_code_value('data_names', ['lsdvmn'])\n else:\n msg = f'invalid analysis_code...analysis_code={self.analysis_code}\\ndata={self.data_code}'\n raise RuntimeError(msg)\n\n #print self.code_information()\n #\n self.fix_format_code()\n if self.num_wide == 8:\n self.format_code = 1\n self.data_code['format_code'] = 1\n else:\n #self.fix_format_code()\n if self.format_code == 1:\n self.format_code = 2\n self.data_code['format_code'] = 2\n assert self.format_code in [2, 3], self.code_information()\n\n self._parse_thermal_code()\n if self.is_debug_file:\n self.binary_debug.write(' approach_code = %r\\n' % self.approach_code)\n self.binary_debug.write(' tCode = %r\\n' % self.tCode)\n self.binary_debug.write(' isubcase = %r\\n' % self.isubcase)\n self._read_title(data)\n self._write_debug_bits()\n\n def _read_mpf_4(self, data, ndata):\n \"\"\"unused\"\"\"\n if self.read_mode == 1: # or self.table_name_str not in ['OFMPF2M']:\n return ndata\n #print(self.table_name_str, ndata, self.num_wide) # 176\n #self.show_ndata(100, types='ifs')\n\n structi = Struct('fiff')\n nelements = ndata // 16\n ndev = ndata % 16\n assert ndev == 0, ndev\n\n for i in range(nelements):\n datai = data[i*16 : (i+1)*16]\n freq, dunno_int, mag, phase = structi.unpack(datai)\n assert dunno_int == 2, str(self.node_id, freq, dunno_int, mag, phase)\n #print(self.node_id, freq, dunno_int, mag, phase)\n #print()\n if self.isubtable == -4:\n self.log.warning('%s results were read, but not saved' % self.table_name_str)\n return ndata\n\n def _read_pvto_3(self, data, ndata):\n \"\"\"unused\"\"\"\n raise RuntimeError(self.read_mode)\n\n def _read_pvto_4(self, data, ndata):\n \"\"\"reads PARAM cards\"\"\"\n if self.read_mode == 2:\n return ndata\n\n iloc = self.f.tell()\n try:\n ndata2 = self._read_pvto_4_helper(data, ndata)\n except NotImplementedError as e:\n self.log.error(str(e))\n #raise # only for testing\n if 'dev' in __version__ and self.IS_TESTING:\n raise # only for testing\n self.f.seek(iloc)\n ndata2 = ndata\n if 'NXVER' in self.params and not self.is_nx:\n self.set_as_nx()\n self.log.debug('found PARAM,NXVER -> setting as NX')\n return ndata2\n\n def _read_pvto_4_helper(self, data, ndata: int) -> int:\n \"\"\"reads PARAM cards\"\"\"\n xword = (4 * self.factor)\n nvalues = ndata // xword\n assert ndata % xword == 0, ndata\n\n if self.size == 4:\n structs8 = self.struct_8s\n #struct2s8 = Struct(b'4s8s')\n struct2i = self.struct_2i\n struct2f = Struct(b'ff')\n struct2d = Struct(b'dd')\n else:\n struct2i = self.struct_2q\n structs8 = self.struct_16s\n struct2f = Struct(b'dd')\n\n i = 0\n\n #print('---------------------------')\n #self.show_data(data, types='ifsqL')\n while i < nvalues:\n #print('-----------------------------------------------------------')\n #print('*i=%s nvalues=%s' % (i, nvalues))\n istart = i*xword\n #self.show_data(data[istart:istart+32], types='sqd')\n #self.show_data(data[istart:istart+64], types='sqd')\n if self.size == 4:\n word = data[istart:(i+2)*xword].rstrip()\n elif self.size == 8:\n bword = data[istart:(i+2)*xword]\n word = reshape_bytes_block(bword).rstrip()\n else:\n raise RuntimeError(self.size)\n\n #print('word=%r' % word)\n #word = s8.unpack(word)[0]#.decode(self._encoding)\n\n # the first two entries are typically trash, then we can get values\n if word in INT_PARAMS_1:\n slot = data[(i+2)*xword:(i+4)*xword]\n value 
= struct2i.unpack(slot)[1]\n i += 4\n elif word in FLOAT_PARAMS_1:\n slot = data[(i+2)*xword:(i+4)*xword]\n value = struct2f.unpack(slot)[1]\n i += 4\n elif word in FLOAT_PARAMS_2:\n slot = data[(i+3)*xword:(i+5)*xword]\n value = struct2f.unpack(slot)\n i += 5\n elif word in INT_PARAMS_2:\n slot = data[(i+3)*xword:(i+5)*xword]\n value = struct2i.unpack(slot)\n i += 5\n elif word in DOUBLE_PARAMS_1:\n slot = data[(i+1)*xword:(i+8)*xword]\n try:\n value = struct2d.unpack(slot)[1]\n except:\n print(word)\n raise\n i += 8\n #elif word in [b'VUHEXA']:\n #self.show_data(data[i*4:(i+5)*4], types='ifs', endian=None)\n #aaa\n elif word in STR_PARAMS_1:\n i += 3\n slot = data[i*xword:(i+2)*xword]\n bvalue = structs8.unpack(slot)[0]\n if self.size == 8:\n bvalue = reshape_bytes_block(bvalue)\n value = bvalue.decode('latin1').rstrip()\n i += 2\n else:\n if self.size == 4:\n self.show_data(data[i*xword+12:i*4+i*4+12], types='ifs')\n self.show_data(data[i*xword+8:(i+4)*4], types='ifs')\n else:\n self.show_data(data[i*xword+24:i*8+i*8+24], types='sdq')\n self.show_data(data[i*xword+16:(i+4)*8], types='sdq')\n #print(i*xword+24, i*8+i*8+24)\n #print(i*xword+16, (i+4)*8)\n self.log.error('%r' % word)\n raise NotImplementedError('%r is not a supported PARAM' % word)\n\n key = word.decode('latin1')\n param = PARAM(key, [value], comment='')\n self.params[key] = param\n #print(f'{key} = {value}')\n #print(param.rstrip())\n return nvalues\n\n def _not_available(self, data: bytes, ndata: int):\n \"\"\"testing function\"\"\"\n if ndata > 0:\n raise RuntimeError('this should never be called...'\n 'table_name=%r len(data)=%s' % (self.table_name, ndata))\n\n def _table_crasher(self, data, ndata):\n \"\"\"auto-table crasher\"\"\"\n if self.is_debug_file:\n self.binary_debug.write(' crashing table = %s\\n' % self.table_name)\n raise NotImplementedError(self.table_name)\n return ndata\n\n def _nx_table_passer(self, data, ndata: int):\n \"\"\"auto-table skipper\"\"\"\n self.to_nx()\n self._table_passer(data, ndata)\n\n def _table_passer(self, data, ndata: int):\n \"\"\"auto-table skipper\"\"\"\n if self.is_debug_file:\n self.binary_debug.write(' skipping table = %s\\n' % self.table_name)\n if self.table_name not in GEOM_TABLES and self.isubtable > -4:\n self.log.warning(' skipping table: %s' % self.table_name_str)\n if not is_release and self.isubtable > -4:\n if self.table_name in GEOM_TABLES and not self.make_geom:\n pass\n else:\n print('dont skip table %r' % self.table_name_str)\n raise RuntimeError('dont skip table %r' % self.table_name_str)\n return ndata\n\n def _validate_op2_filename(self, op2_filename):\n \"\"\"\n Pops a GUI if the op2_filename hasn't been set.\n\n Parameters\n ----------\n op2_filename : str\n the filename to check (None -> gui)\n\n Returns\n -------\n op2_filename : str\n a valid file string\n\n \"\"\"\n if op2_filename is None:\n from pyNastran.utils.gui_io import load_file_dialog\n wildcard_wx = \"Nastran OP2 (*.op2)|*.op2|\" \\\n \"All files (*.*)|*.*\"\n wildcard_qt = \"Nastran OP2 (*.op2);;All files (*)\"\n title = 'Please select a OP2 to load'\n op2_filename, unused_wildcard_level = load_file_dialog(\n title, wildcard_wx, wildcard_qt, dirname='')\n assert op2_filename is not None, op2_filename\n return op2_filename\n\n def _create_binary_debug(self):\n \"\"\"Instatiates the ``self.binary_debug`` variable/file\"\"\"\n if hasattr(self, 'binary_debug') and self.binary_debug is not None:\n self.binary_debug.close()\n del self.binary_debug\n\n self.is_debug_file, self.binary_debug = 
create_binary_debug(\n self.op2_filename, self.debug_file, self.log)\n\n def read_op2(self, op2_filename=None, combine=False, load_as_h5=False, h5_file=None, mode=None):\n \"\"\"\n Starts the OP2 file reading\n\n Parameters\n ----------\n op2_filename : str\n the op2 file\n combine : bool; default=True\n True : objects are isubcase based\n False : objects are (isubcase, subtitle) based;\n will be used for superelements regardless of the option\n load_as_h5 : default=None\n False : don't setup the h5_file\n True : loads the op2 as an h5 file to save memory\n stores the result.element/data attributes in h5 format\n h5_file : h5File; default=None\n None : ???\n h5File : ???\n\n +--------------+-----------------------+\n | op2_filename | Description |\n +--------------+-----------------------+\n | None | a dialog is popped up |\n +--------------+-----------------------+\n | string | the path is used |\n +--------------+-----------------------+\n \"\"\"\n fname = os.path.splitext(op2_filename)[0]\n self.op2_filename = op2_filename\n self.bdf_filename = fname + '.bdf'\n self.f06_filename = fname + '.f06'\n self.des_filename = fname + '.des'\n self.h5_filename = fname + '.h5'\n\n self.op2_reader.load_as_h5 = load_as_h5\n if load_as_h5:\n h5_file = None\n import h5py\n self.h5_file = h5py.File(self.h5_filename, 'w')\n self.op2_reader.h5_file = self.h5_file\n\n self._count = 0\n if self.read_mode == 1:\n #sr = list(self._results.saved)\n #sr.sort()\n #self.log.debug('_results.saved = %s' % str(sr))\n #self.log.info('_results.saved = %s' % str(sr))\n pass\n\n if self.read_mode != 2:\n op2_filename = self._validate_op2_filename(op2_filename)\n self.log.info('op2_filename = %r' % op2_filename)\n if not is_binary_file(op2_filename):\n if os.path.getsize(op2_filename) == 0:\n raise IOError('op2_filename=%r is empty.' % op2_filename)\n raise IOError('op2_filename=%r is not a binary OP2.' % op2_filename)\n\n self._create_binary_debug()\n self._setup_op2()\n _op2 = self.op2_reader.op2\n #is_nasa_nastran = False\n #if is_nasa_nastran:\n #self.show(104, types='ifs', endian=None)\n #self.show(52, types='ifs', endian=None)\n #aa\n #data = _op2.f.read(4)\n #_op2.n += 8\n #_op2.f.seek(_op2.n)\n #else:\n self.op2_reader.read_nastran_version(mode)\n data = _op2.f.read(4)\n _op2.f.seek(_op2.n)\n if len(data) == 0:\n raise FatalError('There was a Nastran FATAL Error. Check the F06.\\n'\n 'No tables exist...check for a license issue')\n\n #=================\n table_name = self.op2_reader._read_table_name(rewind=True, stop_on_failure=False)\n if table_name is None:\n raise FatalError('There was a Nastran FATAL Error. 
Check the F06.\\n'\n 'No tables exist...check for a license issue')\n\n self._make_tables()\n table_names = self._read_tables(table_name)\n\n self.close_op2(force=False)\n #self.remove_unpickable_data()\n return table_names\n\n def close_op2(self, force=True):\n \"\"\"closes the OP2 and debug file\"\"\"\n if self.is_debug_file:\n self.binary_debug.write('-' * 80 + '\\n')\n self.binary_debug.write('f.tell()=%s\\ndone...\\n' % self.f.tell())\n self.binary_debug.close()\n\n if self._close_op2 or force:\n if self.f is not None:\n # can happen if:\n # - is ascii file\n self.f.close()\n del self.binary_debug\n del self.f\n self._cleanup_data_members()\n self._cleanup_words()\n #self.op2_reader.h5_file.close()\n\n def _cleanup_words(self):\n \"\"\"\n Remove internal parameters that are not useful and just clutter\n the object attributes.\n \"\"\"\n words = [\n 'isubcase', 'int3', '_table4_count', 'nonlinear_factor',\n 'is_start_of_subtable', 'superelement_adaptivity_index',\n 'thermal_bits', 'is_vectorized', 'pval_step', #'_frequencies',\n '_analysis_code_fmt', 'isubtable', '_data_factor', 'sort_method',\n 'acoustic_flag', 'approach_code', 'format_code_original',\n 'element_name', 'sort_bits', 'code', 'n', 'use_vector', 'ask',\n 'stress_bits', 'expected_times', 'table_code', 'sort_code',\n 'is_all_subcases', 'num_wide', '_table_mapper', 'label',\n 'apply_symmetry',\n 'words', 'device_code', 'table_name', '_count', 'additional_matrices',\n # 350\n 'data_names', '_close_op2',\n 'op2_reader',\n # 74\n 'generalized_tables',\n # 124\n 'is_table_1', 'is_table_2', 'ntotal', 'element_mapper',\n 'is_debug_file', 'debug_file',\n '_results', 'skip_undefined_matrices',\n # 140\n #---------------------------------------------------------\n # dont remove...\n # make_geom, title, read_mode\n # result_names, op2_results\n\n ]\n for word in words:\n if hasattr(self, word):\n delattr(self, word)\n\n def _setup_op2(self):\n \"\"\"\n Does preliminary op2 tasks like:\n - open the file\n - set the endian\n - preallocate some struct objects\n\n \"\"\"\n #: file index\n self.n = 0\n self.table_name = None\n\n if not hasattr(self, 'f') or self.f is None:\n #: the OP2 file object\n self.f = open(self.op2_filename, 'rb')\n #: the endian in bytes\n self._endian = None\n #: the endian in unicode\n self._uendian = None\n flag_data = self.f.read(20)\n self.f.seek(0)\n\n #(8, 3, 0, 8, 24)\n little_data = unpack(b'<5i', flag_data)\n big_data = unpack(b'>5i', flag_data)\n if big_data[0] in [4, 8]:\n self._uendian = '>'\n self._endian = b'>'\n size = big_data[0]\n elif little_data[0] in [4, 8] or 1:\n self._uendian = '<'\n self._endian = b'<'\n size = little_data[0]\n #elif unpack(b'<ii', flag_data)[0] == 4:\n #self._endian = b'<'\n else:\n # Matrices from test show\n # (24, 10, 10, 6, 2) before the Matrix Name...\n print(little_data, big_data)\n self.show(30, types='ifs', endian='<')\n self.show(30, types='ifs', endian='>')\n self.show(12, types='ifs', endian='<')\n self.show(12, types='ifs', endian='>')\n #self.show_data(flag_data, types='iqlfsld', endian='<')\n #print('----------')\n #self.show_data(flag_data, types='iqlfsld', endian='>')\n raise FatalError('cannot determine endian')\n else:\n self.op2_reader._goto(self.n)\n\n if self.read_mode == 1:\n self._set_structs(size)\n\n def _make_tables(self):\n return\n #global RESULT_TABLES, NX_RESULT_TABLES, MSC_RESULT_TABLES\n #table_mapper = self._get_table_mapper()\n #RESULT_TABLES = table_mapper.keys()\n\n def _read_tables(self, table_name: bytes) -> List[bytes]:\n \"\"\"\n 
Reads all the geometry/result tables.\n The OP2 header is not read by this function.\n\n Parameters\n ----------\n table_name : bytes str\n the first table's name\n\n Returns\n -------\n table_names : List[bytes str]\n the table names that were read\n\n \"\"\"\n op2_reader = self.op2_reader\n table_names = []\n self.table_count = defaultdict(int)\n while table_name is not None:\n self.table_count[table_name] += 1\n table_names.append(table_name)\n\n if self.is_debug_file:\n self.binary_debug.write('-' * 80 + '\\n')\n self.binary_debug.write('table_name = %r\\n' % (table_name))\n\n if is_release:\n self.log.debug(' table_name=%r' % table_name)\n\n self.table_name = table_name\n #if 0:\n #op2_reader._skip_table(table_name)\n #else:\n #print(table_name, table_name in op2_reader.mapped_tables)\n if table_name in self.generalized_tables:\n t0 = self.f.tell()\n self.generalized_tables[table_name](self)\n assert self.f.tell() != t0, 'the position was unchanged...'\n elif table_name in op2_reader.mapped_tables:\n t0 = self.f.tell()\n op2_reader.mapped_tables[table_name]()\n assert self.f.tell() != t0, 'the position was unchanged...'\n elif table_name in GEOM_TABLES:\n op2_reader.read_geom_table() # DIT (agard)\n elif table_name in MATRIX_TABLES:\n op2_reader.read_matrix(table_name)\n elif table_name in RESULT_TABLES:\n op2_reader.read_results_table()\n elif self.skip_undefined_matrices:\n op2_reader.read_matrix(table_name)\n elif table_name.strip() in self.additional_matrices:\n op2_reader.read_matrix(table_name)\n else:\n #self.show(1000, types='ifsq')\n msg = (\n 'Invalid Table = %r\\n\\n'\n 'If you have matrices that you want to read, see:\\n'\n ' model.set_additional_matrices_to_read(matrices)'\n ' matrices = {\\n'\n \" b'BHH' : True,\\n\"\n \" b'KHH' : False,\\n\"\n ' } # you want to read some matrices, but not others\\n'\n \" matrices = [b'BHH', b'KHH'] # assumes True\\n\\n\"\n\n 'If you the table is a geom/result table, see:\\n'\n ' model.set_additional_result_tables_to_read(methods_dict)\\n'\n \" methods_dict = {\\n\"\n \" b'OUGV1' : [method3, method4],\\n\"\n \" b'GEOM4SX' : [method3, method4],\\n\"\n \" b'OES1X1' : False,\\n\"\n ' }\\n\\n'\n\n 'If you want to take control of the OP2 reader (mainly useful '\n 'for obscure tables), see:\\n'\n \" methods_dict = {\\n\"\n \" b'OUGV1' : [method],\\n\"\n ' }\\n'\n ' model.set_additional_generalized_tables_to_read(methods_dict)\\n' % (\n table_name)\n )\n raise NotImplementedError(msg)\n\n table_name = op2_reader._read_table_name(last_table_name=table_name,\n rewind=True, stop_on_failure=False)\n return table_names\n\n def set_additional_generalized_tables_to_read(self, tables):\n \"\"\"\n Adds methods to call a generalized table.\n Everything is left to the user.\n\n ::\n\n def read_some_table(self):\n # read the data from self.f\n pass\n\n # let's overwrite the existing OP2 table\n model2 = OP2Geom(debug=True)\n generalized_tables = {\n b'GEOM1S' : read_some_table,\n }\n\n model.set_additional_generalized_tables_to_read(generalized_tables)\n\n \"\"\"\n self._update_generalized_tables(tables)\n self.generalized_tables = tables\n\n def set_additional_result_tables_to_read(self, tables):\n \"\"\"\n Adds methods to read additional result tables.\n This is expected to really only be used for skipping\n unsupported tables or disabling enabled tables that are\n buggy (e.g., OUGV1).\n\n Parameters\n ----------\n tables : Dict[bytes] = varies\n a dictionary of key=name, value=list[method3, method4]/False,\n False : skips a table\n applies 
self._table_passer to method3 and method4\n method3 : function\n function to read table 3 results (e.g., metadata)\n method4 : function\n function to read table 4 results (e.g., the actual results)\n\n \"\"\"\n self._update_generalized_tables(tables)\n table_mapper = self._get_table_mapper()\n #is_added = False\n def func():\n \"\"\"overloaded version of _get_table_mapper\"\"\"\n #if is_added:\n #return table_mapper\n for _key, methods in tables.items():\n if methods is False:\n table_mapper[_key] = [self._table_passer, self._table_passer]\n else:\n assert len(methods) == 2, methods\n table_mapper[_key] = methods\n #is_added = True\n return table_mapper\n self._get_table_mapper = func\n\n def _update_generalized_tables(self, tables):\n \"\"\"\n helper function for:\n - set_additional_generalized_tables_to_read\n - set_additional_result_tables_to_read\n\n \"\"\"\n global NX_RESULT_TABLES\n global MSC_RESULT_TABLES\n global RESULT_TABLES\n failed_keys = []\n keys = list(tables.keys())\n for _key in keys:\n if not isinstance(_key, bytes):\n failed_keys.append(_key)\n if hasattr(self, 'is_nx') and self.is_nx:\n NX_RESULT_TABLES.append(_key)\n else:\n MSC_RESULT_TABLES.append(_key)\n if failed_keys:\n failed_keys_str = [str(_key) for _key in failed_keys]\n raise TypeError('[%s] must be bytes' % ', '. join(failed_keys_str))\n RESULT_TABLES = NX_RESULT_TABLES + MSC_RESULT_TABLES\n\n #RESULT_TABLES.sort()\n #assert 'OESXRMS1' in RESULT_TABLES, RESULT_TABLES\n\n def set_additional_matrices_to_read(self, matrices: Union[List[str], Dict[str, bool]]):\n \"\"\"\n Matrices (e.g., KHH) can be sparse or dense.\n\n Parameters\n ----------\n matrices : List[str]; Dict[str] = bool\n List[str]:\n simplified method to add matrices; value will be True\n Dict[str] = bool:\n a dictionary of key=name, value=True/False,\n where True/False indicates the matrix should be read\n\n .. note:: If you use an already defined table (e.g. KHH), it\n will be ignored. If the table you requested doesn't\n exist, there will be no effect.\n .. note:: Do not use this for result tables like OUGV1, which\n store results like displacement. Those are not matrices.\n Matrices are things like DMIGs.\n\n \"\"\"\n if isinstance(matrices, list):\n matrices2 = {}\n for matrix in matrices:\n assert isinstance(matrix, str), 'matrix=%r' % str(matrix)\n matrices2[matrix] = True\n matrices = matrices2\n\n self.additional_matrices = matrices\n self.additional_matrices = {}\n for matrix_name, matrix in matrices.items():\n if isinstance(matrix_name, bytes):\n self.additional_matrices[matrix_name] = matrix\n else:\n self.additional_matrices[matrix_name.encode('latin1')] = matrix\n\n def _finish(self):\n \"\"\"\n Clears out the data members contained within the self.words variable.\n This prevents mixups when working on the next table, but otherwise\n has no effect.\n\n \"\"\"\n for word in self.words:\n if word != '???' 
and hasattr(self, word):\n if word not in ['Title', 'reference_point']:\n delattr(self, word)\n self.obj = None\n if hasattr(self, 'subtable_name'):\n del self.subtable_name\n\n def _read_psdf_3(self, data, ndata):\n \"\"\"reads the PSDF table\"\"\"\n #(50, 2011, 4001, 0, 302130, 3\n # strip off the title\n unused_three = self.parse_approach_code(data)\n self.words = [\n 'approach_code', 'table_code', '???', 'isubcase',\n '???', '???', '???', 'random_code',\n 'format_code', 'num_wide', '???', '???',\n 'acoustic_flag', '???', '???', '???',\n '???', '???', '???', '???',\n '???', '???', 'thermal', '???',\n '???', 'Title', 'subtitle', 'label'\n ]\n\n ## random code\n self.random_code = self.add_data_parameter(data, 'random_code', b'i', 8, False)\n self._read_title(data)\n\n # simplifying to see the data better\n del self.data_code['title']\n del self.data_code['label']\n del self.data_code['subtitle']\n del self.data_code['subtitle_original']\n del self.data_code['superelement_adaptivity_index']\n #del self.data_code['pval_step']\n del self.data_code['table_name']\n\n del self.data_code['_encoding']\n del self.data_code['load_as_h5']\n del self.data_code['h5_file']\n del self.data_code['is_msc']\n #del self.data_code['is_nasa95']\n del self.data_code['pval_step']\n\n # wrong\n del self.data_code['isubcase']\n #del self.data_code['random_code']\n #del self.data_code['sort_bits']\n #del self.data_code['device_code']\n #del self.data_code['sort_code']\n #del self.data_code['sort_method']\n #print(self.data_code)\n\n #aaa\n #self._read_oug1_3(data, ndata)\n if self.read_mode == 1:\n return ndata\n # just stripping off title\n #self.show_data(data[:200], types='if')\n\n # stripping off zeros\n #self.show_data(data[:52], types='ifs')\n\n #self.show_data(data[:40], types='if')\n\n approach_code, tcode, int3, frame_id, int5, dof, float7, rms_value, float9, int10, stress_strain_flag = unpack(\n self._endian + b'6i 3f 2i', data[:44])\n self.stress_strain_flag = stress_strain_flag\n\n ints = np.frombuffer(data[:200], dtype=self.idtype)\n if ints[11:].max() > 0:\n self.log.warning(f'ints11 = {ints[11:].tolist()}')\n\n node = int5 // 10\n #dof = int5 % 10\n #from pyNastran.op2.op2_interface.op2_codes import TABLE_CODE_MAP\n #title = self.title\n #subtitle = self.subtitle\n #label = self.label\n #approach_code={iapproach_code} tcode={tcode} table_code={self.table_code}\n #print(f'analysis_code={self.analysis_code} '\n #print(f'title={title!r} subtitle={subtitle!r} label={label!r}')\n\n if (self.analysis_code, self.table_code, self.stress_strain_flag) == (5, 1, 0):\n word = 'displacements'\n elif (self.analysis_code, self.table_code, self.stress_strain_flag) == (5, 2, 0):\n word = 'load_vectors'\n elif (self.analysis_code, self.table_code, self.stress_strain_flag) == (5, 3, 0):\n word = 'spc_forces'\n elif (self.analysis_code, self.table_code, self.stress_strain_flag) == (5, 4, 0):\n word = 'force'\n\n elif (self.analysis_code, self.table_code, self.stress_strain_flag) == (5, 5, 0):\n word = 'stress'\n elif (self.analysis_code, self.table_code, self.stress_strain_flag) == (5, 5, 2):\n word = 'strain'\n\n elif (self.analysis_code, self.table_code, self.stress_strain_flag) == (5, 10, 0):\n word = 'velocities'\n elif (self.analysis_code, self.table_code, self.stress_strain_flag) == (5, 11, 0):\n word = 'accelerations'\n else: # pragma: no cover\n #print(f'table_code={self.table_code} table={TABLE_CODE_MAP[self.table_code]!r}')\n print(f'analysis_code={self.analysis_code} approach_code={approach_code} 
tcode={tcode} table_code={self.table_code} '\n f'int3={int3} frame_id={frame_id} node={node} dof={dof} '\n f'float7={float7} rms_value={rms_value:.5e} float9={float9:.4e} int10={int10} stress_strain_flag={stress_strain_flag}')\n raise NotImplementedError(f'analysis_code={self.analysis_code} '\n f'table_code={self.table_code} '\n f'stress_strain_flag={self.stress_strain_flag} is not supported')\n\n self.node = node\n self.dof = dof\n self.word = word\n return ndata\n #self.show_data(data, types='ifs', endian=None)\n #aaaa\n\n def _read_psdf_4(self, data, ndata):\n \"\"\"reads the PSDF table\"\"\"\n if self.read_mode == 1:\n return ndata\n #self.show_data(data[:100], types='ifs', endian=None)\n data2 = np.frombuffer(data, dtype=self.fdtype)\n ndata = len(data2)\n nfreqs = ndata // 2\n data2 = data2.reshape(nfreqs, 2)\n #last2 = data2[-2:, 1]\n #self.log.warning(f'skipping PSDF; nfreqs={nfreqs} [{last2[0]:.6e},{last2[1]:.6e}] '\n #f'ymin={data2[:,1].min():.6e} ymax={data2[:,1].max():.6e}') # {self.data_code}\n # self.show_data(), self._read_psdf_4\n key = (self.label, self.node, self.dof)\n slot = getattr(self.op2_results.psds, self.word)\n assert key not in slot, slot\n slot[key] = data2\n del self.node\n del self.dof\n del self.word\n\ndef main(): # pragma: no cover\n \"\"\"testing pickling\"\"\"\n from pickle import dump, load\n txt_filename = 'solid_shell_bar.txt'\n pickle_file = open(txt_filename, 'wb')\n op2_filename = 'solid_shell_bar.op2'\n op2 = OP2_Scalar()\n op2.read_op2(op2_filename)\n #print(op2.displacements[1])\n dump(op2, pickle_file)\n pickle_file.close()\n\n pickle_file = open(txt_filename, 'r')\n op2 = load(pickle_file)\n pickle_file.close()\n #print(op2.displacements[1])\n\n\n #import sys\n #op2_filename = sys.argv[1]\n\n #o = OP2_Scalar()\n #o.read_op2(op2_filename)\n #(model, ext) = os.path.splitext(op2_filename)\n #f06_outname = model + '.test_op2.f06'\n #o.write_f06(f06_outname)\n\ndef create_binary_debug(op2_filename: str, debug_file: str, log) -> Tuple[bool, Any]:\n \"\"\"helper method\"\"\"\n binary_debug = None\n\n if debug_file is not None:\n #: an ASCII version of the op2 (creates lots of output)\n log.debug('debug_file = %s' % debug_file)\n binary_debug = open(debug_file, 'w')\n binary_debug.write(op2_filename + '\\n')\n is_debug_file = True\n else:\n is_debug_file = False\n return is_debug_file, binary_debug\n\n\nif __name__ == '__main__': # pragma: no cover\n main()\n",
"\"\"\"Interface for converting OP2 results to the GUI format\"\"\"\n# pylint: disable=C1801, C0103\nfrom __future__ import annotations\nimport os\nfrom collections import defaultdict\nfrom typing import Tuple, Dict, Union, Any, TYPE_CHECKING\n\nimport numpy as np\nfrom numpy.linalg import norm # type: ignore\n\nfrom pyNastran.gui.gui_objects.gui_result import GuiResult, GuiResultIDs\nfrom pyNastran.gui.gui_objects.displacements import (\n DisplacementResults, ForceTableResults) #, TransientElementResults\nfrom pyNastran.op2.result_objects.stress_object import (\n _get_nastran_header,\n get_rod_stress_strain,\n get_bar_stress_strain, get_bar100_stress_strain, get_beam_stress_strain,\n get_plate_stress_strain, get_solid_stress_strain\n)\nfrom pyNastran.gui.gui_objects.gui_result import GridPointForceResult\n\nfrom .geometry_helper import NastranGuiAttributes\nfrom .stress import (\n get_spring_stress_strains, get_rod_stress_strains,\n get_bar_stress_strains, get_beam_stress_strains,\n get_plate_stress_strains, get_composite_plate_stress_strains,\n get_solid_stress_strains)\nfrom .force import get_spring_force, get_bar_force, get_plate_force\n\nif TYPE_CHECKING: # pragma: no cover\n from pyNastran.op2.op2 import OP2\n from pyNastran.gui.gui_objects.settings import Settings\n #from pyNastran.op2.result_objects.design_response import Desvars\n\nGuiResults = Union[GuiResult, GuiResultIDs, GridPointForceResult]\n\nclass NastranGuiResults(NastranGuiAttributes):\n \"\"\"Defines OP2 specific methods NastranIO\"\"\"\n def __init__(self):\n super(NastranGuiResults, self).__init__()\n\n def _fill_grid_point_forces(self, cases, model, key, icase,\n form_dict, header_dict, keys_map):\n if key not in model.grid_point_forces:\n return icase\n grid_point_forces = model.grid_point_forces[key]\n case = grid_point_forces\n if not case.is_real:\n #raise RuntimeError(grid_point_forces.is_real)\n return icase\n\n subcase_id = key[0]\n title = 'Grid Point Forces'\n header = 'Grid Point Forces'\n nastran_res = GridPointForceResult(subcase_id, header, title, grid_point_forces)\n\n itime = 0\n\n cases[icase] = (nastran_res, (itime, 'Grid Point Forces'))\n formii = ('Grid Point Forces', icase, [])\n form_dict[(key, itime)].append(formii)\n\n dt = case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n\n icase += 1\n return icase\n\n def _fill_op2_oug_oqg(self, cases, model: OP2, key, icase: int,\n form_dict, header_dict, keys_map, log) -> int:\n \"\"\"\n loads nodal results bector results (e.g., dispalcements/temperatures)\n \"\"\"\n nnodes = self.nnodes\n node_ids = self.node_ids\n icase = _fill_nastran_displacements(\n cases, model, key, icase,\n form_dict, header_dict, keys_map,\n self.xyz_cid0,\n nnodes, node_ids, log, dim_max=self.gui.settings.dim_max)\n\n icase = _fill_nastran_displacements(\n cases, model, key, icase,\n form_dict, header_dict, keys_map,\n self.xyz_cid0,\n nnodes, node_ids, log, dim_max=self.gui.settings.dim_max,\n prefix='acoustic',\n )\n\n icase = _fill_nastran_temperatures(\n cases, model, key, icase,\n form_dict, header_dict, keys_map,\n nnodes, log)\n return icase\n\n def _fill_op2_gpstress(self, cases, model: OP2,\n times, key, icase: int,\n form_dict, header_dict, keys_map) -> int:\n \"\"\"Creates the time accurate grid point stress objects\"\"\"\n if key in model.grid_point_stress_discontinuities:\n case = 
model.grid_point_stress_discontinuities[key]\n self.log.warning('skipping grid_point_stress_discontinuities')\n if key in model.grid_point_stresses_volume_principal:\n case = model.grid_point_stresses_volume_principal[key]\n self.log.warning('skipping grid_point_stresses_volume_principal')\n\n icase = _fill_op2_grid_point_surface_stresses(\n self.element_ids,\n cases, model,\n times, key, icase,\n form_dict, header_dict, keys_map)\n\n icase = _fill_op2_grid_point_stresses_volume_direct(\n self.node_ids,\n cases, model,\n times, key, icase,\n form_dict, header_dict, keys_map)\n return icase\n\n def _fill_op2_centroidal_strain_energy(self, cases: Dict[int, GuiResults], model: OP2,\n times, key, icase: int,\n form_dict, header_dict, keys_map) -> int:\n \"\"\"Creates the time accurate strain energy objects\"\"\"\n case = None\n\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index, pval_step) = key ????\n subcase_id = key[0]\n\n strain_energy = model.op2_results.strain_energy\n strain_energies = [\n # results_dict, name, flag of the element being supported\n (strain_energy.cquad4_strain_energy, 'CQUAD4', True),\n (strain_energy.cquad8_strain_energy, 'CQUAD8', True),\n (strain_energy.cquadr_strain_energy, 'CQUADR', True),\n (strain_energy.cquadx_strain_energy, 'CQUADX', True),\n\n (strain_energy.ctria3_strain_energy, 'CTRIA3', True),\n (strain_energy.ctria6_strain_energy, 'CTRIA6', True),\n (strain_energy.ctriar_strain_energy, 'CTRIAR', True),\n (strain_energy.ctriax_strain_energy, 'CTRIAX', True),\n (strain_energy.ctriax6_strain_energy, 'CTRIAX6', True),\n\n (strain_energy.ctetra_strain_energy, 'CTETRA', True),\n (strain_energy.cpenta_strain_energy, 'CPENTA', True),\n (strain_energy.chexa_strain_energy, 'CHEXA', True),\n (strain_energy.cpyram_strain_energy, 'CPYRAM', True),\n\n (strain_energy.crod_strain_energy, 'CROD', True),\n (strain_energy.ctube_strain_energy, 'CTUBE', True),\n (strain_energy.conrod_strain_energy, 'CONROD', True),\n\n (strain_energy.cbar_strain_energy, 'CBAR', True),\n (strain_energy.cbeam_strain_energy, 'CBEAM', True),\n\n (strain_energy.cgap_strain_energy, 'CGAP', True),\n (strain_energy.celas1_strain_energy, 'CELAS1', True),\n (strain_energy.celas2_strain_energy, 'CELAS2', True),\n (strain_energy.celas3_strain_energy, 'CELAS3', True),\n (strain_energy.celas4_strain_energy, 'CELAS4', True),\n (strain_energy.cdum8_strain_energy, 'CDUM8', False),\n (strain_energy.cbush_strain_energy, 'CBUSH', True),\n #(strain_energy.chexa8fd_strain_energy, '', False),\n (strain_energy.cbend_strain_energy, 'CBEND', False),\n (strain_energy.dmig_strain_energy, 'DMIG', False),\n (strain_energy.genel_strain_energy, 'GENEL', False),\n (strain_energy.cshear_strain_energy, 'CSHEAR', True),\n (strain_energy.conm2_strain_energy, 'CONM2', False),\n ]\n # find the cases that have results for this key\n has_strain_energy = [key in res[0] for res in strain_energies]\n if not any(has_strain_energy):\n return icase\n itrue = has_strain_energy.index(True)\n unused_ese0 = strain_energies[itrue][0]\n #times = ese0._times\n\n #fmt = '%g'\n #header = ''\n #form0 = ('Element Strain Energy', None, [])\n\n #op2.strain_energy[1]\n #type=StrainEnergyObject ntimes=3 nelements=16\n #energy, percent, density\n #modes = [1, 2, 3]\n\n nelements = self.nelements\n eids = self.element_ids\n\n for itime, unused_dt in enumerate(times):\n ese = np.full(nelements, np.nan, dtype='float32')\n percent = np.full(nelements, np.nan, dtype='float32')\n strain_energy_density = np.full(nelements, 
np.nan, dtype='float32')\n for istrain_energy, is_true in enumerate(has_strain_energy):\n if not is_true:\n continue\n resdict, name, unused_flag = strain_energies[istrain_energy]\n case = resdict[key]\n\n dt = case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n\n if case.is_complex:\n continue\n\n data = case.data\n itotals = np.where(case.element[itime, :] == 100000000)[0]\n assert len(itotals) == 1, itotals\n itotal = itotals[0]\n\n eidsi2 = case.element[itime, :itotal]\n\n # find eids2i in eids\n i = np.searchsorted(eids, eidsi2)\n #if 0 and name == 'CELAS1': # pragma: no cover\n ## check that the elements were mapped correctly\n #eids_actual = self.element_ids[i]\n #for eid in eids_actual:\n #element = self.model.elements[eid]\n #assert element.type == name, element\n #assert np.all(eids_actual == eidsi2)\n\n if len(i) != len(np.unique(i)):\n msg = 'Strain Energy i%s=%s is not unique because there are missing elements' % (name, str(i))\n model.log.warning(msg)\n continue\n\n # verifies the try-except is what we think it is (missing elements)\n esei = data[itime, :itotal, 0]\n\n try:\n ese[i] = esei\n percent[i] = data[itime, :itotal, 1]\n strain_energy_density[i] = data[itime, :itotal, 2]\n except IndexError:\n model.log.warning('error reading Strain Energy')\n continue\n\n # helicopter.dat\n #CBEAM : 10\n #CQUAD4 : 11388\n #CROD : 544\n #CTRIA3 : 151\n # nelements = 12093\n\n if np.any(np.isfinite(ese)):\n ese_res = GuiResult(subcase_id, header='Strain Energy: ' + header,\n title='Strain Energy', data_format='%.3e',\n location='centroid', scalar=ese)\n percent_res = GuiResult(subcase_id, header='Percent of Total: '+ header,\n title='Percent of Total', data_format='%.3f',\n location='centroid', scalar=percent)\n cases[icase] = (ese_res, (subcase_id, 'Strain Energy'))\n cases[icase + 1] = (percent_res, (subcase_id, 'Percent'))\n\n form_dict[(key, itime)].append(('Strain Energy', icase, []))\n form_dict[(key, itime)].append(('Percent', icase + 1, []))\n icase += 2\n if np.any(np.isfinite(strain_energy_density)):\n sed_res = GuiResult(subcase_id, header='Strain Energy Density: ' + header,\n title='Strain Energy Density', data_format='%.3e',\n location='centroid', scalar=strain_energy_density)\n cases[icase] = (sed_res, (subcase_id, 'Strain Energy Density'))\n form_dict[(key, itime)].append(('Strain Energy Density', icase, []))\n icase += 1\n return icase\n\n def _create_op2_time_centroidal_force_arrays(self, model, nelements, key, itime,\n header_dict, keys_map):\n \"\"\"\n creates the following force outputs:\n - fx, fy, fz, mx, my, mz\n - thermal_load\n \"\"\"\n element_ids = self.element_ids\n fx = np.full(nelements, np.nan, dtype='float32') # axial\n fy = np.full(nelements, np.nan, dtype='float32') # shear_y\n fz = np.full(nelements, np.nan, dtype='float32') # shear_z\n\n rx = np.full(nelements, np.nan, dtype='float32') # torque\n ry = np.full(nelements, np.nan, dtype='float32') # bending_y\n rz = np.full(nelements, np.nan, dtype='float32') # bending_z\n\n is_element_on = np.zeros(nelements, dtype='float32') # torque\n unused_fmt = '%g'\n header = ''\n unused_form0 = ('Force', None, [])\n\n case = None\n found_force = False\n for res_type in (model.conrod_force, model.crod_force, model.ctube_force):\n if key in res_type:\n found_force = True\n case = res_type[key]\n if case.is_complex:\n continue\n keys_map[key] = (case.subtitle, 
case.label,\n case.superelement_adaptivity_index, case.pval_step)\n data = case.data\n if case.nonlinear_factor is None:\n unused_ntimes = data.shape[:1]\n eids = case.element\n dt = case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n #eids_to_find = intersect1d(self.element_ids, eids)\n i = np.searchsorted(element_ids, eids)\n assert np.array_equal(element_ids[i], eids)\n fxi = data[itime, :, 0]\n rxi = data[itime, :, 1]\n if fxi.size != i.size:\n msg = 'fx.size=%s i.size=%s fx=%s eids_to_find=%s' % (\n fxi.size, i.size, fxi, eids)\n raise RuntimeError(msg)\n fx[i] = fxi\n rx[i] = rxi\n is_element_on[i] = 1.\n else:\n continue\n\n if key in model.cbar_force:\n found_force = True\n case = model.cbar_force[key] # type: np.ndarray\n if case.element_type == 34:\n ## CBAR-34\n if case.is_real:\n eids = case.element\n i = np.searchsorted(element_ids, eids)\n is_element_on[i] = 1.\n\n dt = case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n\n #[bending_moment_a1, bending_moment_a2, bending_moment_b1, bending_moment_b2,\n # shear1, shear2, axial, torque]\n #fx[i] = case.data[:, :, 6]\n #fy[i] = case.data[:, :, 4]\n #fz[i] = case.data[:, :, 5]\n\n if i.size == 1:\n rxi = case.data[itime, :, 7].max()\n ryi = np.vstack([case.data[itime, :, 0], case.data[itime, :, 2]]).max()\n rzi = np.vstack([case.data[itime, :, 1], case.data[itime, :, 3]]).max()\n else:\n rxi = case.data[itime, :, 7]#.max(axis=0)\n ryi = np.vstack([case.data[itime, :, 0], case.data[itime, :, 2]]).max(axis=0)\n rzi = np.vstack([case.data[itime, :, 1], case.data[itime, :, 3]]).max(axis=0)\n unused_rzv = rzi\n\n # rza = array([case.data[itime, :, 1], case.data[itime, :, 3]])#.max(axis=0)\n # rzh = hstack([case.data[itime, :, 1], case.data[itime, :, 3]])#.max(axis=0)\n # print(rzv.shape, rzv.shape, rzv.shape)\n assert rxi.size == i.size, 'rx.size=%s i.size=%s rx=%s' % (rxi.size, i.size, rxi)\n assert ryi.size == i.size, 'ry.size=%s i.size=%s ry=%s' % (ryi.size, i.size, ryi)\n assert rzi.size == i.size, 'rz.size=%s i.size=%s rz=%s' % (rzi.size, i.size, rzi)\n\n rx[i] = rxi\n ry[i] = ryi\n rz[i] = rzi\n elif case.element_type == 100:\n ## CBAR-100\n eids = case.element\n ueids = np.unique(eids)\n\n dt = case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n\n j = np.searchsorted(self.element_ids, ueids)\n di = j[1:-1] - j[0:-2]\n if len(di) == 0:\n # pload1\n self.log_error('Error loading CBAR-100 forces; failed slicing element_ids')\n else:\n is_element_on[j] = 1.\n\n if di.max() != 2:\n #print('di =', np.unique(di))\n # [station, bending_moment1, bending_moment2, shear1, shear2, axial, torque]\n ii = 0\n unused_eid_old = eids[0]\n fxi = defaultdict(list)\n fyi = defaultdict(list)\n fzi = defaultdict(list)\n rxi = defaultdict(list)\n ryi = defaultdict(list)\n rzi = defaultdict(list)\n for ii, eid in enumerate(eids):\n fxi[eid].append(case.data[:, ii, 5])\n fyi[eid].append(case.data[:, ii, 3])\n fzi[eid].append(case.data[:, ii, 4])\n\n rxi[eid].append(case.data[:, ii, 6])\n ryi[eid].append(case.data[:, ii, 1])\n rzi[eid].append(case.data[:, ii, 2])\n #if eidi == eid_old:\n # fx[ii] = array([case.data[:, j, 5], case.data[:, j, 5]]).max(axis=0)\n #else:\n for ii, eidi in zip(j, 
eids[j]):\n fx[ii] = max(fxi[eidi])\n fy[ii] = max(fyi[eidi])\n fz[ii] = max(fyi[eidi])\n rx[ii] = max(rxi[eidi])\n ry[ii] = max(ryi[eidi])\n rz[ii] = max(rzi[eidi])\n else:\n # [station, bending_moment1, bending_moment2, shear1, shear2, axial, torque]\n neids = len(np.unique(eids)) * 2\n if len(eids) != len(np.unique(eids)) * 2:\n msg = 'CBAR-100 Error: len(eids)=%s neids=%s' % (len(eids), neids)\n raise RuntimeError(msg)\n fx[i] = np.array(\n [case.data[itime, ::-1, 5],\n case.data[itime, 1::-1, 5]]).max(axis=0)\n fy[i] = np.array(\n [case.data[itime, ::-1, 3],\n case.data[itime, 1::-1, 3]]).max(axis=0)\n fz[i] = np.array(\n [case.data[itime, ::-1, 4],\n case.data[itime, 1::-1, 4]]).max(axis=0)\n rx[i] = np.array(\n [case.data[itime, ::-1, 6],\n case.data[itime, 1::-1, 6]]).max(axis=0)\n ry[i] = np.array(\n [case.data[itime, ::-1, 1],\n case.data[itime, 1::-1, 1]]).max(axis=0)\n rz[i] = np.array(\n [case.data[itime, ::-1, 2],\n case.data[itime, 1::-1, 2]]).max(axis=0)\n else:\n raise NotImplementedError(case)\n return found_force, fx, fy, fz, rx, ry, rz, is_element_on\n\n def _fill_op2_time_centroidal_force(self, cases, model: OP2,\n key: Tuple[Any, int], icase: int, itime: int,\n form_dict: Dict[Any, Any],\n #form_dict: Dict[Tuple[Any, int], Any],\n header_dict: Dict[Any, Any],\n keys_map: Dict[Any, Any]) -> int:\n \"\"\"\n Creates the time accurate force objects\n \"\"\"\n nelements = self.nelements\n out = self._create_op2_time_centroidal_force_arrays(\n model, nelements, key, itime, header_dict, keys_map)\n found_force, fx, fy, fz, rx, ry, rz, is_element_on = out\n\n #new_cases = True\n subcase_id = key[2]\n if found_force:\n fmt = '%.4f'\n # header = _get_nastran_header(case, dt, itime)\n\n #num_on = nelements\n num_off = 0\n if itime == 0 and is_element_on.min() == 0.0:\n icase = self.save_filtered_forces(key, itime, icase, is_element_on,\n subcase_id, cases, form_dict)\n\n is_fx = np.any(np.isfinite(fx)) and np.nanmin(fx) != np.nanmax(fx)\n is_fy = np.any(np.isfinite(fy)) and np.nanmin(fy) != np.nanmax(fy)\n is_fz = np.any(np.isfinite(fz)) and np.nanmin(fz) != np.nanmax(fz)\n\n is_rx = np.any(np.isfinite(rx)) and np.nanmin(rx) != np.nanmax(rx)\n #is_ry = np.any(np.isfinite(ry)) and np.nanmin(ry) != np.nanmax(ry)\n #is_rz = np.any(np.isfinite(rz)) and np.nanmin(rz) != np.nanmax(rz)\n if is_fx or is_rx and not num_off == nelements:\n # header = _get_nastran_header(case, dt, itime)\n header = header_dict[(key, itime)]\n if is_fx:\n fx_res = GuiResult(subcase_id, header=f'Axial: {header}', title='Axial',\n location='centroid', scalar=fx)\n form_dict[(key, itime)].append(('Axial', icase, []))\n cases[icase] = (fx_res, (subcase_id, 'Axial'))\n icase += 1\n\n if is_fy:\n fy_res = GuiResult(subcase_id, header=f'ShearY: {header}', title='ShearY',\n location='centroid', scalar=fy)\n form_dict[(key, itime)].append(('ShearY', icase, []))\n cases[icase] = (fy_res, (subcase_id, 'ShearY'))\n icase += 1\n\n if is_fz:\n fz_res = GuiResult(subcase_id, header=f'ShearZ: {header}', title='ShearZ',\n location='centroid', scalar=fz)\n form_dict[(key, itime)].append(('ShearZ', icase, []))\n cases[icase + 2] = (fz_res, (subcase_id, 'ShearZ'))\n icase += 1\n\n if is_rx:\n mx_res = GuiResult(subcase_id, header=f'Torsion: {header}', title='Torsion',\n location='centroid', scalar=rx)\n my_res = GuiResult(subcase_id, header=f'BendingY: {header}', title='BendingY',\n location='centroid', scalar=ry)\n mz_res = GuiResult(subcase_id, header=f'BendingZ: {header}', title='BendingZ',\n location='centroid', 
scalar=rz)\n\n form_dict[(key, itime)].append(('Torsion', icase, []))\n form_dict[(key, itime)].append(('BendingY', icase + 1, []))\n form_dict[(key, itime)].append(('BendingZ', icase + 2, []))\n cases[icase] = (mx_res, (subcase_id, 'Torsion'))\n cases[icase + 1] = (my_res, (subcase_id, 'BendingY'))\n cases[icase + 2] = (mz_res, (subcase_id, 'BendingZ'))\n icase += 3\n\n is_axial = np.full(nelements, -1, dtype='int8')\n is_shear_y = np.full(nelements, -1, dtype='int8')\n is_shear_z = np.full(nelements, -1, dtype='int8')\n is_torsion = np.full(nelements, -1, dtype='int8')\n is_bending_y = np.full(nelements, -1, dtype='int8')\n is_bending_z = np.full(nelements, -1, dtype='int8')\n\n arrays = [\n (is_axial, fx), (is_shear_y, fy), (is_shear_z, fz),\n (is_torsion, rx), (is_bending_y, ry), (is_bending_z, rz),\n ]\n for is_array, force in arrays:\n iany = np.where(is_element_on)\n iwhere = np.where(np.abs(force) > 0.0)[0]\n is_array[iany] = 0\n is_array[iwhere] = 1\n #is_axial[np.where(np.abs(fx) > 0.0)[0]] = 1\n #is_shear_y[np.where(np.abs(fy) > 0.0)[0]] = 1\n #is_shear_z[np.where(np.abs(fz) > 0.0)[0]] = 1\n #is_torsion[np.where(np.abs(rx) > 0.0)[0]] = 1\n #is_bending_y[np.where(np.abs(ry) > 0.0)[0]] = 1\n #is_bending_z[np.where(np.abs(rz) > 0.0)[0]] = 1\n #is_bending[where(abs(rx) > 0.0)[0]] = 1\n\n is_fx_res = GuiResult(subcase_id, header='IsAxial', title='IsAxial',\n location='centroid', scalar=is_axial, data_format=fmt,\n mask_value=-1)\n is_fy_res = GuiResult(subcase_id, header='IsShearY', title='IsShearY',\n location='centroid', scalar=is_shear_y, data_format=fmt,\n mask_value=-1)\n is_fz_res = GuiResult(subcase_id, header='IsShearZ', title='IsShearZ',\n location='centroid', scalar=is_shear_z, data_format=fmt,\n mask_value=-1)\n is_mx_res = GuiResult(subcase_id, header='IsTorsion', title='IsTorsion',\n location='centroid', scalar=is_torsion, data_format=fmt,\n mask_value=-1)\n is_my_res = GuiResult(subcase_id, header='IsBendingY', title='IsBendingY',\n location='centroid', scalar=is_bending_y, data_format=fmt,\n mask_value=-1)\n is_mz_res = GuiResult(subcase_id, header='IsBendingZ', title='IsBendingZ',\n location='centroid', scalar=is_bending_z, data_format=fmt,\n mask_value=-1)\n\n cases[icase] = (is_fx_res, (subcase_id, 'IsAxial'))\n cases[icase + 1] = (is_fy_res, (subcase_id, 'IsShearY'))\n cases[icase + 2] = (is_fz_res, (subcase_id, 'IsShearZ'))\n cases[icase + 3] = (is_mx_res, (subcase_id, 'IsTorsion'))\n cases[icase + 4] = (is_my_res, (subcase_id, 'IsBendingY'))\n cases[icase + 5] = (is_mz_res, (subcase_id, 'IsBendingZ'))\n\n form_dict[(key, itime)].append(('IsAxial', icase, []))\n form_dict[(key, itime)].append(('IsShearY', icase + 1, []))\n form_dict[(key, itime)].append(('IsShearZ', icase + 2, []))\n form_dict[(key, itime)].append(('IsTorsion', icase + 3, []))\n form_dict[(key, itime)].append(('IsBendingY', icase + 4, []))\n form_dict[(key, itime)].append(('IsBendingZ', icase + 5, []))\n icase += 6\n return icase\n\n def save_filtered_forces(self, key, itime, icase, is_element_on, subcase_id, cases, form_dict):\n ioff = np.where(is_element_on == 0)[0]\n num_off = len(ioff)\n\n eids_off = []\n for eid in self.element_ids[ioff]:\n element = self.model.elements[eid]\n if element.type not in ['CTRIA3', 'CQUAD4', 'CHEXA', 'CPENTA', 'CTETRA',\n 'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4', 'CSHEAR',\n 'CQUADR', 'CTRIAR', 'CQUAD8', 'CTRIA6', 'CVISC',\n 'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CTUBE',\n 'CONROD', 'CROD']:\n eids_off.append(eid)\n for eid in eids_off[:20]:\n element = 
self.model.elements[eid]\n print(element.rstrip())\n\n if eids_off:\n print('force_eids_off = %s; n=%s' % (eids_off, num_off))\n self.log_error('force_eids_off = %s; n=%s' % (eids_off, num_off))\n force_on_res = GuiResult(subcase_id, header='Force - IsElementOn',\n title='Force\\nIsElementOn',\n location='centroid', scalar=is_element_on)\n cases[icase] = (force_on_res, (subcase_id, 'Force\\nIsElementOn'))\n form_dict[(key, itime)].append(('Force - IsElementOn', icase, []))\n #num_on -= num_off\n icase += 1\n return icase\n\n\n def _fill_op2_time_centroidal_composite_stress(self, cases, model, key, icase: int, itime: int,\n form_dict: Dict[Any, Any],\n header_dict: Dict[Any, Any],\n keys_map: Dict[Any, Any],\n is_stress: int=True) -> int:\n nelements = self.nelements\n #oxx = np.full(nelements, np.nan, dtype='float32')\n #oyy = np.full(nelements, np.nan, dtype='float32')\n\n #txy = np.full(nelements, np.nan, dtype='float32')\n #tyz = np.full(nelements, np.nan, dtype='float32')\n #txz = np.full(nelements, np.nan, dtype='float32')\n\n #max_principal = np.full(nelements, np.nan, dtype='float32') # max\n #min_principal = np.full(nelements, np.nan, dtype='float32') # min\n #ovm = np.full(nelements, np.nan, dtype='float32')\n\n if is_stress:\n stress_obj = self.stress[key]\n word = 'Stress'\n fmt = '%.3f'\n else:\n stress_obj = self.strain[key]\n word = 'Strain'\n fmt = '%.4e'\n\n vm_word = None\n if len(stress_obj.composite_data_dict):\n print(stress_obj)\n out = stress_obj.set_composite_stress_by_layer(\n key, itime, nelements, header_dict,\n )\n vm_word, element_ids, oxx, oyy, txy, tyz, txz, max_principal, min_principal, ovm = out\n if vm_word is None:\n return icase\n\n #form0 = (word, None, [])\n #unused_formis = form0[2]\n subcase_id = key[2]\n if np.any(np.isfinite(oxx)):\n header = header_dict[(key, itime)]\n oxx_res = GuiResultIDs(subcase_id, header=word + f'XX: {header}', title=word + 'XX',\n location='centroid',\n ids=element_ids, scalar=oxx, data_format=fmt)\n cases[icase] = (oxx_res, (subcase_id, word + 'XX'))\n form_dict[(key, itime)].append((word + 'XX', icase, []))\n icase += 1\n return icase\n\n def _fill_op2_centroidal_stress(self, cases, model, times, key, icase_old,\n form_dict, header_dict, keys_map) -> int:\n \"\"\"Creates the time accurate stress objects\"\"\"\n icase = icase_old\n settings = self.settings # type: Settings\n if settings.nastran_stress:\n for itime, unused_dt in enumerate(times):\n # shell stress\n try:\n icase = self._fill_op2_time_centroidal_stress(\n cases, model, key, icase_old, itime, form_dict, header_dict, keys_map,\n is_stress=True)\n except IndexError:\n self.log.error('problem getting stress...')\n break\n if icase == icase_old:\n return icase\n\n #self.settings.nastran_plate_stress\n eids = self.element_ids\n if settings.nastran_plate_stress:\n icase = get_plate_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=True)\n icase = get_plate_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=True,\n prefix='modal_contribution',\n )\n\n if settings.nastran_composite_plate_stress:\n icase = get_composite_plate_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map,\n self.stress[key].composite_data_dict, self.log, is_stress=True)\n\n if settings.nastran_rod_stress:\n icase = get_rod_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=True)\n if 
settings.nastran_bar_stress:\n icase = get_bar_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=True)\n if settings.nastran_beam_stress:\n icase = get_beam_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=True)\n\n icase = get_solid_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=True)\n icase = get_spring_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=True)\n\n return icase\n\n\n def _fill_op2_centroidal_force(self, cases, model, times, key, icase,\n force_dict, header_dict, keys_map) -> int:\n \"\"\"Creates the time accurate force objects\"\"\"\n\n settings = self.settings # type: Settings\n if settings.nastran_force:\n for itime, unused_dt in enumerate(times):\n try:\n icase = self._fill_op2_time_centroidal_force(\n cases, model, key, icase, itime,\n force_dict, header_dict, keys_map)\n except IndexError:\n self.log.error('problem getting force...')\n break\n\n eids = self.element_ids\n if settings.nastran_bar_force:\n icase = get_bar_force(\n eids, cases, model, times, key, icase,\n force_dict, header_dict, keys_map)\n\n if settings.nastran_beam_force:\n #icase = get_beam_force(\n #eids, cases, model, times, key, icase,\n #force_dict, header_dict, keys_map)\n if key in model.cbeam_force:\n model.log.warning('skipping nastran beam force')\n\n if settings.nastran_plate_force:\n icase = get_plate_force(\n eids, cases, model, times, key, icase,\n force_dict, header_dict, keys_map)\n #if key in model.ctria3_force or key in model.cquad4_force:\n #model.log.warning('skipping nastran plate force')\n\n if settings.nastran_spring_force:\n icase = get_spring_force(\n eids, cases, model, times, key, icase,\n force_dict, header_dict, keys_map)\n #if any([key in force for force in\n #[model.celas1_force, model.celas2_force,\n #model.celas3_force, model.celas4_force]]):\n #model.log.warning('skipping nastran spring force')\n\n if settings.nastran_cbush_force:\n if key in model.cbush_force:\n model.log.warning('skipping nastran bush force')\n #if key in model.bush1d_force:\n #model.log.warning('skipping nastran bush1d force')\n\n if settings.nastran_gap_force:\n if key in model.cgap_force:\n model.log.warning('skipping nastran gap force')\n\n return icase\n\n def _fill_op2_centroidal_strain(self, cases, model, times, key, icase,\n form_dict, header_dict, keys_map) -> int:\n \"\"\"Creates the time accurate strain objects\"\"\"\n settings = self.settings # type: Settings\n if settings.nastran_strain:\n for itime, unused_dt in enumerate(times):\n try:\n icase = self._fill_op2_time_centroidal_stress(\n cases, model, key, icase, itime, form_dict, header_dict, keys_map,\n is_stress=False)\n except IndexError:\n self.log.error('problem getting strain...')\n break\n\n eids = self.element_ids\n if settings.nastran_composite_plate_strain:\n icase = get_plate_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=False)\n icase = get_plate_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=False,\n prefix='modal_contribution',\n )\n\n if settings.nastran_composite_plate_strain:\n icase = get_composite_plate_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map,\n self.strain[key].composite_data_dict, self.log, is_stress=False)\n\n if 
settings.nastran_rod_strain:\n icase = get_rod_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=False)\n if settings.nastran_bar_strain:\n icase = get_bar_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=False)\n if settings.nastran_beam_strain:\n icase = get_beam_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=False)\n\n icase = get_solid_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=False)\n icase = get_spring_stress_strains(\n eids, cases, model, times, key, icase,\n form_dict, header_dict, keys_map, is_stress=False)\n\n return icase\n\n def _fill_op2_time_centroidal_stress(self, cases, model: OP2,\n key, icase: int, itime: int,\n form_dict: Dict[Any, Any],\n header_dict: Dict[Any, Any],\n keys_map: Dict[Any, Any],\n is_stress=True) -> int:\n \"\"\"Creates the time accurate stress objects\"\"\"\n\n #new_cases = True\n #assert isinstance(subcase_id, int), type(subcase_id)\n assert isinstance(icase, int), icase\n #assert isinstance(itime, int), type(itime)\n assert is_stress in [True, False], is_stress\n eids = self.element_ids\n assert len(eids) > 0, eids\n nelements = self.nelements\n\n is_element_on = np.zeros(nelements, dtype='int8') # is the element supported\n oxx = np.full(nelements, np.nan, dtype='float32')\n oyy = np.full(nelements, np.nan, dtype='float32')\n ozz = np.full(nelements, np.nan, dtype='float32')\n\n txy = np.full(nelements, np.nan, dtype='float32')\n tyz = np.full(nelements, np.nan, dtype='float32')\n txz = np.full(nelements, np.nan, dtype='float32')\n\n max_principal = np.full(nelements, np.nan, dtype='float32') # max\n mid_principal = np.full(nelements, np.nan, dtype='float32') # mid\n min_principal = np.full(nelements, np.nan, dtype='float32') # min\n #max_shear = np.full(nelements, np.nan, dtype='float32')\n ovm = np.full(nelements, np.nan, dtype='float32')\n\n vm_word = None\n #-------------------------------------------------------------\n #vm_word = get_spring_stress_strain(\n #model, key, is_stress, vm_word, itime,\n #oxx, txy,\n #max_principal, min_principal, ovm, is_element_on,\n #eids, header_dict, keys_map)\n\n #-------------------------------------------------------------\n vm_word = get_rod_stress_strain(\n model, key, is_stress, vm_word, itime,\n oxx, txy,\n max_principal, min_principal, ovm, is_element_on,\n eids, header_dict, keys_map)\n\n vm_word = get_bar_stress_strain(\n model, key, is_stress, vm_word, itime,\n oxx,\n max_principal, min_principal, ovm, is_element_on,\n eids, header_dict, keys_map)\n\n vm_word = get_bar100_stress_strain(\n model, key, is_stress, vm_word, itime,\n oxx,\n max_principal, min_principal, ovm, is_element_on,\n eids, header_dict, keys_map)\n\n vm_word = get_beam_stress_strain(\n model, key, is_stress, vm_word, itime,\n oxx,\n max_principal, min_principal, ovm, is_element_on,\n header_dict, keys_map, self.eid_map)\n #-------------------------------------------------------------\n vm_word = get_plate_stress_strain(\n model, key, is_stress, vm_word, itime,\n oxx, oyy, txy, max_principal, min_principal, ovm, is_element_on,\n eids, header_dict, keys_map)\n\n #vm_word = get_shear_stress_strain(\n #model, key, is_stress, vm_word, itime,\n #oxx, txy,\n #max_principal, min_principal, ovm, is_element_on,\n #eids, header_dict, keys_map)\n\n if is_stress:\n stress_obj = self.stress[key]\n else:\n stress_obj = 
self.strain[key]\n\n if len(stress_obj.composite_data_dict):\n str(stress_obj)\n vm_word = stress_obj.set_composite_stress_old(\n key, itime, oxx, oyy, txy, tyz, txz,\n max_principal, min_principal, ovm,\n is_element_on, header_dict,\n )\n\n vm_word = get_solid_stress_strain(\n model, key, is_stress, vm_word, itime,\n oxx, oyy, ozz, txy, tyz, txz,\n max_principal, mid_principal, min_principal, ovm, is_element_on,\n eids, header_dict, keys_map)\n\n if is_stress:\n word = 'Stress'\n fmt = '%.3f'\n else:\n word = 'Strain'\n fmt = '%.4e'\n\n # a form is the table of output...\n # Subcase 1 <--- formi - form_isubcase\n # Time 1\n # Stress <--- form0 - the root level\n # oxx <--- formis - form_itime_stress\n # oyy\n # ozz\n\n if vm_word is None:\n #print('vm_word is None')\n return icase\n\n form0 = (word, None, [])\n unused_formis = form0[2]\n subcase_id = key[2]\n header = header_dict[(key, itime)]\n formi = []\n form_dict[(key, itime)].append(('Combined ' + word, None, formi))\n\n if is_stress and itime == 0:\n if is_element_on.min() == 0: # if all elements aren't on\n print_empty_elements(self.model, eids, is_element_on, self.log_error)\n\n is_element_on = np.isfinite(oxx)\n is_element_on = is_element_on.astype('|i1')\n stress_res = GuiResult(\n subcase_id, header=f'Stress - isElementOn: {header}', title='Stress\\nisElementOn',\n location='centroid', scalar=is_element_on, mask_value=0, data_format=fmt)\n\n cases[icase] = (stress_res, (subcase_id, 'Stress - isElementOn'))\n formi.append(('Stress - IsElementOn', icase, []))\n icase += 1\n\n #print('max/min', max_principal.max(), max_principal.min())\n # header = _get_nastran_header(case, dt, itime)\n if np.any(np.isfinite(oxx)):\n oxx_res = GuiResult(subcase_id, header=word + f'XX: {header}', title=word + 'XX',\n location='centroid', scalar=oxx, data_format=fmt)\n cases[icase] = (oxx_res, (subcase_id, word + 'XX'))\n formi.append((word + 'XX', icase, []))\n icase += 1\n\n if np.any(np.isfinite(oyy)):\n oyy_res = GuiResult(subcase_id, header=word + f'YY: {header}', title=word + 'YY',\n location='centroid', scalar=oyy, data_format=fmt)\n cases[icase] = (oyy_res, (subcase_id, word + 'YY'))\n formi.append((word + 'YY', icase, []))\n icase += 1\n\n if np.any(np.isfinite(ozz)):\n ozz_res = GuiResult(subcase_id, header=word + f'ZZ: {header}', title=word + 'ZZ',\n location='centroid', scalar=ozz, data_format=fmt)\n cases[icase] = (ozz_res, (subcase_id, word + 'ZZ'))\n formi.append((word + 'ZZ', icase, []))\n icase += 1\n\n if np.any(np.isfinite(txy)):\n oxy_res = GuiResult(subcase_id, header=word + f'XY: {header}', title=word + 'XY',\n location='centroid', scalar=txy, data_format=fmt)\n cases[icase] = (oxy_res, (subcase_id, word + 'XY'))\n formi.append((word + 'XY', icase, []))\n icase += 1\n\n if np.any(np.isfinite(tyz)):\n oyz_res = GuiResult(subcase_id, header=word + f'YZ: {header}', title=word + 'YZ',\n location='centroid', scalar=tyz, data_format=fmt)\n cases[icase] = (oyz_res, (subcase_id, word + 'YZ'))\n formi.append((word + 'YZ', icase, []))\n icase += 1\n\n if np.any(np.isfinite(txz)):\n oxz_res = GuiResult(subcase_id, header=word + f'XZ: {header}', title=word + 'XZ',\n location='centroid', scalar=txz, data_format=fmt)\n cases[icase] = (oxz_res, (subcase_id, word + 'XZ'))\n formi.append((word + 'XZ', icase, []))\n icase += 1\n\n if np.any(np.isfinite(max_principal)):\n maxp_res = GuiResult(subcase_id, header=f'MaxPrincipal: {header}', title='MaxPrincipal',\n location='centroid', scalar=max_principal, data_format=fmt)\n cases[icase] = 
(maxp_res, (subcase_id, 'MaxPrincipal'))\n formi.append(('Max Principal', icase, []))\n icase += 1\n\n if np.any(np.isfinite(mid_principal)):\n midp_res = GuiResult(subcase_id, header=f'MidPrincipal: {header}', title='MidPrincipal',\n location='centroid', scalar=mid_principal, data_format=fmt)\n cases[icase] = (midp_res, (subcase_id, 'MidPrincipal'))\n formi.append(('Mid Principal', icase, []))\n icase += 1\n\n if np.any(np.isfinite(min_principal)):\n minp_res = GuiResult(subcase_id, header=f'MinPrincipal: {header}', title='MinPrincipal',\n location='centroid', scalar=min_principal, data_format=fmt)\n cases[icase] = (minp_res, (subcase_id, 'MinPrincipal'))\n formi.append(('Min Principal', icase, []))\n icase += 1\n\n if vm_word is not None:\n ovm_res = GuiResult(subcase_id, header=f'{vm_word}: {header}', title=vm_word,\n location='centroid', scalar=ovm, data_format=fmt)\n cases[icase] = (ovm_res, (subcase_id, vm_word))\n formi.append((vm_word, icase, []))\n icase += 1\n\n #, case, header, form0\n return icase\n\ndef fill_responses(cases, model: OP2, icase):\n \"\"\"adds the optimization responses\"\"\"\n form_optimization = []\n #fractional_mass_response = model.op2_results.responses.fractional_mass_response\n #if fractional_mass_response is not None:\n #print(fractional_mass_response)\n\n des_filename = model.des_filename\n if os.path.exists(des_filename):\n des_desvars = read_des_filename(des_filename)\n if des_desvars:\n subcase_id = 0\n #eids = des_desvars['eids']\n fractional_mass = des_desvars['fractional_mass']\n minp_res = GuiResult(subcase_id, header='Fractional Mass', title='% Mass',\n location='centroid', scalar=fractional_mass, ) # data_format=fmt\n cases[icase] = (minp_res, (subcase_id, 'Fractional Mass'))\n form_optimization.append(('Fractional Mass', icase, []))\n icase += 1\n #f06_filename = model.f06_filename\n #print('f06_filename =', f06_filename)\n #from pyNastran.f06.dev.read_sol_200 import read_sol_200\n #read_sol_200(f06_filename)\n\n #desvars = model.op2_results.responses.desvars # type: Desvars\n #if desvars is not None:\n #itop = np.where(desvars.label == 'TOPVAR')[0]\n #if len(itop):\n #print(desvars)\n #print('itop =', itop)\n #asdf\n #form_optimization.append(('TOPVAR', icase, []))\n\n #minp_res = GuiResult(subcase_id, header=f'MinPrincipal: {header}', title='MinPrincipal',\n #location='centroid', scalar=min_principal, data_format=fmt)\n #cases[icase] = (minp_res, (subcase_id, 'MinPrincipal'))\n\n #desvars.internal_id = np.zeros(ndesvars, dtype='int32')\n #desvars.desvar_id = np.zeros(ndesvars, dtype='int32')\n #desvars.label = np.zeros(ndesvars, dtype='|U8')\n #desvars.lower = np.zeros(ndesvars, dtype='float32')\n #desvars.upper = np.zeros(ndesvars, dtype='float32')\n #desvars.delxv = np.zeros(ndesvars, dtype='float32')\n #desvars.dunno = np.zeros(ndesvars, dtype='float32')\n return icase, form_optimization\n\ndef _fill_nastran_displacements(cases, model: OP2, key, icase: int,\n form_dict, header_dict, keys_map,\n xyz_cid0,\n nnodes: int, node_ids, log, dim_max: float=1.0,\n prefix: str='') -> int:\n \"\"\"\n loads the nodal dispalcements/velocity/acceleration/eigenvector/spc/mpc forces\n \"\"\"\n if prefix == 'acoustic':\n results = model.op2_results.acoustic\n displacement_like = [\n (results.displacements, 'Acoustic Displacement', True),\n ]\n elif prefix == '':\n displacement_like = [\n # slot, name, deflects\n\n # TODO: what is a velocity/acceleration?\n # is it a fringe, displacement, force?\n (model.displacements, 'Displacement', True),\n 
(model.velocities, 'Velocity', False),\n (model.accelerations, 'Acceleration', False),\n (model.eigenvectors, 'Eigenvectors', True),\n (model.spc_forces, 'SPC Forces', False),\n (model.mpc_forces, 'MPC Forces', False),\n\n (model.contact_forces, 'Contact Forces', False),\n (model.glue_forces, 'Glue Forces', False),\n\n (model.load_vectors, 'LoadVectors', False),\n (model.applied_loads, 'AppliedLoads', False),\n (model.force_vectors, 'ForceVectors', False),\n ]\n else: # pragma: no cover\n raise NotImplementedError(prefix)\n\n for (result, name, deflects) in displacement_like:\n if key not in result:\n continue\n for t123_offset in [0, 3]:\n #if t123_offset == 3:\n #continue\n try:\n icase = _fill_nastran_ith_displacement(\n result, name, deflects, t123_offset,\n cases, model, key, icase,\n form_dict, header_dict, keys_map,\n xyz_cid0,\n nnodes, node_ids, log, dim_max=dim_max)\n except ValueError:\n if not t123_offset == 3:\n raise\n log.error('skipping %s result; t123_offset=%s; type=%s' % (\n name, t123_offset, result[key].__class__.__name__))\n return icase\n\ndef _fill_nastran_ith_displacement(result, name: str, deflects: bool, t123_offset,\n cases, model: OP2, key, icase: int,\n form_dict: Dict[Tuple[Any, Any], str],\n header_dict: Dict[Tuple[Any, Any], str],\n keys_map: Dict[str, Any],\n xyz_cid0,\n nnodes: int, node_ids, log, dim_max: float=1.0) -> int:\n \"\"\"helper for ``_fill_nastran_displacements`` to unindent the code a bit\"\"\"\n if t123_offset == 0:\n title1 = name + ' T_XYZ'\n else:\n assert t123_offset == 3, t123_offset\n title1 = name + ' R_XYZ'\n #title2 = name + ' R_XYZ'\n\n case = result[key]\n subcase_idi = case.isubcase\n if not hasattr(case, 'data'):\n print('str(%s) has no data...' % case.__class.__name__)\n return icase\n\n if not case.is_sort1:\n log.warning('Skipping because SORT2\\n' + str(case))\n return icase\n\n t123, tnorm, ntimes = _get_t123_tnorm(case, node_ids, nnodes,\n t123_offset=t123_offset)\n\n titles = []\n scales = []\n headers = []\n #if deflects:\n if deflects:\n nastran_res = DisplacementResults(subcase_idi, titles, headers,\n xyz_cid0, t123, tnorm,\n scales,\n uname=name)\n\n #dmax = []\n for itime in range(ntimes):\n dt = case._times[itime]\n\n #if name == 'Displacement':\n # (6673, )\n #normiii = np.linalg.norm(t123[itime, :, :], axis=1)\n #print(normiii.shape)\n #print('Displacement; itime=%s time=%s tnorm=%s' % (\n #itime, dt, normiii.max()))\n #dmax.append(normiii.max())\n\n tnorm_abs_max = get_tnorm_abs_max(case, t123, tnorm, itime)\n\n # mode = 2; freq = 75.9575 Hz\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n\n #if tnorm_abs_max == 0.0:\n #scale = self.displacement_scale_factor\n #else:\n #scale = self.displacement_scale_factor / tnorm_abs_max\n\n scale = dim_max\n if tnorm_abs_max > 0.0:\n scale = dim_max / tnorm_abs_max * 0.10\n scales.append(scale)\n titles.append(title1)\n headers.append(f'{title1}: {header}')\n cases[icase] = (nastran_res, (itime, title1)) # do I keep this???\n formii = (title1, icase, [])\n form_dict[(key, itime)].append(formii)\n icase += 1\n\n #if name == 'Displacement':\n # Displacement; itime=361 time=3.61 tnorm=1.46723\n #print('dmax = ', max(dmax))\n #pass\n nastran_res.save_defaults()\n else:\n nastran_res = ForceTableResults(subcase_idi, titles, headers,\n t123, tnorm,\n scales, #deflects=deflects,\n uname=name)\n for itime in range(ntimes):\n dt = 
case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n\n #tnorm_abs_max = get_tnorm_abs_max(case, t123, tnorm, itime)\n #tnorm_abs_max = tnorm.max()\n scale = 1.\n scales.append(scale)\n titles.append(title1)\n headers.append(f'{title1}: {header}')\n cases[icase] = (nastran_res, (itime, title1)) # do I keep this???\n formii = (title1, icase, [])\n form_dict[(key, itime)].append(formii)\n icase += 1\n nastran_res.save_defaults()\n return icase\n\ndef _fill_nastran_temperatures(cases, model: OP2, key, icase: int,\n form_dict, header_dict, keys_map, nnodes: int, log) -> int:\n \"\"\"loads the nodal temperatures\"\"\"\n #nids = self.node_ids\n temperature_like = [\n (model.temperatures, 'Temperature'),\n ]\n for (result, name) in temperature_like:\n if key not in result:\n continue\n case = result[key]\n subcase_idi = case.isubcase\n if not hasattr(case, 'data'):\n continue\n\n if not case.is_sort1:\n log.warning('Skipping because SORT2\\n' + str(case))\n continue\n assert case.is_sort1, case.is_sort1\n\n ntimes = case.ntimes\n for itime in range(ntimes):\n dt = case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n\n loads = case.data[itime, :, :]\n nxyz = norm(loads[:, :3], axis=1)\n assert len(nxyz) == nnodes, 'len(nxyz)=%s nnodes=%s' % (\n len(nxyz), nnodes)\n\n temp_res = GuiResult(subcase_idi, header=f'{name}: {header}', title=name,\n location='node', scalar=loads[:, 0])\n cases[icase] = (temp_res, (0, name))\n form_dict[(key, itime)].append((name, icase, []))\n icase += 1\n return icase\n\ndef print_empty_elements(model, element_ids, is_element_on, log_error):\n \"\"\"prints the first 20 elements that aren't supportedas part of the stress results\"\"\"\n ioff = np.where(is_element_on == 0)[0]\n eids_off = []\n for eid in element_ids[ioff]:\n element = model.elements[eid]\n if element.type not in ['CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CVISC']:\n eids_off.append(eid)\n\n print('stress_eids_off = %s' % np.array(element_ids[ioff]))\n log_error('stress_eids_off = %s' % element_ids[ioff])\n\n for eid in eids_off[:20]:\n element = model.elements[eid]\n print(element.rstrip())\n print('-----------------------------------')\n\n\ndef _get_t123_tnorm(case, nids, nnodes: int, t123_offset: int=0):\n \"\"\"\n helper method for _fill_op2_oug_oqg\n\n Parameters\n ----------\n case : DisplacementArray, ForceArray, etc.\n the OP2 result object???\n nids : (nnodes,) int ndarray\n the nodes in the model???\n nnodes : int\n the number of nodes in the model???\n t123_offset : int; default=0\n 0 : translations / forces\n 3 : rotations / moments\n\n Returns\n -------\n t123 : (ntimes, nnodes, 3) float ndarray\n the translations or rotations\n tnorm : (ntimes, 3) float ndarray\n ???\n ntimes : int\n number of times\n\n \"\"\"\n assert case.is_sort1, case.is_sort1\n\n itime0 = 0\n t1 = case.data[itime0, :, 0]\n ndata = t1.shape[0]\n if nnodes != ndata:\n #print('nnodes=%s ndata=%s' % (nnodes, ndata))\n nidsi = case.node_gridtype[:, 0]\n #assert len(nidsi) == nnodes, 'nidsi=%s nnodes=%s' % (nidsi, nnodes)\n j = np.searchsorted(nids, nidsi) # searching for nidsi\n\n try:\n if not np.allclose(nids[j], nidsi):\n msg = 'nids[j]=%s nidsi=%s' % (nids[j], nidsi)\n raise RuntimeError(msg)\n except IndexError:\n msg = 
'node_ids = %s\\n' % list(nids)\n msg += 'nidsi in disp = %s\\n' % list(nidsi)\n raise IndexError(msg)\n\n # (itime, nnodes, xyz)\n # (901, 6673, 3)\n t123 = case.data[:, :, t123_offset:t123_offset+3]\n ntimes = case.ntimes\n\n if nnodes != ndata:\n dtype = t123.dtype.name\n t123i = np.zeros((ntimes, nnodes, 3), dtype=dtype)\n t123i[:, j, :] = t123\n t123 = t123i\n\n # (itime, nnodes, xyz)\n # tnorm (901, 3)\n tnorm = norm(t123, axis=2) # I think this is wrong...\n #print('tnorm.shape ', tnorm.shape)\n assert len(tnorm) == t123.shape[0]\n else:\n # (itime, nnodes, xyz)\n # tnorm (901, 3)\n\n # float32s are apparently buggy in numpy if you have small numbers\n # see models/elements/loadstep_elememnts.op2\n try:\n tnorm = norm(t123, axis=1)\n except FloatingPointError:\n dtype_map = {\n 'float32': 'float64',\n 'complex64': 'complex128',\n }\n dtype = dtype_map[t123.dtype.name]\n t123 = t123.astype(dtype=dtype)\n tnorm = norm(t123, axis=1)\n\n #print('skipping %s' % name)\n #print(t123.max(axis=1))\n #for itime, ti in enumerate(t123):\n #print('itime=%s' % itime)\n #print(ti.tolist())\n assert len(tnorm) == t123.shape[0]\n\n assert t123.shape[0] == ntimes, 'shape=%s expected=(%s, %s, 3)' % (t123.shape, ntimes, nnodes)\n assert t123.shape[1] == nnodes, 'shape=%s expected=(%s, %s, 3)' % (t123.shape, ntimes, nnodes)\n return t123, tnorm, ntimes\n\n\ndef _get_times(model, key):\n \"\"\"\n Get the times/frequencies/eigenvalues/loadsteps used on a given\n subcase\n \"\"\"\n table_types = model.get_table_types()\n is_real = True\n is_data = False\n is_static = False\n times = None\n for table_type in table_types:\n if not model.has_result(table_type) or table_type.startswith('responses.'):\n #model.log.debug('no table_type=%s' % table_type)\n continue\n\n table = model.get_result(table_type)\n if len(table) == 0:\n continue\n #print(key, table, type(table))\n\n if key in table:\n is_data = True\n case = table[key]\n #print(case)\n is_real = case.is_real\n\n # you're presumably looking here because of a bug\n # are you sure the keys are the right length?\n #print(\"is_real=%r nonlinear_factor=%r _times=%s\" % (\n #is_real, case.nonlinear_factor, case._times))\n if case.nonlinear_factor is not None:\n times = case._times\n is_static = False\n else:\n is_static = True\n times = np.zeros(1, dtype='int32')\n #print('times = ', times)\n break\n #return is_data, is_static, is_real, times\n return is_data, is_static, is_real, times\n\ndef get_tnorm_abs_max(case, t123, tnorm, itime):\n \"\"\"\n The normalization value is consistent for static, frequency, transient,\n and load step cases, but is independent for modal cases.\n \"\"\"\n if case.analysis_code in [1, 5, 6, 10, 11]:\n # dependent\n # 1-statics\n # 5-frequency\n # 6-transient\n # 10-nonlinear statics\n # 11-old nonlinear statics\n tnorm_abs_max = tnorm.max()\n elif case.analysis_code in [2, 7, 8, 9]:\n # independent\n # 2-eigenvectors\n # 7-pre-buckling\n # 8-post-buckling\n # 9-complex eigenvalues\n tnorm_abs_max = np.linalg.norm(t123[itime, :, :], axis=1).max()\n else:\n raise NotImplementedError(f'analysis_code={case.analysis_code}\\ncase:\\n{case}')\n return tnorm_abs_max\n\ndef read_des_filename(des_filename):\n \"\"\"\n DESIGN CYCLE : 30\n 1\n Topology Optimization Element Density Distribution\n Total number of element 3912\n 1115 0\n 0.1408992E-01\n 1116 0\n 0.1628276E-01\n \"\"\"\n with open(des_filename, 'r') as des_file:\n lines = des_file.readlines()\n\n i = 0\n word, ncycles_str = lines[0].split(':')\n word = word.strip()\n assert word 
== 'DESIGN CYCLE'\n unused_ncycles = int(ncycles_str)\n i += 3\n assert lines[i].startswith('Total number of element'), lines[i]\n nelements = int(lines[i].split()[-1])\n i += 1\n\n eids = []\n fractional_mass = []\n for unused_ielement in range(nelements):\n #print(lines[i].strip())\n eid, zero = lines[i].split()\n frac = float(lines[i+1])\n assert zero == '0', lines[i].strip()\n eids.append(eid)\n fractional_mass.append(frac)\n i += 2\n eids = np.array(eids, dtype='int32')\n fractional_mass = np.array(fractional_mass, dtype='float32')\n desvars = {\n 'eids' : eids,\n 'fractional_mass' : fractional_mass,}\n return desvars\n\n\ndef _get_stress_table_types() -> List[str]: # pragma: no cover\n \"\"\"\n Gets the list of Nastran stress objects that the GUI supports\n \"\"\"\n table_types = [\n # OES - tCode=5 thermal=0 s_code=0,1 (stress/strain)\n # OES - CELAS1/CELAS2/CELAS3/CELAS4 stress\n 'celas1_stress',\n 'celas2_stress',\n 'celas3_stress',\n 'celas4_stress',\n\n # OES - CELAS1/CELAS2/CELAS3/CELAS4 strain\n 'celas1_strain',\n 'celas2_strain',\n 'celas3_strain',\n 'celas4_strain',\n\n # OES - isotropic CROD/CONROD/CTUBE stress\n 'crod_stress',\n 'conrod_stress',\n 'ctube_stress',\n\n # OES - isotropic CROD/CONROD/CTUBE strain\n 'crod_strain',\n 'conrod_strain',\n 'ctube_strain',\n\n # OES - isotropic CBAR stress\n 'cbar_stress',\n # OES - isotropic CBAR strain\n 'cbar_strain',\n # OES - isotropic CBEAM stress\n 'cbeam_stress',\n # OES - isotropic CBEAM strain\n 'cbeam_strain',\n\n # OES - isotropic CTRIA3/CQUAD4 stress\n 'ctria3_stress',\n 'cquad4_stress',\n\n # OES - isotropic CTRIA3/CQUAD4 strain\n 'ctria3_strain',\n 'cquad4_strain',\n\n # OES - isotropic CTETRA/CHEXA/CPENTA stress\n 'ctetra_stress',\n 'chexa_stress',\n 'cpenta_stress',\n\n # OES - isotropic CTETRA/CHEXA/CPENTA strain\n 'ctetra_strain',\n 'chexa_strain',\n 'cpenta_strain',\n\n # OES - CSHEAR stress\n 'cshear_stress',\n # OES - CSHEAR strain\n 'cshear_strain',\n # OES - CEALS1 224, CELAS3 225\n 'nonlinear_spring_stress',\n # OES - GAPNL 86\n 'nonlinear_cgap_stress',\n # OES - CBUSH 226\n 'nolinear_cbush_stress',\n ]\n\n table_types += [\n # OES - CTRIAX6\n 'ctriax_stress',\n 'ctriax_strain',\n\n 'cbush_stress',\n 'cbush_strain',\n 'cbush1d_stress_strain',\n\n # OES - nonlinear CROD/CONROD/CTUBE stress\n 'nonlinear_rod_stress',\n 'nonlinear_rod_strain',\n\n # OESNLXR - CTRIA3/CQUAD4 stress\n 'nonlinear_plate_stress',\n 'nonlinear_plate_strain',\n #'hyperelastic_plate_stress',\n 'hyperelastic_cquad4_strain',\n\n # OES - composite CTRIA3/CQUAD4 stress\n 'cquad4_composite_stress',\n 'cquad8_composite_stress',\n 'ctria3_composite_stress',\n 'ctria6_composite_stress',\n\n 'cquad4_composite_strain',\n 'cquad8_composite_strain',\n 'ctria3_composite_strain',\n 'ctria6_composite_strain',\n\n # OGS1 - grid point stresses\n 'grid_point_surface_stresses', # tCode=26\n 'grid_point_volume_stresses', # tCode=27\n ]\n return table_types\n\ndef _get_stress_times(model: OP2, isubcase: int) -> Tuple[bool, bool, bool, Any]: # pragma: no cover\n \"\"\"Are there any stress/strain results?\"\"\"\n table_types = _get_stress_table_types()\n is_real = True\n is_data = False\n is_static = False\n times = None\n for table_type in table_types:\n if not hasattr(model, table_type):\n # print('no table_type=%s' % table_type)\n continue\n table = getattr(model, table_type)\n if isubcase in table:\n is_data = True\n case = table[isubcase]\n is_real = case.is_real\n if case.nonlinear_factor is not None:\n times = case._times\n is_static = False\n else:\n 
is_static = True\n times = np.zeros(1, dtype='int32')\n break\n #return is_data, is_static, is_real, times\n return is_data, is_static, is_real, times\n\ndef _fill_op2_grid_point_surface_stresses(eids_all, cases, model: OP2,\n times, key, icase: int,\n form_dict, header_dict, keys_map) -> int:\n if key not in model.grid_point_surface_stresses:\n return icase\n\n #grid_point_surface_stresses[(1, 1, 1, 0, 666, '', '')]\n # type=GridPointSurfaceStressesArray nelements=99\n # data: [1, nelements, 8] where 8=[nx, ny, txy, angle, majorP, minorP, tmax, ovm]\n # node_element.shape = (99, 2)\n # location.shape = (99,)\n # data.shape = (1, 99, 8)\n # sort1\n # lsdvmns = [1]\n case = model.grid_point_surface_stresses[key]\n\n if case.is_complex:\n return icase\n #print(case.get_stats())\n #eids_all = self.element_ids\n nelements = len(eids_all)\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n subcase_id = key[0]\n\n\n eidsi = case.node_element[:, 0]\n nidsi = case.node_element[:, 1]\n\n icentroid = np.where(nidsi == 0)[0]\n eids_res = eidsi[icentroid]\n assert eids_res.min() > 0, eids_res\n ueids_res = np.unique(eids_res)\n #print('eids_res =', eids_res.tolist(), len(eids_res))\n #print('ueids_res=', ueids_res.tolist(), len(ueids_res))\n\n i = np.searchsorted(eids_all, ueids_res)\n ui = np.unique(i)\n j = np.where(i < len(ui) - 1)[0]\n i2 = i[j]\n\n #print('i =', i.tolist(), len(i))\n #print('ui =', ui.tolist(), len(ui))\n #print('j =', j.tolist(), len(j))\n #print('i2 =', i2.tolist(), len(i2))\n #ueids_res2 = eids_all[i2]\n\n #ueids_res1 = ueids_res[:len(ui) - 1]\n #print('ueids_res1 =', ueids_res1.tolist(), len(ueids_res1))\n #print('ueids_res2 =', ueids_res2.tolist(), len(ueids_res2))\n\n #eid_exists = ueids_res1 == ueids_res2\n #print(\"eid_exists =\", eid_exists)\n #ueids3 = ueids_res1[eid_exists]\n #print('ueids3=', ueids3, len(ueids3))\n\n if len(i2) != len(np.unique(i2)):\n msg = 'i_gpstress=%s is not unique\\n' % str(i2)\n #print('eids = %s\\n' % str(list(eids)))\n #print('eidsi = %s\\n' % str(list(eidsi)))\n raise RuntimeError(msg)\n\n for itime, unused_dt in enumerate(times):\n dt = case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n\n # [nx, ny, txy, angle, majorP, minorP, tmax, ovm]\n nx = np.full(nelements, np.nan, dtype='float32')\n ny = np.full(nelements, np.nan, dtype='float32')\n txy = np.full(nelements, np.nan, dtype='float32')\n angle = np.full(nelements, np.nan, dtype='float32')\n major = np.full(nelements, np.nan, dtype='float32')\n minor = np.full(nelements, np.nan, dtype='float32')\n tmax = np.full(nelements, np.nan, dtype='float32')\n ovm = np.full(nelements, np.nan, dtype='float32')\n\n nx[i2] = case.data[itime, i2, 0]\n ny[i2] = case.data[itime, i2, 1]\n txy[i2] = case.data[itime, i2, 2]\n angle[i2] = case.data[itime, i2, 3]\n major[i2] = case.data[itime, i2, 4]\n minor[i2] = case.data[itime, i2, 5]\n tmax[i2] = case.data[itime, i2, 6]\n ovm[i2] = case.data[itime, i2, 7]\n\n headers = ['nx', 'ny', 'txy', 'majorP', 'minorP', 'tmax', 'ovm']\n form = [('Surface Stresses', None, [])]\n formi = form[0][2]\n form_dict[(key, itime)] = form\n\n for header, resi in zip(headers, (nx, ny, txy, angle, major, minor, ovm)):\n ese_res = GuiResult(subcase_id, header=header,\n title=header, data_format='%.3e',\n location='centroid', scalar=resi)\n cases[icase] = (ese_res, (subcase_id, header))\n formi.append((header, icase, []))\n icase += 1\n return icase\n\ndef 
_fill_op2_grid_point_stresses_volume_direct(nids, cases, model: OP2,\n times, key, icase: int,\n form_dict, header_dict, keys_map) -> int:\n if key not in model.grid_point_stresses_volume_direct:\n return icase\n\n case = model.grid_point_stresses_volume_direct[key]\n if case.is_complex:\n return icase\n nnodes = len(nids)\n\n keys_map[key] = (case.subtitle, case.label,\n case.superelement_adaptivity_index, case.pval_step)\n subcase_id = key[0]\n\n nids2 = case.node\n i = np.searchsorted(nids, nids2)\n if len(i) != len(np.unique(i)):\n msg = 'i_gpstress=%s is not unique\\n' % str(i)\n #print('eids = %s\\n' % str(list(eids)))\n #print('eidsi = %s\\n' % str(list(eidsi)))\n raise RuntimeError(msg)\n\n for itime, unused_dt in enumerate(times):\n dt = case._times[itime]\n header = _get_nastran_header(case, dt, itime)\n header_dict[(key, itime)] = header\n\n # volume direct\n #['ox', 'oy', 'oz', 'txy', 'tyz', 'txz', 'pressure', 'ovm']\n ox = np.full(nnodes, np.nan, dtype='float32')\n oy = np.full(nnodes, np.nan, dtype='float32')\n oz = np.full(nnodes, np.nan, dtype='float32')\n txy = np.full(nnodes, np.nan, dtype='float32')\n tyz = np.full(nnodes, np.nan, dtype='float32')\n txz = np.full(nnodes, np.nan, dtype='float32')\n ovm = np.full(nnodes, np.nan, dtype='float32')\n\n ox[i] = case.data[itime, :, 0]\n oy[i] = case.data[itime, :, 1]\n oz[i] = case.data[itime, :, 2]\n txy[i] = case.data[itime, :, 3]\n tyz[i] = case.data[itime, :, 4]\n txz[i] = case.data[itime, :, 5]\n ovm[i] = case.data[itime, :, 7]\n\n headers = ['oxx', 'oyy', 'ozz', 'txy', 'tyz', 'txz', 'ovm']\n form = [('Volume Direct', None, [])]\n formi = form[0][2]\n form_dict[(key, itime)] = form\n\n for header, resi in zip(headers, (ox, oy, oz, txy, tyz, txz, ovm)):\n ese_res = GuiResult(subcase_id, header=header,\n title=header, data_format='%.3e',\n location='node', scalar=resi)\n cases[icase] = (ese_res, (subcase_id, header))\n formi.append((header, icase, []))\n icase += 1\n return icase\n\n"
] | [
[
"numpy.frombuffer",
"numpy.array"
],
[
"numpy.nanmax",
"numpy.allclose",
"numpy.isfinite",
"numpy.unique",
"numpy.array_equal",
"numpy.abs",
"numpy.nanmin",
"numpy.linalg.norm",
"numpy.full",
"numpy.searchsorted",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bsipocz/glue | [
"7b7e4879b4c746b2419a0eca2a17c2d07a3fded3",
"7b7e4879b4c746b2419a0eca2a17c2d07a3fded3",
"7b7e4879b4c746b2419a0eca2a17c2d07a3fded3",
"7b7e4879b4c746b2419a0eca2a17c2d07a3fded3"
] | [
"glue/clients/__init__.py",
"glue/clients/profile_viewer.py",
"glue/qt/widgets/subset_facet.py",
"glue/core/tests/test_coordinate_links.py"
] | [
"from matplotlib import rcParams, rcdefaults\n#standardize mpl setup\nrcdefaults()\n\nfrom .histogram_client import HistogramClient\nfrom .image_client import ImageClient\nfrom .scatter_client import ScatterClient\n",
"import numpy as np\nfrom matplotlib.transforms import blended_transform_factory\n\nfrom ..core.callback_property import CallbackProperty, add_callback\n\n\nPICK_THRESH = 30 # pixel distance threshold for picking\n\n\nclass Grip(object):\n\n def __init__(self, viewer, artist=True):\n self.viewer = viewer\n self.enabled = True\n\n self.artist = None\n if artist:\n self.artist = self._artist_factory()\n\n def remove(self):\n raise NotImplementedError()\n\n def _artist_factory(self):\n raise NotImplementedError()\n\n def pick_dist(self, x, y):\n \"\"\"\n Return the distance, in pixels,\n between a point in (x,y) data space and\n the grip\n \"\"\"\n raise NotImplementedError()\n\n def dblclick(self, x, y):\n \"\"\"Respond to a double-click event\n\n Default is to ignore\n \"\"\"\n pass\n\n def select(self, x, y):\n \"\"\"\n Process a selection event (click) at x,y\n \"\"\"\n raise NotImplementedError()\n\n def drag(self, x, y):\n \"\"\"\n Process a drag to x, y\n \"\"\"\n raise NotImplementedError()\n\n def release(self):\n \"\"\"\n Process a release\n \"\"\"\n raise NotImplementedError()\n\n def disable(self):\n self.enabled = False\n if self.artist is not None:\n self.artist.set_visible(False)\n self.viewer.axes.figure.canvas.draw()\n\n def enable(self):\n self.enabled = True\n if self.artist is not None:\n self.artist.set_visible(True)\n self.viewer.axes.figure.canvas.draw()\n\n\nclass ValueGrip(Grip):\n value = CallbackProperty(None)\n\n def __init__(self, viewer, artist=True):\n super(ValueGrip, self).__init__(viewer, artist)\n self._drag = False\n\n def _artist_factory(self):\n return ValueArtist(self)\n\n def dblclick(self, x, y):\n self.value = x\n\n def pick_dist(self, x, y):\n xy = [[x, y], [self.value, y]]\n xypix = self.viewer.axes.transData.transform(xy)\n return abs(xypix[1, 0] - xypix[0, 0])\n\n def select(self, x, y):\n if self.pick_dist(x, y) > PICK_THRESH:\n return\n self._drag = True\n\n def drag(self, x, y):\n if self._drag:\n self.value = x\n\n def release(self):\n self._drag = False\n\n\nclass RangeGrip(Grip):\n range = CallbackProperty((None, None))\n\n def __init__(self, viewer):\n super(RangeGrip, self).__init__(viewer)\n\n # track state during drags\n self._move = None\n self._ref = None\n self._refx = None\n self._refnew = None\n\n def _artist_factory(self):\n return RangeArtist(self)\n\n def pick_dist(self, x, y):\n xy = np.array([[x, y],\n [self.range[0], y],\n [self.range[1], y],\n [sum(self.range) / 2, y]])\n xypix = self.viewer.axes.transData.transform(xy)\n dx = np.abs(xypix[1:] - xypix[0])[:, 0]\n return min(dx)\n\n def select(self, x, y):\n if self.pick_dist(x, y) > PICK_THRESH:\n return self.new_select(x, y)\n\n cen = sum(self.range) / 2.\n wid = self.range[1] - self.range[0]\n if x < cen - wid / 4.:\n self._move = 'left'\n elif x < cen + wid / 4.:\n self._move = 'center'\n self._ref = self.range\n self._refx = x\n else:\n self._move = 'right'\n\n def new_select(self, x, y):\n \"\"\"\n Begin a selection in \"new range\" mode.\n In this mode, the previous grip position is ignored,\n and the new range is defined by the select/release positions\n \"\"\"\n self._refnew = x\n self.range = (x, x)\n\n def new_drag(self, x, y):\n \"\"\"\n Drag the selection in \"new mode\"\n \"\"\"\n if self._refnew is not None:\n self._set_range(self._refnew, x)\n\n def drag(self, x, y):\n if self._refnew is not None:\n return self.new_drag(x, y)\n\n if self._move == 'left':\n if x > self.range[1]:\n self._move = 'right'\n self._set_range(x, self.range[1])\n\n elif self._move == 
'center':\n dx = (x - self._refx)\n self._set_range(self._ref[0] + dx, self._ref[1] + dx)\n else:\n if x < self.range[0]:\n self._move = 'left'\n self._set_range(self.range[0], x)\n\n def _set_range(self, lo, hi):\n self.range = min(lo, hi), max(lo, hi)\n\n def release(self):\n self._move = None\n self._ref = None\n self._refx = None\n self._refnew = None\n\n\nclass ValueArtist(object):\n\n def __init__(self, grip, **kwargs):\n self.grip = grip\n add_callback(grip, 'value', self._update)\n ax = self.grip.viewer.axes\n\n kwargs.setdefault('lw', 2)\n kwargs.setdefault('alpha', 0.5)\n kwargs.setdefault('c', '#ffb304')\n trans = blended_transform_factory(ax.transData, ax.transAxes)\n self._line, = ax.plot([grip.value, grip.value], [0, 1],\n transform=trans, **kwargs)\n\n def _update(self, value):\n self._line.set_xdata([value, value])\n self._line.axes.figure.canvas.draw()\n\n def set_visible(self, visible):\n self._line.set_visible(visible)\n\n\nclass RangeArtist(object):\n\n def __init__(self, grip, **kwargs):\n self.grip = grip\n add_callback(grip, 'range', self._update)\n ax = grip.viewer.axes\n trans = blended_transform_factory(ax.transData, ax.transAxes)\n\n kwargs.setdefault('lw', 2)\n kwargs.setdefault('alpha', 0.5)\n kwargs.setdefault('c', '#ffb304')\n self._line, = ax.plot(self.x, self.y, transform=trans, **kwargs)\n\n @property\n def x(self):\n l, r = self.grip.range\n return [l, l, l, r, r, r]\n\n @property\n def y(self):\n return [0, 1, .5, .5, 0, 1]\n\n def _update(self, rng):\n self._line.set_xdata(self.x)\n self._line.axes.figure.canvas.draw()\n\n def set_visible(self, visible):\n self._line.set_visible(visible)\n\n\ndef _build_axes(figure):\n\n # tight-layout clobbers manual positioning\n try:\n figure.set_tight_layout(False)\n except AttributeError: # old MPL\n pass\n\n ax2 = figure.add_subplot(122)\n ax1 = figure.add_subplot(121, sharex=ax2)\n\n return ax1, ax2\n\n\nclass ProfileViewer(object):\n value_cls = ValueGrip\n range_cls = RangeGrip\n\n def __init__(self, figure):\n self.axes, self.resid_axes = _build_axes(figure)\n\n self._artist = None\n self._resid_artist = None\n self._x = self._xatt = self._y = self._yatt = None\n self._resid = None\n self.connect()\n\n self._fit_artists = []\n self.active_grip = None # which grip should receive events?\n self.grips = []\n self._xlabel = ''\n\n def set_xlabel(self, xlabel):\n self._xlabel = xlabel\n\n def autoscale_ylim(self):\n x, y = self._x, self._y\n xlim = self.axes.get_xlim()\n mask = (xlim[0] <= x) & (x <= xlim[1])\n ymask = y[mask]\n if ymask.size == 0:\n return\n\n ylim = np.nan_to_num([np.nanmin(ymask), np.nanmax(ymask)])\n self.axes.set_ylim(ylim[0], ylim[1] + .05 * (ylim[1] - ylim[0]))\n\n if self._resid is None:\n return\n assert self._resid.size == y.size\n\n ymask = self._resid[mask]\n ylim = np.nan_to_num([np.nanmin(ymask), np.nanmax(ymask)])\n diff = .05 * (ylim[1] - ylim[0])\n self.resid_axes.set_ylim(ylim[0] - diff, ylim[1] + diff)\n\n def _relayout(self):\n if self._resid_artist is not None:\n self.axes.set_position([0.1, .35, .88, .6])\n self.resid_axes.set_position([0.1, .15, .88, .2])\n self.resid_axes.set_xlabel(self._xlabel)\n self.resid_axes.set_visible(True)\n self.axes.set_xlabel('')\n [t.set_visible(False) for t in self.axes.get_xticklabels()]\n else:\n self.resid_axes.set_visible(False)\n self.axes.set_position([0.1, .15, .88, .83])\n self.axes.set_xlabel(self._xlabel)\n [t.set_visible(True) for t in self.axes.get_xticklabels()]\n\n def set_profile(self, x, y, xatt=None, yatt=None, **kwargs):\n 
\"\"\"\n Set a new line profile\n\n :param x: X-coordinate data\n :type x: array-like\n\n :param y: Y-coordinate data\n :type y: array-like\n\n :param xatt: ComponentID associated with X axis\n :type xatt: :class:`~glue.core.data.CompoenntID`\n\n :param yatt: ComponentID associated with Y axis\n :type yatt: :class:`~glue.core.data.CompoenntID`\n\n Extra kwargs are passed to matplotlib.plot, to\n customize plotting\n\n Returns the created MPL artist\n \"\"\"\n self._clear_fit()\n self._x = np.asarray(x).ravel()\n self._xatt = xatt\n self._y = np.asarray(y).ravel()\n self._yatt = yatt\n if self._artist is not None:\n self._artist.remove()\n\n kwargs.setdefault('drawstyle', 'steps-mid')\n\n self._artist = self.axes.plot(x, y, **kwargs)[0]\n self._relayout()\n self._redraw()\n\n return self._artist\n\n def _clear_fit(self):\n for a in self._fit_artists:\n a.remove()\n self._fit_artists = []\n if self._resid_artist is not None:\n self._resid_artist.remove()\n self._resid_artist = None\n\n def connect(self):\n connect = self.axes.figure.canvas.mpl_connect\n self._down_id = connect('button_press_event', self._on_down)\n self._up_id = connect('button_release_event', self._on_up)\n self._move_id = connect('motion_notify_event', self._on_move)\n\n def disconnect(self):\n off = self.axes.figure.canvas.mpl_disconnect\n self._down_id = off(self._down_id)\n self._up_id = off(self._up_id)\n self._move_id = off(self._move_id)\n\n def _on_down(self, event):\n if not event.inaxes:\n return\n\n if event.dblclick:\n if self.active_grip is not None:\n self.active_grip.dblclick(event.xdata, event.ydata)\n return\n\n if self.active_grip is not None and self.active_grip.enabled:\n self.active_grip.select(event.xdata, event.ydata)\n\n def _on_up(self, event):\n if not event.inaxes:\n return\n if self.active_grip is None or not self.active_grip.enabled:\n return\n\n self.active_grip.release()\n\n def _on_move(self, event):\n if not event.inaxes or event.button != 1:\n return\n if self.active_grip is None or not self.active_grip.enabled:\n return\n\n self.active_grip.drag(event.xdata, event.ydata)\n\n def _redraw(self):\n self.axes.figure.canvas.draw()\n\n def profile_data(self, xlim=None):\n if self._x is None or self._y is None:\n raise ValueError(\"Must set profile first\")\n\n x = self._x\n y = self._y\n if xlim is not None:\n mask = (min(xlim) <= x) & (x <= max(xlim))\n x = x[mask]\n y = y[mask]\n\n return x, y\n\n def fit(self, fitter, xlim=None):\n try:\n x, y = self.profile_data(xlim)\n dy = None\n except ValueError:\n raise ValueError(\"Must set profile before fitting\")\n\n result = fitter.build_and_fit(x, y)\n\n return result, x, y, dy\n\n def plot_fit(self, fitter, fit_result):\n self._clear_fit()\n x = self._x\n y = fitter.predict(fit_result, x)\n self._fit_artists = fitter.plot(fit_result, self.axes, x)\n resid = self._y - y\n self._resid = resid\n self._resid_artist, = self.resid_axes.plot(x, resid, 'k')\n self.autoscale_ylim()\n self._relayout()\n\n def new_value_grip(self, callback=None):\n \"\"\"\n Create and return new ValueGrip\n\n :param callback: A callback function to be invoked\n whenever the grip.value property changes\n \"\"\"\n result = self.value_cls(self)\n result.value = self._center[0]\n\n if callback is not None:\n add_callback(result, 'value', callback)\n self.grips.append(result)\n self.active_grip = result\n return result\n\n def new_range_grip(self, callback=None):\n \"\"\"\n Create and return new RangeGrip\n\n :param callback: A callback function to be invoked\n whenever the 
grip.range property changes\n \"\"\"\n result = self.range_cls(self)\n center = self._center[0]\n width = self._width\n result.range = center - width / 4, center + width / 4\n\n if callback is not None:\n add_callback(result, 'range', callback)\n\n self.grips.append(result)\n self.active_grip = result\n\n return result\n\n @property\n def _center(self):\n \"\"\"Return the data coordinates of the axes center, as (x, y)\"\"\"\n xy = self.axes.transAxes.transform([.5, .5])\n xy = self.axes.transData.inverted().transform(xy)\n return tuple(xy.ravel())\n\n @property\n def _width(self):\n \"\"\"Return the X-width of axes in data units\"\"\"\n xlim = self.axes.get_xlim()\n return xlim[1] - xlim[0]\n\n def pick_grip(self, x, y):\n \"\"\"\n Given a coordinate in Data units,\n return the enabled Grip object nearest\n that point, or None if none are nearby\n \"\"\"\n grips = [h for h in self.grips if h.enabled]\n if not grips:\n return\n\n dist, grip = min((h.pick_dist(x, y), h)\n for h in grips)\n\n if dist < PICK_THRESH:\n return grip\n",
"from ...external.qt.QtGui import (QDialog, QDoubleValidator, QIcon)\nimport numpy as np\nfrom matplotlib import cm\n\n\nfrom ..qtutil import pretty_number, cmap2pixmap, load_ui\nfrom ...core.util import colorize_subsets, facet_subsets\nfrom ..widget_properties import ButtonProperty\n\n\nclass SubsetFacet(object):\n log = ButtonProperty('ui.log')\n\n def __init__(self, collect, default=None, parent=None):\n \"\"\"Create a new dialog for subset faceting\n\n :param collect: The :class:`~glue.core.DataCollection` to use\n :param default: The default dataset in the collection (optional)\n \"\"\"\n self.ui = load_ui('subset_facet', parent)\n self.ui.setWindowTitle(\"Subset Facet\")\n self._collect = collect\n\n self.ui.component_selector.setup(self._collect)\n if default is not None:\n self.ui.component_selector.data = default\n\n val = QDoubleValidator(-1e100, 1e100, 4, None)\n self.ui.component_selector.component_changed.connect(self._set_limits)\n\n combo = self.ui.color_scale\n for cmap in [cm.cool, cm.RdYlBu, cm.RdYlGn, cm.RdBu, cm.Purples]:\n combo.addItem(QIcon(cmap2pixmap(cmap)), cmap.name, cmap)\n\n def _set_limits(self):\n data = self.ui.component_selector.data\n cid = self.ui.component_selector.component\n\n vals = data[cid]\n\n wmin = self.ui.min\n wmax = self.ui.max\n\n wmin.setText(pretty_number(np.nanmin(vals)))\n wmax.setText(pretty_number(np.nanmax(vals)))\n\n @property\n def cmap(self):\n combo = self.ui.color_scale\n index = combo.currentIndex()\n return combo.itemData(index)\n\n def _apply(self):\n lo, hi = self.ui.min.text(), self.ui.max.text()\n try:\n lo, hi = float(lo), float(hi)\n except ValueError:\n return # limits not set. Abort\n if not np.isfinite(lo) or not np.isfinite(hi):\n return\n\n steps = self.ui.num.value()\n\n data = self.ui.component_selector.data\n cid = self.ui.component_selector.component\n\n subsets = facet_subsets(self._collect, cid, lo=lo, hi=hi,\n steps=steps, log=self.log)\n colorize_subsets(subsets, self.cmap)\n\n @classmethod\n def facet(cls, collect, default=None, parent=None):\n \"\"\"Class method to create facted subsets\n The arguments are the same as __init__\n \"\"\"\n self = cls(collect, parent=parent, default=default)\n value = self.ui.exec_()\n\n if value == QDialog.Accepted:\n self._apply()\n",
"import numpy as np\nfrom astropy.io import fits\nfrom cStringIO import StringIO\n\nfrom .. import Data, DataCollection\nfrom ..coordinates import coordinates_from_header\nfrom ..link_helpers import LinkSame\nfrom .util import make_file\n\n\ndef test_wcs_3d_to_2d():\n \"\"\" For a \"normal\" XYV cube, linking XY world should be\n enough to propagate XY pixel\n \"\"\"\n d = Data(label='D1')\n with make_file(test_fits, suffix='.fits', decompress=True) as file:\n header = fits.getheader(file)\n d.coords = coordinates_from_header(header)\n d.add_component(np.zeros((3, 2, 1)), label='test')\n\n d2 = Data(label='D2')\n d2.coords = coordinates_from_header(header)\n d2.add_component(np.zeros((3, 2, 1)), label='test2')\n\n dc = DataCollection([d, d2])\n dc.add_link(LinkSame(d.get_world_component_id(1),\n d2.get_world_component_id(1)))\n dc.add_link(LinkSame(d.get_world_component_id(2),\n d2.get_world_component_id(2)))\n\n py = d.get_pixel_component_id(1)\n px = d.get_pixel_component_id(2)\n py2 = d2.get_pixel_component_id(1)\n px2 = d2.get_pixel_component_id(2)\n\n np.testing.assert_array_almost_equal(d2[px], d2[px2])\n np.testing.assert_array_almost_equal(d2[py], d2[py2])\n\n\ndef test_link_velocity():\n \"\"\" For a normal PPV cube, linking velocity world should be\n enough to get pixel V\"\"\"\n d = Data(label='D1')\n with make_file(test_fits, suffix='.fits', decompress=True) as file:\n header = fits.getheader(file)\n d.coords = coordinates_from_header(header)\n d.add_component(np.zeros((3, 2, 1)), label='test')\n\n d2 = Data(label='D2')\n d2.coords = coordinates_from_header(header)\n d2.add_component(np.zeros((3, 2, 1)), label='test2')\n\n dc = DataCollection([d, d2])\n dc.add_link(LinkSame(d.get_world_component_id(0),\n d2.get_world_component_id(0)))\n\n pz = d.get_pixel_component_id(0)\n pz2 = d2.get_pixel_component_id(0)\n\n np.testing.assert_array_almost_equal(d2[pz], d2[pz2])\n\n\nclass TestDependentAxes(object):\n\n def test_base(self):\n d = Data(x=[1, 2, 3])\n assert d.coords.dependent_axes(0) == (0,)\n\n d = Data(x=[[1, 2], [3, 4]])\n assert d.coords.dependent_axes(0) == (0,)\n assert d.coords.dependent_axes(1) == (1,)\n\n def header2(self, proj='SIN'):\n result = fits.Header()\n result['NAXIS'] = 2\n result['NAXIS1'] = 100\n result['NAXIS2'] = 100\n result['CRPIX1'] = 1\n result['CRPIX2'] = 1\n result['CDELT1'] = 1\n result['CDELT2'] = 1\n result['CTYPE1'] = 'RA---%s' % proj\n result['CTYPE2'] = 'DEC--%s' % proj\n result['CRVAL1'] = 1\n result['CRVAL2'] = 1\n return result\n\n def header3(self, proj='SIN'):\n result = self.header2(proj)\n result.update(NAXIS=3, NAXIS3=1, CDELT3=1,\n CRPIX3=3, CTYPE3='VOPT')\n return result\n\n def header4(self):\n result = fits.Header()\n result.update(WCSAXES=4,\n CRPIX1=513,\n CRPIX2=513,\n CRPIX3=1,\n CRPIX4=1,\n CDELT1=-6.94444444444E-05,\n CDELT2=6.94444444444E-05,\n CDELT3=10000.1667626,\n CDELT4=1,\n CTYPE1='RA---SIN',\n CTYPE2='DEC--SIN',\n CTYPE3='VOPT',\n CTYPE4='STOKES',\n CRVAL1=56.7021416715,\n CRVAL2=68.0961055596,\n CRVAL3=-280000.000241,\n CRVAL4=1,\n PV2_1=0,\n PV2_2=0,\n LONPOLE=180,\n LATPOLE=68.0961055596,\n RESTFRQ=34596380000,\n RADESYS='FK5',\n EQUINOX=2000,\n SPECSYS='BARYCENT')\n return result\n\n def test_wcs_ppv(self):\n\n header = self.header3()\n\n d = Data(label='D1')\n d.coords = coordinates_from_header(header)\n d.add_component(np.zeros((3, 2, 1)), label='test')\n\n assert d.coords.dependent_axes(0) == (0,)\n assert d.coords.dependent_axes(1) == (1, 2)\n assert d.coords.dependent_axes(2) == (1, 2)\n\n def 
test_wcs_alma(self):\n header = self.header4()\n\n d = Data(label='D1')\n d.coords = coordinates_from_header(header)\n d.add_component(np.zeros((3, 2, 1, 1)), label='test')\n\n assert d.coords.dependent_axes(0) == (0,)\n assert d.coords.dependent_axes(1) == (1,)\n assert d.coords.dependent_axes(2) == (2, 3)\n assert d.coords.dependent_axes(3) == (2, 3)\n\n\ntest_fits = 'x\\x9c\\xed\\x97Qs\\xa2H\\x14\\x85\\xf7\\xa7\\xdc\\xa75I\\x05B\\x83\\xa0\\xb8\\x95\\x07\\xd462\\x11q\\xa0\\xcdL\\xe6%\\x85\\xd21T!X\\x80\\x93\\xf1\\xdf\\xef\\x05uuv\\xccN\\xc0<mq\\x1e\\x04\\x11>O\\x9f\\xee\\xa6o\\xbb\\xa65\\x19Q\\x80[8!\\x0670\\x8f\\xa3\\xe78Y\\xa6\\x90\\xc500\\x99\\x0bi\\xe6E\\xbe\\x97\\xf8\\xa7\\x1e\\x00\\xe8\\x9alb~=\\xc9\\x13\\xb4&\\xf2\\xbc$\\xf16\\xe0{\\x99\\x07\\xd9f\\xc5OS\\x0e\\x1a\\x1b_M\\x17\\xde\\xf0\\xa7 /Z/g<\\x81\\xf8yO\\x0e\\x96<J\\x838J\\xdf\\xe6\\x917x\\xe4wn\\xde\\xe0\\xc9\\x1f\\xccS>\\x8e\\xd7\\xb3-\\x8b\\x8e\\x19\\x9e\\x15\\x9dw1\\x08\\xf9\\x8f`\\x16r0\\x97\\xde\\x82\\x03K\\xbc(]\\xc5I\\x06\\xee&\\xcd\\xf8\\xf2\\x12\\xf2\\xce\\xf62\\x08R\\xf0\\xf9s\\x10q\\x1f\\x82\\x08\\x1aF\\x9a%q\\x14/7\\x07\\x1e\\x8e\\x02(.\\xaf^6i0O\\x1b\\xd7\\xf0=\\x0e\\xd7K\\x0eJK\\xbb\\x86U\\x8eWT\\xfd/\\x98\\x05\\xb3y\\xec\\xf3\\x0e\\xc8\\x92D\\x8c?\\rQ\\x14\\xf1\\x0e\\xfcP\\xf5!\\xf4\\rFs\\x9f\\xb7\\xd0\\xc0\\x9f\\x9b\\x82\\xa4\\x08De2\\xe9\\xc8\\xed\\x8e,7\\xb0\\x83\\x9f\\x03t;O\\xb8\\x97a\\xa7\\xe6\\x03\\x87\\xc3\\xc5#J\\xb0,\\xa1\\xdfg//\\x9d\\xe5\\xb2\\x93\\xa60e\\x97\\xc8\\xb1\\xbb\\x9fh\\x8f\\x15\\xbc\\tu\\\\:u\\x8b\\x18\\x1a\\xbb8n\\xca\\xe6\\xc7\\xe8\\x88\\xba={\\x82\\xbcA\\xcf1l\\x814\\xad\\xc6\\xe1\\xe7\\xd2<s\\xec2gjQ\\xe4\\xb9\\xf4\\xf3\\xd46\\r8\\xc2\\x95\\xe7\\xd9]\\xf7)\\xcfp7^0CY\\x94u\\xa2\\xabDk\\x11R\\x9e7e\\xdb\\xe3\\xcf\\xe3\\x8f(bS\\xd7\\x8a\\xf9;\\xb4\\xa7\\x8e\\xfb~\\xde\\xc8e\\'x\\xb2\\x8cC@\\xd1\\x95\\xf2<:\\xb1{\\xc3_xP\\xb4\\\\\\x12\\xcb\\xb7\\xd7\\xf8fZS6\\xdc\\xf1\\xb4\\xb6\\xd8&\\x8a\\xdeV\\xe5\\x96Nd\\xb9\\x8d\\xbc>\\xbds(}\\xb7C\\x1c.\\x0f\\x063\\xed-O\\xd6DM\\xd1\\x9b\\xedv\\x13\\x91\\x9a\\xa2\\x95\\xe7=\\x8c\\\\\\'?\\x9ex\\x1f\\x90\\xa2\\xbd\\xf7\\xd6M\\x89\\xf8\\xa0;\\x1d\\x9b\\xac\\xe0\\xe5\\xc3\\xee>\\xbf\\xf4\\xf3\\xf8c\\xc6U\\t\\x1c\\xb8\\xf7\\x8fO\\x03\\x87~\\xde\\xfa#\\xe8\\x89\\xb4u\\x89\\xb4U]\\x95\\x9b*\\xf2\\xee\\x86\\xdf\\xca\\xf0F\\xe6\\x98\\x1ex;\\xe5XY\"J\\xab<\\xaf\\xfbe{<\\x91\\x9f\\xac\\xe6\\xf9Y\\xe5\\xfd\\x8d\\x8db\\xfe\\x12\\xa5g?\\x11A:k\\xfe\\xf6\\xd8\\xe3\\x84\\x16\\xebQ\\xc31\\x04Ap\\x07\\xa3\\xf3x\\xce\\x831:^\\xdf\\xd4\\x96\\xa8\\xa9\\xfaN\\x15x}:b{\\x9e 
\\x89\\x92\\xa4)m\\x94.\\xe5\\xaa\\xe2\\x0f\\xcb\\x83c\\x7fJ\\xce\\xdc\\xabb~\\xc5\\xfa\\xdb\\xe8\\xd3\\xde\\x07\\xe5\\xf7\\xebz\\xbe3Y1\\xbf\\x7fx\\x1f\\x94\\xdf\\x91?\\xa1\\xa9\\xe9MQ\\'\\xca9\\xf9\\x15\\xf5F\\xe3\\x81\\x8el\\x01_7\\xe7\\xe7wT\\xbf\\x08\\xba\\xaea\\xab[\\xba$\\xb7Z\\xad\\x8a\\xf9\\x1d\\xd7C\\x9a&6eE#\\xe7\\xe4w\\xaa\\xbeB\\xa3\\xa2T\\x96\\x0604]f;\\x8fp\\xc7#\\x9e`m\\xe2\\xc3l\\x03E\\xa5\\x00\\x0e_$\\x81\\xef\\x07\\xd1\\x02&I\\xbcH\\xbc%\\xe0\\xda\\xfc\\x9b\\xff\\xd8\\xf3\\xba^\\xcaC,\\xbf\\xc0]\\xcf\\xb2\\xc4\\x9b\\xe7\\xe4*\\xda\\xf3\\n\\xdd\\x85\\xf1\\xcc\\x0b\\x0f\\xec\\x89\\x87\\xa6x\\xc6\\x93\\xb4\\x83\\xb5e\\x9c\\xf8XH\\xaf\\xe2p\\x83\\x85^\\x80\\xf7\\xfd\\x17\\xefK\\x10\\xf9\\xf1+0,\\xe1o\\xbf\\xf30\\x9e\\x07\\xd9\\xe6l\\x7fE}\\x9b\\xcf\\x11\\xc8\\xd7\\xf3\\xed\\xb1\"o\\x1c\\x07)\\x87W\\x1e,^\\xb2\\xbc\\x07\\xc8\\xcd*~\\xbdp\\xcd;\\xcb\\xb8\\x96/\\xab\\xf0`\\x10b]<\\x08xXt)n8\\xfa\\xf9&\\xa6\\xa2?w\\x85\\xf5,f<\\x08B\\xcc\\xbf\\x03\\x9f\\x82h~\\xb5\\xf0\\xd6i\\x1ax\\xd1U\\xfe\\xad\\x1c\\xcf\\x8cV\\xeb\\x0cl6\\x00w\\x8e%}\\xa7\\xac\\xaf\\x7f\\xf3@ST-\\xdf\\xf3\\xe1I\\xab\\xc2\\xbec/:\\xeeW\\x7f\\xb8V\\xadZ\\xb5j\\xd5\\xaa\\xf5\\xbf\\xd4\\x1f\\xb5j\\xd5\\xaaU\\xabV\\xadZ\\xb5j\\xd5z\\xb7\\xfe\\x06\\xb6\\x02\\x94\\xfe'\n"
] | [
[
"matplotlib.rcdefaults"
],
[
"numpy.nanmax",
"numpy.abs",
"numpy.asarray",
"numpy.nanmin",
"matplotlib.transforms.blended_transform_factory"
],
[
"numpy.nanmax",
"numpy.nanmin",
"numpy.isfinite"
],
[
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lxuechen/swissknife | [
"43dbd36f1e998ebe29c0b85fafd0de765dfb5de8",
"43dbd36f1e998ebe29c0b85fafd0de765dfb5de8",
"43dbd36f1e998ebe29c0b85fafd0de765dfb5de8",
"43dbd36f1e998ebe29c0b85fafd0de765dfb5de8"
] | [
"experiments/explainx/numerical.py",
"experiments/priv_fair/plots/acc_on_the_line.py",
"experiments/priv_fair/misc/log.py",
"experiments/explainx/BLIP/models/nlvr_encoder.py"
] | [
"import math\nfrom typing import Union, Sequence\n\nimport torch\n\n\ndef logmeanexp(x: Union[Sequence[torch.Tensor], torch.Tensor], keepdim=False, dim=0):\n if isinstance(x, (tuple, list)):\n elem0 = x[0]\n if elem0.dim() == 0:\n x = torch.stack(x)\n elif elem0.dim() == 1:\n x = torch.cat(x, dim=0)\n else:\n raise ValueError\n return torch.logsumexp(x, dim=dim, keepdim=keepdim) - math.log(x.size(dim))\n",
"\"\"\"\nCIFAR-10 -> CINIC-10, CIFAR-10.2 experiment on 11/30/21.\n\npython -m experiments.priv_fair.plots.acc_on_the_line\n\"\"\"\n\nimport fire\nimport numpy as np\n\nfrom swissknife import utils\nfrom ...simclrv2_florian.download import available_simclr_models\n\ndataset2name = {\n \"cinic-10\": \"CINIC-10\",\n 'cifar-10.2': \"CIFAR-10.2\"\n}\n\n\ndef main(\n base_dir=\"/Users/xuechenli/Desktop/dump_a100/acc-on-the-line\",\n tasks=(\"private\", \"non_private\"),\n seeds=tuple(range(5)),\n ood_datasets=(\"cinic-10\", \"cifar-10.2\"),\n colors=('red', 'blue'),\n):\n for ood_dataset in ood_datasets: # One figure for each ood dataset.\n plots = []\n ebars = []\n for task, color in zip(tasks, colors):\n errorbar = dict(x=[], y=[], yerr=[], xerr=[], ls='none', fmt='none', label=f\"{task}\")\n for model in available_simclr_models: # Each model provides one datapoint.\n model = \"simclr_\" + model\n xvals, yvals = [], []\n for seed in seeds:\n path = utils.join(base_dir, model, task, f\"{seed}\")\n\n log_history_path = utils.join(path, 'log_history.json')\n log_history = utils.jload(log_history_path)\n last_result = log_history[-1]\n\n id_acc = last_result[\"test_zeon\"]\n od_acc = last_result[ood_dataset][\"test_zeon\"]\n\n xvals.append(id_acc)\n yvals.append(od_acc)\n\n xavg, xstd = np.mean(xvals), np.std(xvals)\n yavg, ystd = np.mean(yvals), np.std(yvals)\n errorbar[\"x\"].append(xavg)\n errorbar[\"y\"].append(yavg)\n\n errorbar[\"xerr\"].append(xstd)\n errorbar[\"yerr\"].append(ystd)\n\n # Fit a line.\n from scipy import stats\n k, b, r, pval, stderr = stats.linregress(x=errorbar[\"x\"], y=errorbar[\"y\"])\n linear_interp_x = np.array(errorbar[\"x\"])\n linear_interp_y = k * linear_interp_x + b\n plot = dict(x=linear_interp_x, y=linear_interp_y, color=color, label=f\"{task} ($R^2={r ** 2:.3f}$)\")\n\n ebars.append(errorbar)\n plots.append(plot)\n\n ood_dataset_name = dataset2name[ood_dataset]\n img_path = (\n f\"/Users/xuechenli/remote/swissknife/experiments/priv_fair/plots/acc_on_the_line/cifar-10->{ood_dataset}\"\n )\n utils.plot_wrapper(\n img_path=img_path,\n suffixes=('.png', '.pdf'),\n errorbars=ebars,\n plots=plots,\n options=dict(xlabel=\"CIFAR-10 accuracy\", ylabel=f\"{ood_dataset_name} accuracy\")\n )\n\n\nif __name__ == \"__main__\":\n fire.Fire(main)\n",
"import numpy as np\nimport os\nimport shutil\nimport sys\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch\n\n\ndef model_input(data, device):\n datum = data.data[0:1]\n if isinstance(datum, np.ndarray):\n return torch.from_numpy(datum).float().to(device)\n else:\n return datum.float().to(device)\n\n\ndef get_script():\n py_script = os.path.basename(sys.argv[0])\n return os.path.splitext(py_script)[0]\n\n\ndef get_specified_params(hparams):\n keys = [k.split(\"=\")[0][2:] for k in sys.argv[1:]]\n specified = {k: hparams[k] for k in keys}\n return specified\n\n\ndef make_hparam_str(hparams, exclude):\n return \",\".join([f\"{key}_{value}\"\n for key, value in sorted(hparams.items())\n if key not in exclude])\n\n\nclass Logger(object):\n def __init__(self, logdir):\n\n if logdir is None:\n self.writer = None\n else:\n if os.path.exists(logdir) and os.path.isdir(logdir):\n shutil.rmtree(logdir)\n\n self.writer = SummaryWriter(log_dir=logdir)\n\n def log_model(self, model, input_to_model):\n if self.writer is None:\n return\n self.writer.add_graph(model, input_to_model)\n\n def log_epoch(self, epoch, train_loss, train_acc, test_loss, test_acc, epsilon=None):\n if self.writer is None:\n return\n self.writer.add_scalar(\"Loss/train\", train_loss, epoch)\n self.writer.add_scalar(\"Loss/test\", test_loss, epoch)\n self.writer.add_scalar(\"Accuracy/train\", train_acc, epoch)\n self.writer.add_scalar(\"Accuracy/test\", test_acc, epoch)\n\n if epsilon is not None:\n self.writer.add_scalar(\"Acc@Eps/train\", train_acc, 100*epsilon)\n self.writer.add_scalar(\"Acc@Eps/test\", test_acc, 100*epsilon)\n\n def log_scalar(self, tag, scalar_value, global_step):\n if self.writer is None or scalar_value is None:\n return\n self.writer.add_scalar(tag, scalar_value, global_step)\n\n",
"import math\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor, device\nfrom torch import nn\nimport torch.utils.checkpoint\nfrom transformers.activations import ACT2FN\nfrom transformers.modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n)\nfrom transformers.modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom transformers.models.bert.configuration_bert import BertConfig\nfrom transformers.utils import logging\n\nlogger = logging.get_logger(__name__)\n\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word and position embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n\n self.config = config\n\n def forward(\n self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length: seq_length + past_key_values_length]\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n embeddings = inputs_embeds\n\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config, is_cross_attention):\n super().__init__()\n self.config = config\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n if is_cross_attention:\n self.key = nn.Linear(config.encoder_width, self.all_head_size)\n self.value = nn.Linear(config.encoder_width, self.all_head_size)\n else:\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or 
self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n self.save_attention = False\n\n def save_attn_gradients(self, attn_gradients):\n self.attn_gradients = attn_gradients\n\n def get_attn_gradients(self):\n return self.attn_gradients\n\n def save_attention_map(self, attention_map):\n self.attention_map = attention_map\n\n def get_attention_map(self):\n return self.attention_map\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() 
function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n if is_cross_attention and self.save_attention:\n self.save_attention_map(attention_probs)\n attention_probs.register_hook(self.save_attn_gradients)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs_dropped = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs_dropped = attention_probs_dropped * head_mask\n\n context_layer = torch.matmul(attention_probs_dropped, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n outputs = outputs + (past_key_value,)\n return outputs\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config, twin=False, merge=False):\n super().__init__()\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n if twin:\n self.dense0 = nn.Linear(config.hidden_size, config.hidden_size)\n self.dense1 = nn.Linear(config.hidden_size, config.hidden_size)\n else:\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if merge:\n self.act = ACT2FN[config.hidden_act]\n self.merge_layer = nn.Linear(config.hidden_size * 2, config.hidden_size)\n self.merge = True\n else:\n self.merge = False\n\n def forward(self, hidden_states, input_tensor):\n if type(hidden_states) == list:\n hidden_states0 = self.dense0(hidden_states[0])\n hidden_states1 = self.dense1(hidden_states[1])\n if self.merge:\n # hidden_states = self.merge_layer(self.act(torch.cat([hidden_states0,hidden_states1],dim=-1)))\n hidden_states = self.merge_layer(torch.cat([hidden_states0, hidden_states1], dim=-1))\n else:\n hidden_states = (hidden_states0 + hidden_states1) / 2\n else:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config, is_cross_attention=False, layer_num=-1):\n super().__init__()\n if is_cross_attention:\n self.self0 = BertSelfAttention(config, is_cross_attention)\n self.self1 = BertSelfAttention(config, is_cross_attention)\n else:\n self.self = BertSelfAttention(config, is_cross_attention)\n self.output = BertSelfOutput(config, twin=is_cross_attention, merge=(is_cross_attention and layer_num >= 6))\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * 
self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n if type(encoder_hidden_states) == list:\n self_outputs0 = self.self0(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states[0],\n encoder_attention_mask[0],\n past_key_value,\n output_attentions,\n )\n self_outputs1 = self.self1(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states[1],\n encoder_attention_mask[1],\n past_key_value,\n output_attentions,\n )\n attention_output = self.output([self_outputs0[0], self_outputs1[0]], hidden_states)\n\n outputs = (attention_output,) + self_outputs0[1:] # add attentions if we output them\n else:\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config, layer_num):\n super().__init__()\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = BertAttention(config)\n self.layer_num = layer_num\n if self.config.add_cross_attention:\n self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention,\n layer_num=layer_num)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n mode=None,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n )\n attention_output = self_attention_outputs[0]\n\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n\n if mode == 'multimodal':\n assert encoder_hidden_states is not None, \"encoder_hidden_states must be given for cross-attention 
layers\"\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions=output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[\n 1:-1] # add cross attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n mode='multimodal',\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n\n for i in range(self.config.num_hidden_layers):\n layer_module = self.layer[i]\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if self.gradient_checkpointing and self.training:\n\n if use_cache:\n logger.warn(\n \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n mode=mode,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n mode=mode,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading 
pretrained\n models.\n \"\"\"\n\n config_class = BertConfig\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nclass BertModel(BertPreTrainedModel):\n \"\"\"\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = BertEmbeddings(config)\n\n self.encoder = BertEncoder(config)\n\n self.pooler = BertPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device,\n is_decoder: bool) -> Tensor:\n \"\"\"\n Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\n Arguments:\n attention_mask (:obj:`torch.Tensor`):\n Mask with ones indicating tokens to attend to, zeros for tokens to ignore.\n input_shape (:obj:`Tuple[int]`):\n The shape of the input to the model.\n device: (:obj:`torch.device`):\n The device of the input to the model.\n\n Returns:\n :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.\n \"\"\"\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - if the model is a decoder, apply a causal mask in addition to the padding mask\n # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length,\n # seq_length]\n if is_decoder:\n batch_size, seq_length = input_shape\n\n seq_ids = torch.arange(seq_length, device=device)\n causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n # in case past_key_values are used we need to add a prefix ones mask to the causal mask\n # causal and attention masks must have same type with pytorch version < 1.3\n causal_mask = causal_mask.to(attention_mask.dtype)\n\n if causal_mask.shape[1] < attention_mask.shape[1]:\n prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]\n causal_mask = torch.cat(\n [\n torch.ones((batch_size, seq_length, prefix_seq_len), device=device,\n dtype=causal_mask.dtype),\n causal_mask,\n ],\n axis=-1,\n )\n\n extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]\n else:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(\n \"Wrong shape for input_ids (shape {}) or attention_mask (shape {})\".format(\n input_shape, attention_mask.shape\n )\n )\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n mode='multimodal',\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`,\n `optional`):\n Sequence of hidden-states at 
the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple\n having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n device = input_ids.device\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n device = inputs_embeds.device\n elif encoder_embeds is not None:\n input_shape = encoder_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n device = encoder_embeds.device\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds or encoder_embeds\")\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,\n device, is_decoder)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if encoder_hidden_states is not None:\n if type(encoder_hidden_states) == list:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()\n else:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n\n if type(encoder_attention_mask) == list:\n 
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]\n elif encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n if encoder_embeds is None:\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n else:\n embedding_output = encoder_embeds\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n mode=mode,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n"
] | [
[
"torch.stack",
"torch.logsumexp",
"torch.cat"
],
[
"numpy.std",
"scipy.stats.linregress",
"numpy.array",
"numpy.mean"
],
[
"torch.from_numpy",
"torch.utils.tensorboard.SummaryWriter"
],
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.ones",
"torch.zeros",
"torch.cat",
"torch.einsum",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.matmul",
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ColinRioux/DeepPseudo | [
"f7b2ec1d5c60ae15b5cdb5dcc8de8b4d2f354340"
] | [
"src/EncoderLayer.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom src.EncoderBlockLayer import EncoderBlockLayer\nfrom src.PositionalEncodingLayer import PositionalEncodingLayer\n\nclass EncoderLayer(nn.Module):\n\n def __init__(self, vocab_size, max_len, d_model, n_heads, hidden_size, kernel_size, dropout, n_layers, scale, seq_thresh):\n super(EncoderLayer, self).__init__()\n self.vocab_size = vocab_size\n self.max_len = max_len\n self.d_model = d_model\n self.n_heads = n_heads\n self.hidden_size = hidden_size\n self.kernel_size = kernel_size\n self.dropout = nn.Dropout(p=dropout)\n self.n_layers = n_layers\n self.scale = scale\n self.token_embedding = nn.Embedding(vocab_size, d_model)\n self.position_encoding = PositionalEncodingLayer(d_model=d_model, max_len=max_len)\n self.encoder_block_layers = nn.ModuleList(\n [EncoderBlockLayer(d_model=d_model, n_heads=n_heads, hidden_size=hidden_size,\n dropout=dropout,seq_thresh=seq_thresh) for _ in range(n_layers)])\n self.fc_embedding_hidden = nn.Linear(d_model, hidden_size)\n self.fc_hidden_embedding = nn.Linear(hidden_size, d_model)\n self.conv1ds = nn.ModuleList([nn.Conv1d(hidden_size, hidden_size * 2, kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2) for _ in range(n_layers)])\n\n def forward(self, src_sequences, src_mask):\n \"\"\"\n :param Tensor[batch_size, src_len] src_sequences\n :param Tensor[batch_size, src_len] src_mask\n :return Tensor[batch_size, src_len, d_model] outputs\n \"\"\"\n token_embedded = self.token_embedding(src_sequences) # [batch_size, src_len, d_model]\n position_encoded = self.position_encoding(src_sequences) # [batch_size, src_len, d_model]\n outputs = self.dropout(token_embedded) + position_encoded # [batch_size, src_len, d_model]\n\n embedded = outputs\n for layer in self.encoder_block_layers:\n outputs = layer(src_inputs=outputs, src_mask=src_mask) # [batch_size, src_len, d_model]\n\n conv_output = self.fc_embedding_hidden(embedded) # [batch_size, src_len, hidden_size]\n conv_output = conv_output.permute(0, 2, 1) # [batch_size, hidden_size, src_len]\n for conv1d in self.conv1ds:\n conv_output = self.dropout(conv_output)\n conved = conv1d(conv_output) # [batch_size, hidden_size * 2, src_len]\n conved = F.glu(conved, dim=1) # [batch_size, hidden_size, src_len]\n conv_output = (conved + conv_output) * self.scale # [batch_size, hidden_size, src_len] Residual connection\n conv_output = conv_output.permute(0, 2, 1) # [batch_size, src_len, hidden_size]\n conv_output = self.fc_hidden_embedding(conv_output) # [batch_size, src_len, d_model]\n\n outputs = outputs + conv_output\n return outputs"
] | [
[
"torch.nn.Dropout",
"torch.nn.functional.glu",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.nn.Conv1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jmcabreira/Dynamic-risk-assessment-system | [
"9ad320b12eb8948345743e403a6a49268de72858"
] | [
"scoring.py"
] | [
"from flask import Flask, session, jsonify, request\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport os\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nimport json\n\n#################Load config.json and get path variables\nwith open('config.json','r') as f:\n config = json.load(f) \n\ntest_data_path = os.path.join(config['test_data_path'], 'testdata.csv') \nmodel_path = os.path.join(config['output_model_path'], 'trainedmodel.pkl') \nscore_path = os.path.join(config['output_model_path'], 'latestscore.txt') \n\n#################Function for model scoring\ndef score_model(test_data_path):\n #this function should take a trained model, load test data, and calculate an F1 score for the model relative to the test data\n #it should write the result to the latestscore.txt file\n test_df = pd.read_csv(test_data_path)\n\n X_test = test_df.drop(['corporation', 'exited'], axis=1)\n y_test = test_df['exited']\n \n # Read model\n with open(model_path , 'rb') as file:\n model = pickle.load(file)\n \n # model scoring\n y_pred = model.predict(X_test)\n print(\"pred: \", (y_pred))\n print(\"teste: \",(y_test.values))\n f1_score = metrics.f1_score(y_test.values, y_pred)\n print(f'F1 Score: {f1_score}')\n\n print(f\"Savind F1 score in {score_path}\")\n with open(score_path, 'w') as file:\n file.write(str(f1_score))\n\n return f1_score\n\n\nif __name__ == '__main__':\n f1_score = score_model(test_data_path)\n\n"
] | [
[
"sklearn.metrics.f1_score",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Data-Science-in-Mechanical-Engineering/edge | [
"586eaba2f0957e75940f4f19fa774603f57eae89",
"586eaba2f0957e75940f4f19fa774603f57eae89",
"586eaba2f0957e75940f4f19fa774603f57eae89",
"586eaba2f0957e75940f4f19fa774603f57eae89"
] | [
"edge/gym_wrappers/environment_wrapper.py",
"edge/graphics/subplotter/sample_subplotter.py",
"edge/space/space.py",
"test/safety_models_test.py"
] | [
"import gym.spaces as gspaces\n\nfrom edge.envs.environments import Environment\nfrom edge.space import StateActionSpace\nfrom . import BoxWrapper, DiscreteWrapper\n\n\nclass DummyDynamics:\n def __init__(self, stateaction_space):\n self.stateaction_space = stateaction_space\n\n @property\n def state_space(self):\n return self.stateaction_space.state_space\n\n @property\n def action_space(self):\n return self.stateaction_space.action_space\n\n def is_feasible_state(self, s):\n return True\n\n\ndef get_index_length(gym_box):\n return len(gym_box.low.reshape(-1))\n\n\nclass GymEnvironmentWrapper(Environment):\n def __init__(self, gym_env, shape=None, failure_critical=False,\n control_frequency=None, **kwargs):\n self.gym_env = gym_env\n if shape is None:\n obs_shape = None\n action_shape = None\n if isinstance(gym_env.action_space, gspaces.Box):\n if shape is not None:\n action_space_ndim = get_index_length(gym_env.action_space)\n action_shape = shape[-action_space_ndim:]\n action_space = BoxWrapper(\n gym_env.action_space, discretization_shape=action_shape,\n **kwargs\n )\n elif isinstance(gym_env.action_space, gspaces.Discrete):\n action_space = DiscreteWrapper(gym_env.action_space)\n else:\n raise TypeError(f'Gym environment action_space is of type {type(gym_env.action_space)}, but only Box '\n 'and Discrete are currently supported')\n\n if isinstance(gym_env.observation_space, gspaces.Box):\n if shape is not None:\n state_space_ndim = get_index_length(gym_env.observation_space)\n obs_shape = shape[:state_space_ndim]\n state_space = BoxWrapper(\n gym_env.observation_space,\n discretization_shape=obs_shape,\n **kwargs\n )\n elif isinstance(gym_env.observation_space, gspaces.Discrete):\n state_space = DiscreteWrapper(gym_env.observation_space)\n else:\n raise TypeError(f'Gym environment observation_space is of type {type(gym_env.observation_space)}, but only '\n 'Box and Discrete are currently supported')\n\n self.info = {}\n self._done = False\n self.failure_critical = failure_critical\n self.control_frequency = control_frequency\n\n dynamics = DummyDynamics(StateActionSpace(state_space, action_space))\n\n super(GymEnvironmentWrapper, self).__init__(\n dynamics=dynamics,\n reward=None,\n default_initial_state=gym_env.reset(),\n random_start=True\n )\n\n @property\n def in_failure_state(self):\n cost = self.info.get('cost')\n return cost is not None and cost != 0\n\n @property\n def done(self):\n return self._done\n\n @property\n def has_failed(self):\n # Same as in_failure_state, for compatibility with generic Environment\n return self.in_failure_state\n\n def reset(self, s=None):\n # Usually, Environments take s as a parameter, but this is not supported by safety_gym, so we\n # raise a meaningful error for the user\n if s is not None:\n raise ValueError('Selecting the initial state is not supported for Gym environments')\n reset_output = self.gym_env.reset()\n # Safety gym does not return anything with reset, whereas Gym returns\n # the state\n if reset_output is None:\n self.s = self.gym_env.obs()\n else:\n self.s = reset_output\n self._done = self.in_failure_state\n return self.s\n\n def step(self, action):\n gym_action = self.dynamics.action_space.to_gym(action)\n\n def do_one_gym_step():\n if not self.failure_critical or not self.has_failed:\n gym_new_state, reward, done, info = self.gym_env.step(\n gym_action\n )\n s = self.dynamics.state_space.from_gym(gym_new_state)\n # Gym does not put a hard constraint on the fact that the state\n # stays in the limit of the Box. 
Edge crashes if this happens,\n # so we project the resulting state in state-space\n s = self.dynamics.state_space.closest_in(s)\n else:\n reward = 0\n return s, reward, done, info\n\n step_done = False\n n_gym_steps = 0\n while not step_done:\n self.s, reward, self._done, _ = do_one_gym_step()\n n_gym_steps += 1\n step_done = (self.control_frequency is None) or \\\n (n_gym_steps >= self.control_frequency) or \\\n (self._done)\n\n return self.s, reward, self.has_failed\n\n def render(self):\n self.gym_env.render()\n\n def compute_dynamics_map(self):\n # General note: Q_map stores the index of the next state. This\n # approximates the dynamics by projecting the state we end up in, and\n # may lead to errors. A more precise implementation would keep the\n # exact value of the next state instead of its index. So far, this\n # method is only used for the computation of the viability sets, and\n # this requires the index of the next state: implementing the more\n # precise method is useless for this.\n # However, the following implementation may need to change if this\n # method is used for something else.\n\n import numpy as np\n unwrapped_gym_env = self.gym_env.unwrapped\n Q_map = np.zeros(self.stateaction_space.shape, dtype=tuple)\n for sa_index, stateaction in iter(self.stateaction_space):\n state, action = self.stateaction_space.get_tuple(stateaction)\n state = self.state_space.to_gym(state)\n action = self.action_space.to_gym(action)\n unwrapped_gym_env.state = state\n next_state, reward, failed = self.step(action)\n next_state = self.state_space.from_gym(next_state)\n # Gym does not ensure the stability of the stateaction space under\n # the dynamics, so we enforce it.\n # This may lead to edge effects.\n next_state = self.state_space.closest_in(next_state)\n next_state_index = self.state_space.get_index_of(\n next_state, around_ok=True\n )\n Q_map[sa_index] = next_state_index\n return Q_map",
"from . import Subplotter\nimport numpy as np\n\n\ndef masked(to_mask, mask):\n return [item for item, keep in zip(to_mask, mask) if keep]\n\n\nclass SampleSubplotter(Subplotter):\n def __init__(self, colors):\n super(SampleSubplotter, self).__init__(colors)\n self.failed_samples = []\n self.unfailed_samples = []\n self.failed_colors = []\n self.unfailed_colors = []\n self.failed_markers = []\n self.unfailed_markers = []\n\n def incur_sample(self, state, action, failed, color=None, marker=None):\n if color is None:\n color = [0.9, 0.3, 0.3]\n # States and actions are stored in np arrays of shape (1,) (since we\n # are plotting them)\n if failed:\n marker = marker if marker is not None else 'x'\n self.failed_samples.append((state[0], action[0]))\n self.failed_colors.append(color)\n self.failed_markers.append(marker)\n else:\n marker = marker if marker is not None else '.'\n self.unfailed_samples.append((state[0], action[0]))\n self.unfailed_colors.append(color)\n self.unfailed_markers.append(marker)\n\n def flush_samples(self):\n self.failed_samples = []\n self.unfailed_samples = []\n self.failed_colors = []\n self.unfailed_colors = []\n self.failed_markers = []\n self.unfailed_markers = []\n\n def ensure_samples_in_at_least_one(self, *datasets):\n dataset = np.unique(\n np.vstack(datasets),\n axis=0\n )\n\n def is_in_dataset(to_check):\n return [np.isclose(x, dataset).all(axis=1).any() for x in to_check]\n failed_in = is_in_dataset(self.failed_samples)\n unfailed_in = is_in_dataset(self.unfailed_samples)\n\n def filter_list(to_filter, keep_bools):\n return [x for x, keep in zip(to_filter, keep_bools) if keep]\n self.failed_samples = filter_list(self.failed_samples, failed_in)\n self.unfailed_samples = filter_list(self.unfailed_samples, unfailed_in)\n self.failed_colors = filter_list(self.failed_colors, failed_in)\n self.unfailed_colors = filter_list(self.unfailed_colors, unfailed_in)\n self.failed_markers = filter_list(self.failed_markers, failed_in)\n self.unfailed_markers = filter_list(self.unfailed_markers, failed_in)\n\n def draw_on_axs(self, ax_Q):\n def scatter_stateactions(stateactions, colors, markers):\n markers_set = set(markers)\n for marker in markers_set:\n fltr = [m == marker for m in markers]\n if any(fltr):\n states, actions = zip(*masked(stateactions, fltr))\n ax_Q.scatter(\n actions,\n states,\n color=masked(colors, fltr),\n s=60,\n marker=marker,\n edgecolors='none'\n )\n\n if len(self.failed_samples) > 0:\n scatter_stateactions(self.failed_samples, self.failed_colors,\n self.failed_markers)\n if len(self.unfailed_samples) > 0:\n scatter_stateactions(self.unfailed_samples, self.unfailed_colors,\n self.unfailed_markers)\n",
"from itertools import product\nimport numpy as np\n\nfrom edge import error\n\n\nclass Space:\n \"\"\"Base data structure to handle state, action, and stateaction spaces\n A Space is an object that contains elements, from where we can sample, and that has limits\n \"\"\"\n def __init__(self):\n pass\n\n def contains(self, x):\n \"\"\"\n Abstract method.\n :param x:\n :return: True iff x is in the Space\n \"\"\"\n raise NotImplementedError\n\n def __contains__(self, x):\n return self.contains(x)\n\n def sample(self):\n \"\"\"Abstract method.\n Samples an element from the Space\n \"\"\"\n raise NotImplementedError\n\n def closest_in(self, x):\n \"\"\" Abstract method\n Returns the closest element in the Space\n :param x:\n :return:\n \"\"\"\n raise NotImplementedError\n\n @property\n def limits(self):\n \"\"\"Returns the limits of the Space\n :return: tuple<float>\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def element(*x):\n \"\"\"\n Wraps the arguments with the data structure used by Space elements, i.e., a np.ndarray. Approximately\n equivalent to np.atleast_1d.\n :param x: The list of arguments\n :return: element: np.ndarray\n \"\"\"\n if len(x) == 1:\n return np.atleast_1d(x[0])\n else:\n return np.atleast_1d(x)\n\n\nclass DiscretizableSpace(Space):\n \"\"\"\n Data structure to handle discretizable Spaces. The main difference is that a DiscretizableSpace can be indexed.\n The discretization is only useful when indexing the space, and not when checking that something is an element of\n the space\n \"\"\"\n def __init__(self, index_shape):\n \"\"\"\n :param index_shape: tuple: the shape of the discretization. This would correspond to the shape of the numpy\n array if you used it instead of a DiscretizableSpace\n \"\"\"\n super(DiscretizableSpace, self).__init__()\n\n self.index_shape = index_shape\n self.index_dim = len(self.index_shape)\n # DiscretizableSpaces are more complex than np.ndarrays. Indeed, an additional dimension is created after the\n # last indexing dimension. 
Then, the space can be seen as a np.ndarray of dimension `index_dim` whose values\n # are np.ndarrays of shape `(data_length,)`\n self.data_length = self.index_dim\n\n @property\n def shape(self):\n return self.index_shape\n\n def contains(self, x):\n raise NotImplementedError\n\n def is_on_grid(self, x):\n \"\"\" Abstract method\n :param x: the element of the space\n :return: boolean: True iff the element is on the discretization grid\n \"\"\"\n raise NotImplementedError\n\n def get_index_of(self, x, around_ok=False):\n \"\"\" Abstract method\n Returns the index of an element\n :param x: the element\n :param around_ok: boolean: whether the element should be exactly on the grid (False) or if some tolerance\n is accepted (True)\n :return: boolean\n \"\"\"\n raise NotImplementedError\n\n def __getitem__(self, index):\n \"\"\" Abstract method\n The indexing method\n :param index: the index\n :return: np.ndarray : the item\n \"\"\"\n raise NotImplementedError\n\n def sample_idx(self):\n \"\"\"Samples an index from the space\n :return: tuple\n \"\"\"\n ids = tuple(map(np.random.choice, self.index_shape))\n if len(ids) == 1:\n return ids[0]\n else:\n return ids\n\n def sample(self):\n \"\"\"Samples an element from the space\n :return: np.ndarray\n \"\"\"\n k = self.sample_idx()\n return self[k]\n\n def __iter__(self):\n \"\"\"\n :return: DiscretizableSpaceIterator\n \"\"\"\n return DiscretizableSpaceIterator(self)\n\n\nclass DiscretizableSpaceIterator:\n \"\"\"\n An iterator over a DiscretizableSpace\n \"\"\"\n def __init__(self, space):\n self.space = space\n if space.index_dim > 1:\n self.index_iter = product(\n *[range(l_k) for l_k in space.index_shape]\n )\n else:\n self.index_iter = iter(range(space.index_shape[0]))\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"\n Next item\n :return: tuple<(tuple, np.ndarray)>. The first item is the index of the element, and the second is the\n element itself\n \"\"\"\n index = next(self.index_iter)\n data = self.space[index]\n return (index, data)\n\n\nclass ProductSpace(DiscretizableSpace):\n \"\"\"\n Handles product of spaces. This class mainly implements the __getitem__ method, and provides some helper functions.\n \"\"\"\n def __init__(self, *sets):\n \"\"\"\n Initializer\n :param sets: The list of the sets to take the product. The order matters. Sets can themselves be ProductSpaces.\n Then, the dimensions are flattened.\n \"\"\"\n self._flattened_sets = []\n for s in sets:\n if isinstance(s, ProductSpace):\n for second_order_set in s._flattened_sets:\n self._flattened_sets.append(second_order_set)\n else:\n self._flattened_sets.append(s)\n self._n_flattened_sets = len(self._flattened_sets)\n\n index_shape = tuple([s.index_shape[0] for s in self._flattened_sets])\n\n super(ProductSpace, self).__init__(index_shape)\n\n self.sets = sets\n self.n_sets = len(sets)\n\n self._index_slices = [None] * self.n_sets\n current_index = 0\n for ns in range(self.n_sets):\n end_index = current_index + sets[ns].index_dim\n # We do not use boolean masks for _index_slices because indexing with a mask is only supported for np arrays\n self._index_slices[ns] = slice(current_index, end_index)\n current_index = end_index\n\n def __getitem__(self, index):\n \"\"\"\n The indexing method. Indexes are tuples, and each element should be one of the following:\n * an integer. Then, the corresponding dimension is simply indexed by the integer, as would a np.ndarray be,\n * a slice. 
Then, the corresponding dimension is simply indexed by the slice, as would a np.ndarray be,\n * a np.ndarray of shape (1,). Then, the corresponding dimension has the value in the np.ndarray.\n Finally, if some dimensions are not specified, the index is completed by concatenating as many\n `slice(None, None, None)` to the right as necessary.\n Example: with space = [0,1] x [0,1] x [0,1], with shape (11,?,3)\n space[1,np.ndarray([0.1337])] -> np.ndarray([[0.1, 0.1337, 0],\n [0.1, 0.1337, 0.5],\n [0.1, 0.1337, 1]\n ])\n Note: the typing of the index is only enforced by subclasses. This method does not care what the items in the\n index are.\n :param index: tuple: the index\n :return: np.ndarray\n \"\"\"\n if isinstance(index, np.ndarray):\n if index in self:\n return index\n else:\n index in self\n raise IndexError(f'Index {index} is understood as an element '\n 'of the Space and does not belong to it')\n\n if not isinstance(index, tuple):\n index = tuple([index])\n\n n_missing_indexes = self._n_flattened_sets - len(index)\n index = index + tuple([slice(None, None, None)\n for k in range(n_missing_indexes)])\n\n def get_dim(ns):\n \"\"\"\n Queries the set corresponding to dimension ns with its corresponding index. In general, the set is\n 1-dimensional (Segment or Discrete), since it is a flattened set. Then, the output is of shape (n,1), where\n n is the number of values required by the index\n :param ns: the number of the dimension\n :return: np.ndarray: the elements corresponding to the index on that dimension\n \"\"\"\n return np.atleast_1d(self._flattened_sets[ns][index[ns]])\n\n def isnotslice(x):\n \"\"\"\n Checks whether the argument is a slice\n :param x:\n :return: False iff x is a slice\n \"\"\"\n return not isinstance(x, slice)\n\n list_of_items = list(map(get_dim, list(range(self._n_flattened_sets))))\n item_is_1d = list(map(isnotslice, index))\n\n # NumPy limits the dimension of arrays to 32, so we need to be careful when meshgridding, and only extend\n # the dimensions along which the user has asked for more than 1 value (i.e., a slice)\n items_multidimensional = [item for item, is_1d in zip(list_of_items, item_is_1d) if not is_1d]\n if len(items_multidimensional) > 0:\n items_multidimensional_meshgrid = np.meshgrid(*items_multidimensional, indexing='ij')\n items_shape = items_multidimensional_meshgrid[0].shape\n idx_in_multidim = 0\n for item_index in range(len(list_of_items)):\n if not item_is_1d[item_index]:\n list_of_items[item_index] = items_multidimensional_meshgrid[idx_in_multidim]\n idx_in_multidim += 1\n else:\n assert list_of_items[item_index].shape == (1,)\n value = list_of_items[item_index][0]\n list_of_items[item_index] = value * np.ones(items_shape)\n items = np.stack(list_of_items, axis=-1)\n else:\n # squeeze returns a np scalar if the input is of shape (1,), so we ensure it is still an array\n items = np.atleast_1d(np.stack(list_of_items, axis=0).squeeze())\n\n # squeeze_dim = list(map(isnotslice, index))\n #\n # items_meshgrid = np.meshgrid(*list_of_items, indexing='ij')\n # items = np.stack(items_meshgrid, axis=-1)\n #\n # dims_to_squeeze = tuple([dim\n # for dim in range(len(squeeze_dim))\n # if squeeze_dim[dim]])\n # items = np.squeeze(items, axis=dims_to_squeeze)\n return items\n\n def _get_components(self, x, ns):\n \"\"\"\n Returns the component of element x on dimension ns, where ns indexes over the non-flattened sets.\n :param x: np.ndarray\n :param ns: int\n :return:\n \"\"\"\n return x[self._index_slices[ns]]\n\n def contains(self, x):\n if len(x) != 
self.data_length:\n raise ValueError(f\"Size mismatch: expected size {self.data_length}\"\n f\", got {len(x)}\")\n isin = True\n for ns in range(self.n_sets):\n s = self.sets[ns]\n x_slice = self._get_components(x, ns)\n isin = isin and (x_slice in s)\n\n return isin\n\n def is_on_grid(self, x):\n if len(x) != self.data_length:\n raise ValueError(f\"Size mismatch: expected size {self.data_length}\"\n f\", got {len(x)}\")\n ison = True\n for ns in range(self.n_sets):\n s = self.sets[ns]\n x_slice = self._get_components(x, ns)\n ison = ison and s.is_on_grid(x_slice)\n\n return ison\n\n def get_index_of(self, x, around_ok=False):\n if len(x) != self.data_length:\n raise ValueError(f'Size mismatch: expected size {self.data_length}'\n f', got {len(x)}')\n index = [None] * self.index_dim\n for ns in range(self.n_sets):\n s = self.sets[ns]\n index_slice = self._index_slices[ns]\n index[index_slice] = np.atleast_1d(\n s.get_index_of(x[index_slice], around_ok)\n ).tolist()\n return tuple(index)\n\n def closest_in(self, x):\n if x in self:\n return x\n y = x.copy()\n for ns in range(self.n_sets):\n mask = self._index_slices[ns]\n y[mask] = self.sets[ns].closest_in(x[mask])\n return y\n\n @property\n def limits(self):\n limits = [None] * self._n_flattened_sets\n for ns in range(self._n_flattened_sets):\n s = self._flattened_sets[ns]\n limits[ns] = s.limits\n return tuple(limits)\n\n def get_component(self, x, target):\n \"\"\"\n Returns the component of element x on space target.\n :param x: np.ndarray\n :param target: Space\n :return: np.ndarray: the components of x on target\n \"\"\"\n if target not in self.sets:\n raise error.InvalidTarget\n for ns in range(self.n_sets):\n if self.sets[ns] == target:\n n_target = ns\n break\n else:\n raise error.InvalidTarget\n mask = self._index_slices[n_target]\n return np.hstack(x[mask])\n\n def get_index_component(self, index, target):\n if target not in self.sets:\n raise error.InvalidTarget\n for ns in range(self.n_sets):\n if self.sets[ns] == target:\n n_target = ns\n break\n else:\n raise error.InvalidTarget\n mask = self._index_slices[n_target]\n masked = index[mask]\n if len(masked) == 1:\n return masked[0]\n return masked\n\n def from_components(self, *x_sets):\n out = []\n for k in range(len(x_sets)):\n if isinstance(x_sets[k], np.ndarray):\n out += list(x_sets[k].reshape((-1,1)))\n elif isinstance(x_sets[k], tuple):\n out += x_sets[k]\n else:\n out += (x_sets[k], )\n return tuple(out)\n",
"import unittest\nimport numpy as np\nimport tempfile\nimport gym\n\nfrom edge.envs import Hovership\nfrom edge.gym_wrappers import GymEnvironmentWrapper\nfrom edge.model.safety_models import MaternSafety\n\n\nclass TestHovership(Hovership):\n def __init__(self):\n super(TestHovership, self).__init__(\n random_start=True,\n dynamics_parameters={\n 'control_frequency': 0.1,\n 'ground_gravity': 0.1,\n 'shape': (2, 2)\n }\n )\n\n\nclass TestMeasure(MaternSafety):\n def __init__(self, env, gamma_optimistic, x_seed, y_seed):\n hyperparameters = {\n 'outputscale_prior': (1, 0.1),\n 'lengthscale_prior': (0.1, 0.05),\n 'noise_prior': (0.001, 0.001)\n }\n super(TestMeasure, self).__init__(env, gamma_optimistic,\n x_seed, y_seed,\n gp_params=hyperparameters)\n\n\nclass LunarLander(GymEnvironmentWrapper):\n def __init__(self, discretization_shape):\n gym_env = gym.make('LunarLanderContinuous-v2')\n super(LunarLander, self).__init__(gym_env, discretization_shape)\n\n\nclass TestSafetyMeasure(unittest.TestCase):\n def test_convergence(self):\n tol = 1e-5\n env = TestHovership()\n\n x_seed = np.array([1.8, 0.8])\n y_seed = np.array([1.])\n\n gamma = 0.1\n\n measure = TestMeasure(env=env, gamma_optimistic=gamma, x_seed=x_seed,\n y_seed=y_seed)\n\n epochs = 3\n max_steps = 100\n for episode in range(epochs):\n failed = True\n while failed:\n state = env.reset(s=x_seed[:1])\n failed = env.has_failed\n n_steps = 0\n while not failed and n_steps < max_steps:\n cautious_actions, covar_slice = measure.level_set(\n state, 0.05, gamma, return_covar=True\n )\n cautious_actions = cautious_actions.squeeze()\n covar_slice = covar_slice.squeeze()\n if not cautious_actions.any():\n raise NotImplementedError('Please implement the case where'\n ' no cautious action exists')\n else:\n cautious_indexes = np.argwhere(cautious_actions)\n most_variance_action = np.argmax(\n covar_slice[cautious_actions]\n )\n action = tuple(cautious_indexes[most_variance_action])\n action = env.action_space[action]\n new_state, reward, failed = env.step(action)\n measure.update(state, action, new_state, reward, failed)\n state = new_state\n failed = env.has_failed\n n_steps += 1\n\n final_measure = measure[:, :].reshape(\n env.stateaction_space.shape\n )\n expected_final = np.array([[0, 0], [0, 1]]).astype(np.bool)\n self.assertTrue(\n np.all((final_measure > tol) == expected_final),\n f'Final measure does not match the expected one. Final measure :\\n'\n f'{final_measure}\\nExpected final measure:\\n{expected_final}'\n )\n\n def test_level_set_shape_0(self):\n env = TestHovership()\n\n x_seed = np.array([1., 1.])\n y_seed = np.array([1.])\n\n gamma = 0.1\n\n measure = TestMeasure(env=env, gamma_optimistic=gamma, x_seed=x_seed,\n y_seed=y_seed)\n\n def check_level_set_on_query(query, query_len):\n output_level_shape = (query_len, ) + env.action_space.shape\n output_measure_shape = (query_len,)\n level_set = measure.level_set(query, 0, gamma)\n self.assertEqual(\n level_set.shape,\n output_level_shape,\n 'The level set does not have the right shape. '\n f'Expected shape: {output_level_shape} - '\n f'Actual shape: {level_set.shape}')\n\n meas = measure.measure(query, 0, gamma)\n self.assertEqual(\n meas.shape,\n output_measure_shape,\n 'The measure does not have the expected shape. 
'\n f'Expected shape: {output_measure_shape} - '\n f'Actual shape: {meas.shape}')\n\n s_query = np.array([0.5])\n check_level_set_on_query(s_query, 1)\n s_query = slice(None, None, None)\n check_level_set_on_query(s_query, np.prod(env.state_space.shape))\n\n def test_level_set_shape_1(self):\n env = LunarLander(discretization_shape=tuple(10 for _ in range(8)))\n\n x_seed = np.array([0, 1.4, 0, 0, 0, 0, 0, 0, 1, 0])\n y_seed = np.array([1.])\n\n gamma = 0.1\n\n measure = TestMeasure(env=env, gamma_optimistic=gamma, x_seed=x_seed,\n y_seed=y_seed)\n\n def check_level_set_on_query(query, query_len):\n output_level_shape = (query_len, ) + env.action_space.shape\n output_measure_shape = (query_len,)\n level_set = measure.level_set(query, 0, gamma)\n self.assertEqual(\n level_set.shape,\n output_level_shape,\n 'The level set does not have the right shape. '\n f'Expected shape: {output_level_shape} - '\n f'Actual shape: {level_set.shape}')\n\n meas = measure.measure(query, 0, gamma)\n self.assertEqual(\n meas.shape,\n output_measure_shape,\n 'The measure does not have the expected shape. '\n f'Expected shape: {output_measure_shape} - '\n f'Actual shape: {meas.shape}')\n\n s_query = np.array([0, 1.4, 0, 0, 0, 0, 0, 0])\n check_level_set_on_query(s_query, 1)\n s_query = (\n slice(0, 5, 1),\n np.array([1.4]),\n np.array([0]),\n np.array([0]),\n np.array([0]),\n np.array([0]),\n np.array([0]),\n np.array([0]),\n )\n check_level_set_on_query(s_query, 5)\n\n def test_save_load(self):\n env = Hovership()\n x_seed = np.array([1.45, 0.6])\n y_seed = np.array([0.8])\n x_blank = np.array([0., 0])\n y_blank = np.array([0.])\n hyperparameters = {\n 'outputscale_prior': (0.4, 2),\n 'lengthscale_prior': (0.2, 0.2),\n 'noise_prior': (0.001, 0.002)\n }\n safety = MaternSafety(env, 0.7, x_seed, y_seed, hyperparameters)\n\n tmpdir = 'results/'#tempfile.TemporaryDirectory().name\n safety.save(tmpdir)\n safety.save_samples(tmpdir + 'samples.npz')\n\n blank = MaternSafety.load(tmpdir, env, 0.7, x_blank, y_blank)\n blank.load_samples(tmpdir + 'samples.npz')\n\n self.assertTrue((blank.gp.train_x == safety.gp.train_x).all())\n self.assertEqual(blank.gp.structure_dict, safety.gp.structure_dict)\n\n"
] | [
[
"numpy.zeros"
],
[
"numpy.vstack",
"numpy.isclose"
],
[
"numpy.hstack",
"numpy.stack",
"numpy.ones",
"numpy.atleast_1d",
"numpy.meshgrid"
],
[
"numpy.argwhere",
"numpy.all",
"numpy.argmax",
"numpy.prod",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Kiwi-PUJ/DataLabelling | [
"1b89041dc371720be6254e1efb3ee8665ce69951"
] | [
"main.py"
] | [
"## @package Labelling_app\n# Labelling app software developed with Grabcut\n# \n# @version 1 \n#\n# Pontificia Universidad Javeriana\n# \n# Electronic Enginnering\n# \n# Developed by:\n# - Andrea Juliana Ruiz Gomez\n# Mail: <[email protected]>\n# GitHub: andrearuizg\n# - Pedro Eli Ruiz Zarate\n# Mail: <[email protected]>\n# GitHub: PedroRuizCode\n# \n# With support of:\n# - Francisco Carlos Calderon Bocanegra\n# Mail: <[email protected]>\n# GitHub: calderonf\n# - John Alberto Betancout Gonzalez\n# Mail: <[email protected]>\n# GitHub: JohnBetaCode\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\nfrom PyQt5.QtCore import Qt\nimport cv2\nimport numpy as np\nfrom time import time\nimport random\n\n\n## GUI class\nclass GUI(QMainWindow):\n\n ## The constructor\n #\n # Here you can configure the screen, buttons (rectangle, foreground,\n # iteration, open file, new label, original image, segmented image,\n # labelled image, previous, next, save, exit, reset), labels (video frame,\n # label, show image), spin box (video frames), list (labels), image panel,\n # checkbox (full screen, dark theme) and mouse events\n # @param self The object pointer.\n def __init__(self):\n super().__init__()\n app.setStyle('Fusion')\n\n screen = app.primaryScreen()\n rect = screen.size()\n width = rect.width()\n height = rect.height() - 30\n\n self.setGeometry(10, 10, width, height)\n self.setWindowTitle(\"Kiwi & PUJ - Labelling software\")\n self.setWindowIcon(QIcon(\"media/.icons/ICON.png\"))\n\n self.b_rec = QPushButton(self)\n self.b_rec.setText('&Rectangle')\n self.b_rec.move(((width // 2) - 210), 15)\n self.b_rec.setEnabled(False)\n self.b_rec.setShortcut('Ctrl+r')\n self.b_rec.clicked.connect(self.rectangle_)\n\n self.b_bg = QPushButton(self)\n self.b_bg.setText('&Background')\n self.b_bg.move(((width // 2) - 105), 15)\n self.b_bg.setEnabled(False)\n self.b_bg.setShortcut('Ctrl+b')\n self.b_bg.clicked.connect(self.background_)\n\n self.b_fg = QPushButton(self)\n self.b_fg.setText('&Foreground')\n self.b_fg.move(width // 2, 15)\n self.b_fg.setEnabled(False)\n self.b_fg.setShortcut('Ctrl+f')\n self.b_fg.clicked.connect(self.foreground_)\n\n self.b_it = QPushButton(self)\n self.b_it.setText('&Iteration')\n self.b_it.move(((width // 2) + 105), 15)\n self.b_it.setEnabled(False)\n self.b_it.setShortcut('Ctrl+i')\n self.b_it.clicked.connect(self.iteration_)\n\n f_open = QPushButton(self)\n f_open.setText('&Open file')\n f_open.setIcon(QIcon('media/.icons/file.png'))\n f_open.move(10, 15)\n f_open.setShortcut('Ctrl+o')\n f_open.clicked.connect(self.open_)\n\n t1 = QLabel(self)\n t1.setText(\"Video frames\")\n t1.move(10, height - 175)\n\n self.spin = QSpinBox(self)\n self.spin.move(10, height - 150)\n self.spin.setValue(30)\n self.spin.setRange(1, 999)\n self.spin.valueChanged.connect(self.sh_spin_val)\n\n t1 = QLabel(self)\n t1.setText(\"Labels\")\n t1.move(10, 90)\n\n self.b_new = QPushButton(self)\n self.b_new.setText('&New')\n self.b_new.setIcon(QIcon('media/.icons/new.png'))\n self.b_new.setEnabled(False)\n self.b_new.setShortcut('Ctrl+n')\n self.b_new.move(10, 120)\n self.b_new.clicked.connect(self.new_label)\n\n labels = open('/tmp/labels.txt', 'r').read()\n self.labels = list(labels.split(\"\\n\"))\n\n self.Label_n = QComboBox(self)\n for n in range(len(self.labels) - 1):\n self.Label_n.addItem(self.labels[n])\n self.Label_n.move(10, 150)\n self.Label_n.setEnabled(False)\n self.Label_n.activated[str].connect(self.sel_LN)\n\n t2 = QLabel(self)\n 
t2.setText(\"Show image\")\n t2.move(10, height // 2)\n\n self.b_or = QPushButton(self)\n self.b_or.setText('Original')\n self.b_or.move(10, (height // 2) + 30)\n self.b_or.setEnabled(False)\n self.b_or.clicked.connect(self.b_or_)\n\n self.b_seg = QPushButton(self)\n self.b_seg.setText('Segmented')\n self.b_seg.move(10, (height // 2) + 60)\n self.b_seg.setEnabled(False)\n self.b_seg.clicked.connect(self.b_seg_)\n\n self.b_lab = QPushButton(self)\n self.b_lab.setText('Labels')\n self.b_lab.move(10, (height // 2) + 90)\n self.b_lab.setEnabled(False)\n self.b_lab.clicked.connect(self.b_lab_)\n\n self.b_pre = QPushButton(self)\n self.b_pre.setText('Previous')\n self.b_pre.setIcon(QIcon('media/.icons/undo.png'))\n self.b_pre.move(10, height - 110)\n self.b_pre.setShortcut('Ctrl+Left')\n self.b_pre.setEnabled(False)\n self.b_pre.clicked.connect(self.b_pre_)\n\n self.b_nxt = QPushButton(self)\n self.b_nxt.setText('Next')\n self.b_nxt.setIcon(QIcon('media/.icons/redo.png'))\n self.b_nxt.move(10, height - 80)\n self.b_nxt.setShortcut('Ctrl+Right')\n self.b_nxt.setEnabled(False)\n self.b_nxt.clicked.connect(self.b_nxt_)\n\n self.b_sav = QPushButton(self)\n self.b_sav.setText('&SAVE')\n self.b_sav.setIcon(QIcon('media/.icons/save.png'))\n self.b_sav.move(10, height - 30)\n self.b_sav.setEnabled(False)\n self.b_sav.setShortcut('Ctrl+s')\n self.b_sav.clicked.connect(self.b_sav_)\n\n b_ext = QPushButton(self)\n b_ext.setText('EXIT')\n b_ext.setIcon(QIcon('media/.icons/exit.png'))\n b_ext.move(width - 110, height - 30)\n b_ext.clicked.connect(self.b_ext_)\n\n b_res = QPushButton(self)\n b_res.setText('RESET')\n b_res.move(width - 110, height - 80)\n b_res.clicked.connect(self.reset_)\n\n self.image_1 = QLabel(self)\n self.image_1.resize(640, 480)\n self.image_1.move((width // 2) - 320, (height // 2) - 200)\n\n self.dark = QCheckBox(self)\n self.dark.setText('Dark theme')\n self.dark.setChecked(True)\n self.dark.move((width - 110), 15)\n self.dark.toggled.connect(self.dark_)\n\n self.fs = QCheckBox(self)\n self.fs.setText('Full Screen')\n self.fs.setChecked(True)\n self.fs.move((width - 110), 35)\n self.fs.toggled.connect(self.fullScreen_)\n\n self.fullScreen_()\n self.show()\n self.reset_()\n self.dark_()\n\n self.image_1.mousePressEvent = self.mouse_down\n self.image_1.mouseMoveEvent = self.mouse_move\n self.image_1.mouseReleaseEvent = self.mouse_up\n\n ## Label selection function\n #\n # Select the label of the segmented image, and created the labelled image\n # file\n # @param self The object pointer.\n # @param text Label gave by the user\n def sel_LN(self, text):\n for n in range(len(self.labels) - 1):\n if text == self.labels[n]:\n self.contour_()\n self.colors = tuple(self.colors)\n cv2.drawContours(self.img_out, self.contours, -1, n + 1,\n thickness=cv2.FILLED)\n cv2.drawContours(self.img_label, self.contours, -1,\n self.colors[n], thickness=cv2.FILLED)\n\n ## Contour function\n #\n # Determine the contour of the segmented image\n # @param self The object pointer.\n def contour_(self):\n imgray = cv2.cvtColor(self.out, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(imgray, 1, 255, 0)\n self.contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n ## New label button function\n #\n # Set enable flag true\n # @param self The object pointer.\n def new_label(self):\n self.Label_n.setEnabled(True)\n self.b_lab_()\n\n ## Original picture button\n #\n # Show original picture\n # @param self The object pointer.\n def b_or_(self):\n self.showImage_(self.img_in)\n\n 
## Segmented picture button\n #\n # Show segmented picture\n # @param self The object pointer.\n def b_seg_(self):\n self.showImage_(self.out)\n\n ## Labelled picture button\n #\n # Show labelled picture\n # @param self The object pointer.\n def b_lab_(self):\n self.showImage_(self.img_label)\n\n ## Spin value function\n #\n # Update video frame variable\n # @param self The object pointer.\n def sh_spin_val(self):\n self.value_sp = self.spin.value()\n\n ## Previous image button function\n #\n # Disable buttons and show warnings\n # @param self The object pointer.\n def b_pre_(self):\n if self.flag_save == 0:\n self.b_sav.setEnabled(False)\n self.b_bg.setEnabled(False)\n self.b_fg.setEnabled(False)\n self.b_it.setEnabled(False)\n self.b_new.setEnabled(False)\n self.Label_n.setEnabled(False)\n self.b_or.setEnabled(False)\n self.b_seg.setEnabled(False)\n self.b_lab.setEnabled(False)\n if self.flag_save == 1:\n self.show_alert()\n if self.file_vid == 0:\n self.file_num -= 1\n self.load()\n else:\n if self.flag_vid == 1:\n self.flag_vid = 0\n self.file_num -= 2\n self.frame_act -= int(self.value_sp)\n self.load_vid()\n else:\n self.flag_save = 0\n self.show_alert()\n\n ## Next image button function\n #\n # Disable buttons and show warnings\n # @param self The object pointer.\n def b_nxt_(self):\n if self.flag_save == 0:\n self.b_sav.setEnabled(False)\n self.b_bg.setEnabled(False)\n self.b_fg.setEnabled(False)\n self.b_it.setEnabled(False)\n self.b_new.setEnabled(False)\n self.Label_n.setEnabled(False)\n self.b_or.setEnabled(False)\n self.b_seg.setEnabled(False)\n self.b_lab.setEnabled(False)\n if self.file_vid == 0:\n self.file_num += 1\n self.load()\n else:\n self.frame_act += int(self.value_sp)\n self.load_vid()\n else:\n self.flag_save = 0\n self.show_alert()\n\n ## Save button function\n #\n # Save files\n # - for pictures only save the labelled mask\n # - for videos save labelled mask and original frame\n # @param self The object pointer.\n def b_sav_(self):\n str = (self.filename[self.file_num].split(\".\")[0]).split(\"/\")[-1]\n if self.file_vid == 0:\n outfile = 'media/%s-mask.png' % (str)\n outfile1 = 'media/%s.png' % (str)\n else:\n outfile = 'media/%s-frame-%s-mask.png' % (str, self.frame_act)\n outfile1 = 'media/%s-frame-%s.png' % (str, self.frame_act)\n original = '%s' % self.filename[self.file_num].split(\"/\")[-1]\n mask = '%s' % outfile.split(\"/\")[-1]\n tf = '%s' % (time() - self.ti)\n self.d_time[self.frame_num, ...] 
= [original, mask, tf]\n cv2.imwrite(outfile, self.img_out)\n cv2.imwrite(outfile1, self.img_in)\n self.frame_num += 1\n self.flag_save = 0\n self.flag_file = 1\n\n ## Exit button function\n #\n # Save time stamps csv and close app\n # @param self The object pointer.\n def b_ext_(self):\n if self.flag_file == 1:\n np.savetxt(\"media/timestamps.csv\", self.d_time, delimiter=\", \", \n fmt='%s')\n self.close()\n QApplication.quit()\n\n ## Open button function\n #\n # Open file dialog window\n # @param self The object pointer.\n def open_(self):\n self.filename, _ = QFileDialog.getOpenFileNames(None, 'Buscar Imagen',\n '.', 'Image Files (*.png *.jpg *.jpeg *.bmp *.mp4)')\n self.file_num = 0\n self.frame_num = 1\n self.flag_save = 0\n self.flag_vid = 0\n self.file_vid = 0\n self.b_rec.setEnabled(True)\n self.b_pre.setEnabled(True)\n self.b_nxt.setEnabled(True)\n self.load()\n\n ## Load function\n #\n # Open file in open cv\n # @param self The object pointer.\n def load(self):\n self.flag_save = 0\n if self.file_num < len(self.filename):\n if (self.filename[self.file_num].split(\".\")[-1] in \n ['png', 'jpg', 'jpeg', 'bmp']):\n self.img_in = cv2.imread(self.filename[self.file_num], \n cv2.IMREAD_UNCHANGED)\n self.img_in = cv2.resize(self.img_in, (640, 480))\n self.img_copy = self.img_in.copy()\n self.img_out = np.zeros((480, 640), np.uint8)\n self.img_label = self.img_in.copy()\n self.showImage_(self.img_in)\n else:\n self.file_vid = 1\n self.vid = cv2.VideoCapture(self.filename[self.file_num])\n self.length = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))\n self.frame_act = 1\n self.load_vid()\n else:\n self.b_ext_()\n if ((self.file_num == 0) and (self.file_vid == 0)):\n self.b_pre.setEnabled(False)\n else:\n self.b_pre.setEnabled(True)\n if ((self.file_num == (len(self.filename) - 1)) \n and (self.file_vid == 0)):\n self.b_nxt.setEnabled(False)\n else:\n self.b_nxt.setEnabled(True)\n\n ## Load video function\n #\n # Open video frames\n # @param self The object pointer.\n def load_vid(self):\n self.sh_spin_val()\n if self.vid.isOpened():\n if (self.frame_act <= self.length) and (self.frame_act > 0):\n self.vid.set(1, self.frame_act)\n ret, self.img_in = self.vid.read()\n self.img_in = cv2.resize(self.img_in, (640, 480))\n self.img_copy = self.img_in.copy()\n self.img_out = np.zeros((480, 640), np.uint8)\n self.img_label = self.img_in.copy()\n self.showImage_(self.img_in)\n else:\n self.flag_vid = 1\n self.vid.release()\n self.file_vid = 0\n self.file_num += 1\n self.load()\n\n ## Show image function\n #\n # Show picture in Pixmap\n # @param self The object pointer.\n # @param image Image to display.\n def showImage_(self, image):\n size = image.shape\n step = image.size / size[0]\n qformat = QImage.Format_Indexed8\n if len(size) == 3:\n if size[2] == 4:\n qformat = QImage.Format_RGBA8888\n else:\n qformat = QImage.Format_RGB888\n img = QImage(image, size[1], size[0], step, qformat)\n img = img.rgbSwapped()\n self.image_1.setPixmap(QPixmap.fromImage(img))\n self.resize(self.image_1.pixmap().size())\n\n ## Rectangle button function\n #\n # Enable flags to draw rectangle in picture\n # @param self The object pointer.\n def rectangle_(self):\n self.b_bg.setEnabled(True)\n self.b_fg.setEnabled(True)\n self.b_it.setEnabled(True)\n self.flag_rect = True\n self.flag_circle_fg = False\n self.flag_circle_bg = False\n self.ini_points = []\n self.ti = time()\n\n ## Background button function\n #\n # Enable flags to draw the background\n # @param self The object pointer.\n def background_(self):\n 
self.flag_rect = False\n self.flag_circle_fg = False\n self.flag_circle_bg = True\n\n ## Foreground button function\n #\n # Enable flags to draw the foreground\n # @param self The object pointer.\n def foreground_(self):\n self.flag_rect = False\n self.flag_circle_fg = True\n self.flag_circle_bg = False\n\n ## Iteration button function\n #\n # Iteration to make the segmented image\n # @param self The object pointer.\n def iteration_(self):\n self.b_sav.setEnabled(True)\n self.b_new.setEnabled(True)\n self.b_or.setEnabled(True)\n self.b_seg.setEnabled(True)\n self.b_lab.setEnabled(True)\n self.flag_save = 1\n self.flag_rect = False\n self.flag_circle_fg = False\n self.flag_circle_bg = False\n cv2.grabCut(self.img_in, self.mask, None, self.BGD_model,\n self.FGD_model, 1, cv2.GC_INIT_WITH_MASK)\n comp = (self.mask == 1) | (self.mask == 3)\n self.m_out = np.where(comp, 1, 0).astype('uint8')\n self.out = cv2.bitwise_and(self.img_in, self.img_in, mask=self.m_out)\n self.showImage_(self.out)\n\n ## Dark theme function\n #\n # Set dark or white theme\n # @param self The object pointer.\n def dark_(self):\n if self.dark.isChecked() is True:\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(53, 53, 53))\n palette.setColor(QPalette.WindowText, Qt.white)\n palette.setColor(QPalette.Base, QColor(25, 25, 25))\n palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n palette.setColor(QPalette.ToolTipBase, Qt.white)\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, Qt.white)\n palette.setColor(QPalette.Button, QColor(53, 53, 53))\n palette.setColor(QPalette.ButtonText, Qt.white)\n palette.setColor(QPalette.BrightText, Qt.red)\n palette.setColor(QPalette.Link, QColor(42, 130, 218))\n palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n palette.setColor(QPalette.HighlightedText, Qt.black)\n palette.setColor(QPalette.Disabled, QPalette.Base, \n QColor(52, 52, 52))\n palette.setColor(QPalette.Disabled, QPalette.Text, \n QColor(57, 57, 57))\n palette.setColor(QPalette.Disabled, QPalette.Button, \n QColor(47, 47, 47))\n palette.setColor(QPalette.Disabled, QPalette.ButtonText, \n QColor(67, 67, 67))\n palette.setColor(QPalette.Disabled, QPalette.Window, \n QColor(49, 49, 49))\n palette.setColor(QPalette.Disabled, QPalette.WindowText, \n QColor(57, 57, 57))\n self.setPalette(palette)\n if self.dark.isChecked() is False:\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(239, 239, 239))\n palette.setColor(QPalette.WindowText, Qt.black)\n self.setPalette(palette)\n\n ## Show alert function\n #\n # Show alert when the labelled picture is not save\n # @param self The object pointer.\n def show_alert(self):\n warning = QMessageBox(self)\n warning.setIcon(QMessageBox.Warning)\n warning.setText(\"Remember to save the results\")\n warning.setWindowTitle(\"Warning\")\n warning.exec_()\n\n ## Maximized function\n #\n # Maximized window\n # @param self The object pointer.\n def maximized(self):\n self.showMaximized()\n\n ## Full-screen function\n #\n # Full-screen window\n # @param self The object pointer.\n def fullScreen_(self):\n if self.fs.isChecked() is True:\n self.showFullScreen()\n else:\n self.showMaximized()\n\n ## Mouse move function\n #\n # Make the rectangle or circles when user is pressing the mouse\n # @param self The object pointer.\n # @param event The mouse event.\n def mouse_move(self, event):\n x = event.pos().x()\n y = event.pos().y()\n if self.flag_rect is True:\n img_temp_m = self.img_in.copy()\n self.fin_points = [x, 
y]\n self.img_copy = cv2.rectangle(img_temp_m, tuple(self.ini_points), \n tuple(self.fin_points), (0, 0, 255), \n 5)\n if ((self.flag_circle_fg is True) and (self.start is True)):\n cv2.circle(self.img_copy, (x, y), 3, (255, 255, 255), -1)\n cv2.circle(self.mask, (x, y), 5, 1, -1)\n if ((self.flag_circle_bg is True) and (self.start is True)):\n cv2.circle(self.img_copy, (x, y), 3, (0, 0, 0), -1)\n cv2.circle(self.mask, (x, y), 5, 0, -1)\n self.showImage_(self.img_copy)\n\n ## Mouse down function\n #\n # Make the initial points of the rectangle or start circles\n # @param self The object pointer.\n # @param event The mouse event.\n def mouse_down(self, event):\n x = event.pos().x()\n y = event.pos().y()\n if self.flag_rect is True:\n self.ini_points = [x, y]\n if ((self.flag_rect is False) and ((self.flag_circle_fg is True) \n or (self.flag_circle_bg is True))):\n self.start = True\n\n ## Mouse up function\n #\n # Make the final points of the rectangle or finish circles\n # @param self The object pointer.\n # @param event The mouse event.\n def mouse_up(self, event):\n x = event.pos().x()\n y = event.pos().y()\n if self.flag_rect is True:\n img_temp = self.img_in.copy()\n self.fin_points = [x, y]\n self.img_copy = cv2.rectangle(img_temp, tuple(self.ini_points),\n tuple(self.fin_points), (0, 0, 255),\n 5)\n self.mask = np.zeros((480, 640), np.uint8)\n self.mask = cv2.rectangle(self.mask, tuple(self.ini_points), \n tuple(self.fin_points), 3, -1)\n self.flag_rect = False\n self.start = False\n self.showImage_(self.img_copy)\n\n ## Reset function\n #\n # Reset app\n # @param self The object pointer.\n def reset_(self):\n self.flag_file = 0\n self.d_time = np.zeros((10000, 3), dtype='U255')\n self.d_time[0, ...] = ['Img. Original', 'Img. Mask', 'Time (s)']\n self.BGD_model = np.zeros((1, 65), np.float64)\n self.FGD_model = np.zeros((1, 65), np.float64)\n self.ini_points, self.fin_points = [], []\n self.flag_rect = False\n self.flag_circle_fg = False\n self.flag_circle_bg = False\n self.start = False\n self.mask = np.zeros((640, 480), np.uint8)\n img = cv2.imread('media/.icons/INTRO.png', 1)\n img = cv2.resize(img, (640, 480))\n self.colors = np.random.randint(20, 255, (len(self.labels) - 1, 3))\n self.colors = []\n for n in range(len(self.labels) - 1):\n color = []\n for _ in range(3):\n color.append(random.randrange(0, 255))\n self.colors.append(tuple(color))\n self.showImage_(img)\n\n ## @var flag_file\n # It takes 0 value when the user hasn't chosen a file\n #\n # It takes 1 value when the user choose a file\n\n ## @var b_rec\n # Rectangle push button variable\n\n ## @var b_bg\n # Background push button variable\n\n ## @var b_fg\n # Foreground push button variable\n\n ## @var b_it\n # Iteration push button variable\n\n ## @var spin\n # Value of video frame variable at spin box\n\n ## @var b_new\n # New push button variable\n\n ## @var labels\n # List of labels that the user wrote on the labels.txt\n\n ## @var Label_n\n # List of labels\n\n ## @var b_or\n # Original image push button variable\n\n ## @var b_seg\n # Segmented image push button variable\n\n ## @var b_lab\n # Labelled image push button variable\n\n ## @var b_pre\n # Previous image push button variable\n\n ## @var b_nxt\n # Next image push button variable\n\n ## @var b_sav\n # Save image push button variable\n\n ## @var image_1\n # Image panel variable\n\n ## @var dark\n # Dark theme checkbox variable\n\n ## @var fs\n # Full screen checkbox variable\n\n ## @var colors\n # Variable of random colors generated when the application 
begins or \n # restart\n\n ## @var value_sp\n # Value at video frame spin box\n\n ## @var flag_save\n # It takes 0 value when the user hasn't saved a file\n #\n # It takes 1 value when the user save a file\n\n ## @var file_vid\n # It takes 0 value when the file is an image\n #\n # It takes 1 value when the file is a video\n\n ## @var flag_vid\n # Overflow when video frame is the first or the last\n\n ## @var d_time\n # List with the data of timestamps.csv\n\n ## @var file_num\n # Number of file that shows the image panel\n\n ## @var frame_num\n # Number of frame\n\n ## @var img_in\n # Input image or frame\n\n ## @var img_copy\n # Copy of input image or frame\n\n ## @var img_out\n # Output image\n\n ## @var img_label\n # Output labelled image\n\n ## @var vid\n # Video frame\n\n ## @var length\n # Length of video frames\n\n ## @var frame_act\n # Actual video frame\n\n ## @var flag_rect\n # It takes 0 value when the user hasn't pressed rectangle push button\n #\n # It takes 1 value when the user press rectangle push button\n\n ## @var flag_circle_fg\n # It takes 0 value when the user hasn't pressed foreground push button\n #\n # It takes 1 value when the user press foreground push button\n\n ## @var flag_circle_bg\n # It takes 0 value when the user hasn't pressed background push button\n #\n # It takes 1 value when the user press background push button\n\n ## @var ini_points\n # Initial coordinates of mouse at image panel after the rectangle push\n # button was pressed\n\n ## @var ti\n # Previous time. This variable is update after the rectangle push button\n # was pressed\n\n ## @var mask\n # Output mask of Grabcut algorithm. It can It takes 4 posible values:\n #\n # 0 - True background\n # 1 - True foreground\n # 2 - Possible background\n # 3 - Possible foreground\n\n ## @var m_out\n # Output mask. 0 and 2 values It takess 0 value; 1 and 3 values It takess 1 value.\n\n ## @var out\n # Out of segmented image\n\n ## @var fin_points\n # When the mouse is moving, it It takess the actual value of mouse coordinates\n # \n # When the mouse is up, it It takess the last value of mouse coordinates when\n # it was moving\n\n ## @var start\n # It takes 0 value when the user hasn't pressed background or foreground push\n # button. Can It takes 0 value when the user press background or foreground\n # push button and up the mouse in the image panel\n #\n # It takes 1 value when the user press background or foreground push button\n # and press the mouse in the image panel\n\n\n ## @var BGD_model\n # Variable exclusive of Grabcut algorithm\n\n ## @var FGD_model\n # Variable exclusive of Grabcut algorithm\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = GUI()\n sys.exit(app.exec_())\n"
] | [
[
"numpy.savetxt",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhaofang0627/HPBTT | [
"98cec9ff4ef95a01393718b024e9645e77fb70ee"
] | [
"data/background_pose.py"
] | [
"import os\n\nimport cv2\nimport numpy as np\nfrom absl import flags, app\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom .data_utils import RandomCrop\nfrom ..external.hmr.src.util import image as img_util\n\nimport tqdm\n\nbgData = './dataset/PRW-v16.04.20/frames'\nflags.DEFINE_string('PRW_img_path', bgData, 'Background Data Directory')\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\nclass BackgroundDataset(Dataset):\n\n def __getitem__(self, index):\n texture_img_path = self.data[index]\n texture_img = np.array(pil_loader(texture_img_path)) # / 255.0\n if texture_img is None or texture_img.shape[0] <= 0 or texture_img.shape[1] <= 0:\n return self.__getitem__(np.random.randint(0, self.__len__()))\n texture_img = self.random_crop(texture_img)\n if np.random.rand(1) > 0.5:\n # Need copy bc torch collate doesnt like neg strides\n texture_img = texture_img[:, ::-1, :]\n\n texture_img, _ = img_util.scale_and_crop(texture_img, self.scale_cmr, self.center, self.img_size_cmr)\n\n # Finally transpose the image to 3xHxW\n texture_img = texture_img / 255.0\n texture_img = np.transpose(texture_img, (2, 0, 1))\n\n return {'bg_img': texture_img}\n\n def __len__(self):\n return len(self.data)\n\n def __init__(self, opts, data_path_list, img_size=(128, 64)):\n self.data_path_list = data_path_list\n self.img_size = img_size\n self.img_size_cmr = opts.img_size\n self.scale_cmr = (float(opts.img_size) / max(img_size))\n center = np.round(np.array(img_size) / 2).astype(int)\n # image center in (x,y)\n self.center = center[::-1]\n self.data = []\n self.generate_index()\n\n self.random_crop = RandomCrop(output_size=self.img_size)\n\n def generate_index(self):\n print('generating background index')\n for data_path in self.data_path_list:\n for root, dirs, files in os.walk(data_path):\n for name in tqdm.tqdm(files):\n if name.endswith('.jpg'):\n self.data.append(os.path.join(root, name))\n\n print('finish generating background index, found texture image: {}'.format(len(self.data)))\n\n\n#----------- Data Loader ----------#\n#----------------------------------#\ndef data_loader(opts, shuffle=True):\n background_dataset = BackgroundDataset(opts, [opts.PRW_img_path])\n return DataLoader(dataset=background_dataset, batch_size=opts.batch_size, shuffle=shuffle,\n num_workers=opts.n_data_workers, drop_last=True)\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.random.rand",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adityaRakhecha/Image-Filters | [
"3d36008daf48ce16016e6152bcfb8dd422a095a0"
] | [
"sharpening.py"
] | [
"import cv2\nimport numpy as np\n\nclass sharpening:\n\n\tdef __init__(self):\n\t\tpass\n\t\n\tdef sharp(self,image):\n\t\t# Create sharpening kernel\n\t\tkernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\n\n\t\t# applying the sharpening kernel to the input image & displaying it.\n\t\tsharpened = cv2.filter2D(image, -1, kernel)\n\n\t\t# Noise reduction\n\t\tsharpened = cv2.bilateralFilter(sharpened, 9, 75, 75) \n\t\treturn sharpened\n\n\n# Create an image object\nimage = cv2.imread(\"./car.jpg\")\n\ntmp_canvas = sharpening()\nres = tmp_canvas.sharp(image)\ncv2.imwrite('sharped.jpg', res)\ncv2.imshow('original',image)\ncv2.imshow('sharp',res)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yyyliu/latent-space-cartography | [
"c731029bfe540ac9ef703e832aac6774c01f075a"
] | [
"model/read.py"
] | [
"#!/usr/bin/env python\n\n'''\nReads an existing model and do something.\n'''\n\nfrom __future__ import print_function\nimport h5py\nfrom PIL import Image\nimport numpy as np\nimport os\n\nfrom keras import backend as K\n\nimport model\n\n# dataset config\nfrom config_emoji import *\n\nbatch_size = 100\n\n# path to the stored model\nbase = '/home/yliu0/data/{}/'.format(dset)\n\n# load training data\ndef load_data (fpath, original_img_size):\n f = h5py.File(fpath, 'r')\n dset = f[key_raw]\n\n x_train = dset[:train_split]\n x_test = dset[train_split:]\n\n x_train = x_train.astype('float32') / 255.\n x_train = x_train.reshape((x_train.shape[0],) + original_img_size)\n x_test = x_test.astype('float32') / 255.\n x_test = x_test.reshape((x_test.shape[0],) + original_img_size)\n\n return x_train, x_test\n\n# deserialize numpy array to image\ndef to_image (array):\n array = array.reshape(img_rows, img_cols, img_chns)\n array = 255 * (1.0 - array)\n return array.astype('uint8')\n\ndef visualize (x_test, encoder, generator, suffix=''):\n # encode and decode\n x_test_encoded = encoder.predict(x_test, batch_size=batch_size)\n x_test_decoded = generator.predict(x_test_encoded)\n\n m = 5\n original = np.zeros((img_rows * m, img_cols * m, img_chns), 'uint8')\n reconstructed = np.zeros((img_rows * m, img_cols * m, img_chns), 'uint8')\n\n def to_image (array):\n array = array.reshape(img_rows, img_cols, img_chns)\n array *= 255\n return array.astype('uint8')\n\n for i in range(m):\n for j in range(m):\n k = i * m + j\n orig = to_image(x_test[k])\n re = to_image(x_test_decoded[k])\n original[i * img_rows: (i + 1) * img_rows,\n j * img_cols: (j + 1) * img_cols] = orig\n reconstructed[i * img_rows: (i + 1) * img_rows,\n j * img_cols: (j + 1) * img_cols] = re\n\n img = Image.fromarray(original, img_mode)\n img.save('{}original.png'.format(imgbase))\n img = Image.fromarray(reconstructed, img_mode)\n img.save('{}reconstructed_{}.png'.format(imgbase, suffix))\n\n# run encoder through all points and save as a hdf5 file\n# indices should remain the same as raw data\ndef save_encoded (fn):\n # these will be numpy.ndarray with shape (length, latent_dim)\n x_test_encoded = encoder.predict(x_test, batch_size=batch_size)\n x_train_encoded = encoder.predict(x_train, batch_size=batch_size)\n encoded = np.concatenate((x_train_encoded, x_test_encoded), axis = 0)\n\n dim = encoded.shape[1]\n\n # remove previous result\n if os.path.exists(fn):\n os.remove(fn)\n \n f = h5py.File(fn, 'w')\n dset = f.create_dataset('latent', (1, dim), \n chunks=(1, dim),\n maxshape=(None, dim),\n dtype='float64')\n \n for i, val in enumerate(encoded):\n dset.resize((i + 1, dim))\n dset[i] = encoded[i]\n f.flush()\n \n f.close()\n\nif __name__ == '__main__':\n for latent_dim in dims:\n # input path\n rawpath = base + fn_raw\n resultbase = base + '{}_result/{}/'.format(dset, latent_dim)\n mpath = resultbase + '{}_model_dim={}.json'.format(dset, latent_dim)\n wpath = resultbase + '{}_model_dim={}.h5'.format(dset, latent_dim)\n\n # output path\n encode_path = base + 'latent{}.h5'.format(latent_dim)\n\n m = model.Vae(latent_dim = latent_dim, img_dim=(img_chns, img_rows, img_cols))\n vae, encoder, decoder = m.read(mpath, wpath)\n \n x_train, x_test = load_data(rawpath, m.original_img_size)\n # visualize(x_test, encoder, decoder)\n save_encoded(encode_path)\n"
] | [
[
"numpy.concatenate",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tianchenji/Multimodal-SVAE | [
"c76b7f8984610e32819510a7a5295124b97460be"
] | [
"models/blocks/Decoder.py"
] | [
"import torch.nn as nn\n\nclass Decoder(nn.Module):\n\n def __init__(self, layer_sizes, latent_size):\n\n super().__init__()\n\n self.MLP = nn.Sequential()\n\n input_size = latent_size\n\n for i, (in_size, out_size) in enumerate(zip([input_size]+layer_sizes[:-1], layer_sizes)):\n self.MLP.add_module(\n name=\"L{:d}\".format(i), module=nn.Linear(in_size, out_size))\n if i+1 < len(layer_sizes):\n self.MLP.add_module(name=\"A{:d}\".format(i), module=nn.ReLU())\n else:\n self.MLP.add_module(name=\"sigmoid\", module=nn.Sigmoid())\n\n def forward(self, z):\n\n x = self.MLP(z)\n\n return x"
] | [
[
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Sigmoid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
weapp/numpy | [
"33cc5c6530d48102730b85cdf2835aaf480013ad"
] | [
"numpy/typing/tests/data/reveal/dtype.py"
] | [
"import numpy as np\n\ndtype_obj: np.dtype[np.str_]\n\nreveal_type(np.dtype(np.float64)) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]]\nreveal_type(np.dtype(np.int64)) # E: numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]\n\n# String aliases\nreveal_type(np.dtype(\"float64\")) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]]\nreveal_type(np.dtype(\"float32\")) # E: numpy.dtype[numpy.floating[numpy.typing._32Bit]]\nreveal_type(np.dtype(\"int64\")) # E: numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]\nreveal_type(np.dtype(\"int32\")) # E: numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]\nreveal_type(np.dtype(\"bool\")) # E: numpy.dtype[numpy.bool_]\nreveal_type(np.dtype(\"bytes\")) # E: numpy.dtype[numpy.bytes_]\nreveal_type(np.dtype(\"str\")) # E: numpy.dtype[numpy.str_]\n\n# Python types\nreveal_type(np.dtype(complex)) # E: numpy.dtype[numpy.complexfloating[numpy.typing._\nreveal_type(np.dtype(float)) # E: numpy.dtype[numpy.floating[numpy.typing._\nreveal_type(np.dtype(int)) # E: numpy.dtype[numpy.signedinteger[numpy.typing._\nreveal_type(np.dtype(bool)) # E: numpy.dtype[numpy.bool_]\nreveal_type(np.dtype(str)) # E: numpy.dtype[numpy.str_]\nreveal_type(np.dtype(bytes)) # E: numpy.dtype[numpy.bytes_]\n\n# Special case for None\nreveal_type(np.dtype(None)) # E: numpy.dtype[numpy.floating[numpy.typing._\n\n# Dtypes of dtypes\nreveal_type(np.dtype(np.dtype(np.float64))) # E: numpy.dtype[numpy.floating[numpy.typing._64Bit]]\n\n# Parameterized dtypes\nreveal_type(np.dtype(\"S8\")) # E: numpy.dtype\n\n# Void\nreveal_type(np.dtype((\"U\", 10))) # E: numpy.dtype[numpy.void]\n\n# Methods and attributes\nreveal_type(dtype_obj.base) # E: numpy.dtype[numpy.str_]\nreveal_type(dtype_obj.subdtype) # E: Union[Tuple[numpy.dtype[numpy.str_], builtins.tuple[builtins.int]], None]\nreveal_type(dtype_obj.newbyteorder()) # E: numpy.dtype[numpy.str_]\nreveal_type(dtype_obj.type) # E: Type[numpy.str_]\n"
] | [
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
irfanumar1994/pytorch-transformers | [
"f257b96a879e38922eaa377be383be69372e78f1"
] | [
"pytorch_transformers/modeling_bert.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model. \"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport logging\nimport math\nimport os\nimport sys\nfrom io import open\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .modeling_utils import PreTrainedModel, prune_linear_layer\nfrom .configuration_bert import BertConfig\nfrom .file_utils import add_start_docstrings\n\nlogger = logging.getLogger(__name__)\n\nBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin\",\n 'bert-base-german-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin\",\n 'bert-large-uncased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin\",\n 'bert-large-cased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin\",\n 'bert-large-uncased-whole-word-masking-finetuned-squad': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin\",\n 'bert-large-cased-whole-word-masking-finetuned-squad': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin\",\n 'bert-base-cased-finetuned-mrpc': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin\",\n}\n\ndef load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'squad':\n pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, l[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\ndef get_weighted_loss(loss_fct, inputs, labels, weights):\n loss = 0.0\n for i in range(weights.shape[0]):\n loss += (weights[i] + 1.0) * loss_fct(inputs[i:i + 1], labels[i:i + 1])\n\n return loss / (sum(weights) + weights.shape[0])\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\n\n\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\nexcept (ImportError, AttributeError) as e:\n logger.info(\"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .\")\n BertLayerNorm = torch.nn.LayerNorm\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = 
BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n seq_length = input_ids.size(1)\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = config.output_attentions\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask, head_mask=None):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)\n return outputs\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n 
super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)\n heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads\n for head in heads:\n # Compute how many pruned heads are before the head and move the index accordingly\n head = head - sum(1 if h < head else 0 for h in self.pruned_heads)\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, input_tensor, attention_mask, head_mask=None):\n self_outputs = self.self(input_tensor, attention_mask, head_mask)\n attention_output = self.output(self_outputs[0], input_tensor)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask, head_mask=None):\n attention_outputs = self.attention(hidden_states, attention_mask, head_mask)\n attention_output 
= attention_outputs[0]\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(self, hidden_states, attention_mask, head_mask=None):\n all_hidden_states = ()\n all_attentions = ()\n for i, layer_module in enumerate(self.layer):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size,\n config.vocab_size,\n bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states) + self.bias\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyMLMHead, self).__init__()\n self.predictions = BertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n 
super(BertOnlyNSPHead, self).__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super(BertPreTrainingHeads, self).__init__()\n self.predictions = BertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n config_class = BertConfig\n pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP\n load_tf_weights = load_tf_weights_in_bert\n base_model_prefix = \"bert\"\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nBERT_START_DOCSTRING = r\"\"\" The BERT model was proposed in\n `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_\n by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer\n pre-trained using a combination of masked language modeling objective and next sentence prediction\n on a large corpus comprising the Toronto Book Corpus and Wikipedia.\n\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\n refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:\n https://arxiv.org/abs/1810.04805\n\n .. _`torch.nn.Module`:\n https://pytorch.org/docs/stable/nn.html#module\n\n Parameters:\n config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nBERT_INPUTS_DOCSTRING = r\"\"\"\n Inputs:\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:\n\n (a) For sequence pairs:\n\n ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``\n\n ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``\n\n (b) For single sequences:\n\n ``tokens: [CLS] the dog is hairy . 
[SEP]``\n\n ``token_type_ids: 0 0 0 0 0 0 0``\n\n Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on\n the right rather than the left.\n\n Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.\n See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and\n :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).\n **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n\"\"\"\n\n@add_start_docstrings(\"The bare Bert Model transformer outputing raw hidden-states without any specific head on top.\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertModel(BertPreTrainedModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the output of the last layer of the model.\n **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during Bert pretraining. 
This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertModel.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n def __init__(self, config):\n super(BertModel, self).__init__(config)\n\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n\n self.init_weights()\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.embeddings.word_embeddings\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.embeddings.word_embeddings = new_embeddings\n return self.embeddings.word_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n 
if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer\n head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)\n encoder_outputs = self.encoder(embedding_output,\n extended_attention_mask,\n head_mask=head_mask)\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here\n return outputs # sequence_output, pooled_output, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with two heads on top as done during the pre-training:\n a `masked language modeling` head and a `next sentence prediction (classification)` head. \"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForPreTraining(BertPreTrainedModel):\n r\"\"\"\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForPreTraining.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n prediction_scores, seq_relationship_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForPreTraining, self).__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n \"\"\" Make sure we are sharing the input and output embeddings.\n Export to TorchScript can't handle parameter sharing so we are cloning them instead.\n \"\"\"\n self._tie_or_clone_weights(self.cls.predictions.decoder,\n self.bert.embeddings.word_embeddings)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n masked_lm_labels=None, next_sentence_label=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here\n\n if masked_lm_labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), prediction_scores, 
seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `language modeling` head on top. \"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForMaskedLM(BertPreTrainedModel):\n r\"\"\"\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForMaskedLM.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, masked_lm_labels=input_ids)\n loss, prediction_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForMaskedLM, self).__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertOnlyMLMHead(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n \"\"\" Make sure we are sharing the input and output embeddings.\n Export to TorchScript can't handle parameter sharing so we are cloning them instead.\n \"\"\"\n self._tie_or_clone_weights(self.cls.predictions.decoder,\n self.bert.embeddings.word_embeddings)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n masked_lm_labels=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n outputs = (masked_lm_loss,) + outputs\n\n return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `next sentence prediction (classification)` head on top. 
\"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForNextSentencePrediction(BertPreTrainedModel):\n r\"\"\"\n **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Next sequence prediction (classification) loss.\n **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n seq_relationship_scores = outputs[0]\n\n \"\"\"\n def __init__(self, config):\n super(BertForNextSentencePrediction, self).__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertOnlyNSPHead(config)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n next_sentence_label=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n pooled_output = outputs[1]\n\n seq_relationship_score = self.cls(pooled_output)\n\n outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n outputs = (next_sentence_loss,) + outputs\n\n return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForSequenceClassification(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification (or regression if config.num_labels==1) loss.\n **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForSequenceClassification.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, logits = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForSequenceClassification, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, labels=None, weights=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n if weights is None:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n else:\n loss = get_weighted_loss(loss_fct,\n logits.view(-1, self.num_labels),\n labels.view(-1), weights)\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForMultipleChoice(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification loss.\n **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above).\n Classification scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForMultipleChoice.from_pretrained('bert-base-uncased')\n choices = [\"Hello, my dog is cute\", \"Hello, my cat is amazing\"]\n input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices\n labels = torch.tensor(1).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, classification_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForMultipleChoice, self).__init__(config)\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, labels=None):\n num_choices = input_ids.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1))\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n outputs = (loss,) + outputs\n\n return outputs # (loss), reshaped_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a token classification head on top (a linear layer on top of\n the hidden-states 
output) e.g. for Named-Entity-Recognition (NER) tasks. \"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForTokenClassification(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification loss.\n **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``\n Classification scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForTokenClassification.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForTokenClassification, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, labels=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)\nclass BertForQuestionAnswering(BertPreTrainedModel):\n r\"\"\"\n **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-start scores (before SoftMax).\n **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-end scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n start_positions = torch.tensor([1])\n end_positions = torch.tensor([3])\n outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)\n loss, start_scores, end_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(BertForQuestionAnswering, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n start_positions=None, end_positions=None):\n\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask)\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n outputs = (start_logits, end_logits,) + outputs[2:]\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if 
len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)\n"
] | [
[
"torch.nn.Softmax",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.nn.MSELoss",
"torch.zeros",
"torch.zeros_like",
"torch.from_numpy",
"torch.nn.Embedding",
"torch.nn.Tanh",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.matmul",
"torch.arange",
"numpy.transpose",
"tensorflow.train.list_variables",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
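A minimal usage sketch for the head-pruning path defined in the BERT record above, where BertModel._prune_heads forwards a {layer: heads} dict to BertAttention.prune_heads. It assumes the surrounding pytorch_transformers package is installed; the checkpoint name and the layer/head indices are illustrative only, and the input sentence mirrors the docstring examples in the record.

    import torch
    from pytorch_transformers import BertModel, BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = BertModel.from_pretrained('bert-base-uncased')

    # {layer_num: [head indices]} is the dict BertModel._prune_heads expects;
    # for each listed layer, BertAttention.prune_heads shrinks the query/key/value
    # projections and the attention output.dense accordingly.
    model._prune_heads({0: [0, 1], 11: [2]})

    input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # batch size 1
    sequence_output, pooled_output = model(input_ids)[:2]
    print(sequence_output.shape, pooled_output.shape)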
eladyaniv01/sc2-pathlib | [
"ae1737af0dd3d418016941dcb4ac30bcfb36726f"
] | [
"sc2pathlibp/path_finder.py"
] | [
"from .sc2pathlib import PathFind\n\n# from . import _sc2pathlib\n# import sc2pathlib\nimport numpy as np\nfrom typing import Union, List, Tuple\nfrom math import floor\n\n\ndef to_float2(original: Tuple[int, int]) -> Tuple[float, float]:\n return (original[0] + 0.5, original[1] + 0.5)\n\n\nclass PathFinder:\n def __init__(self, maze: Union[List[List[int]], np.array]):\n \"\"\" \n pathing values need to be integers to improve performance. \n Initialization should be done with array consisting values of 0 and 1.\n \"\"\"\n self._path_find = PathFind(maze)\n self.heuristic_accuracy = 1 # Octile distance\n\n def normalize_influence(self, value: int):\n \"\"\" \n Normalizes influence to integral value. \n Influence does not need to be calculated each frame, but this quickly resets\n influence values to specified value without changing available paths.\n \"\"\"\n self._path_find.normalize_influence(value)\n\n @property\n def width(self) -> int:\n \"\"\"\n :return: Width of the defined map\n \"\"\"\n return self._path_find.width\n\n @property\n def height(self) -> int:\n \"\"\"\n :return: Height of the defined map\n \"\"\"\n return self._path_find.height\n\n @property\n def map(self) -> List[List[int]]:\n \"\"\"\n :return: map as list of lists [x][y] in python readable format\n \"\"\"\n return self._path_find.map\n\n def reset(self):\n \"\"\"\n Reset the pathfind map data to it's original state\n \"\"\"\n self._path_find.reset()\n\n def set_map(self, data: List[List[int]]):\n self._path_find.map = data\n\n def create_block(self, center: Union[Tuple[float, float], List[Tuple[float, float]]], size: Tuple[int, int]):\n if isinstance(center, list):\n self._path_find.create_blocks(center, size)\n else:\n self._path_find.create_block(center, size)\n\n def remove_block(self, center: Union[Tuple[float, float], List[Tuple[float, float]]], size: Tuple[int, int]):\n if isinstance(center, list):\n self._path_find.remove_blocks(center, size)\n else:\n self._path_find.remove_block(center, size)\n\n def find_path(\n self, start: (float, float), end: (float, float), large: bool = False\n ) -> Tuple[List[Tuple[int, int]], float]:\n \"\"\"\n Finds a path ignoring influence.\n\n :param start: Start position in float tuple\n :param end: Start position in float tuple\n :param large: Unit is large and requires path to have width of 2 to pass\n :return: Tuple of points and total distance.\n \"\"\"\n start_int = (floor(start[0]), floor(start[1]))\n end_int = (floor(end[0]), floor(end[1]))\n if large:\n return self._path_find.find_path_large(start_int, end_int, self.heuristic_accuracy)\n return self._path_find.find_path(start_int, end_int, self.heuristic_accuracy)\n\n def find_path_influence(\n self, start: (float, float), end: (float, float), large: bool = False\n ) -> (List[Tuple[int, int]], float):\n \"\"\"\n Finds a path that takes influence into account\n\n :param start: Start position in float tuple\n :param end: Start position in float tuple\n :param large: Unit is large and requires path to have width of 2 to pass\n :return: Tuple of points and total distance including influence.\n \"\"\"\n start_int = (floor(start[0]), floor(start[1]))\n end_int = (floor(end[0]), floor(end[1]))\n if large:\n return self._path_find.find_path_influence_large(start_int, end_int, self.heuristic_accuracy)\n return self._path_find.find_path_influence(start_int, end_int, self.heuristic_accuracy)\n\n def safest_spot(self, destination_center: (float, float), walk_distance: float) -> (Tuple[int, int], float):\n destination_int = 
(floor(destination_center[0]), floor(destination_center[1]))\n return self._path_find.lowest_influence_walk(destination_int, walk_distance)\n\n def lowest_influence_in_grid(self, destination_center: (float, float), radius: int) -> (Tuple[int, int], float):\n destination_int = (floor(destination_center[0]), floor(destination_center[1]))\n return self._path_find.lowest_influence(destination_int, radius)\n\n def add_influence(self, points: List[Tuple[float, float]], value: float, distance: float, flat: bool = False):\n list = []\n for point in points:\n list.append((floor(point[0]), floor(point[1])))\n\n if flat:\n self._path_find.add_influence_flat(list, value, distance)\n else:\n self._path_find.add_influence(list, value, distance)\n\n def add_influence_walk(self, points: List[Tuple[float, float]], value: float, distance: float, flat: bool = False):\n list = []\n for point in points:\n list.append((floor(point[0]), floor(point[1])))\n\n if flat:\n self._path_find.add_walk_influence_flat(list, value, distance)\n else:\n self._path_find.add_walk_influence(list, value, distance)\n\n def find_low_inside_walk(\n self, start: (float, float), target: (float, float), distance: Union[int, float]\n ) -> (Tuple[float, float], float):\n \"\"\"\n Finds a compromise where low influence matches with close position to the start position.\n\n This is intended for finding optimal position for unit with more range to find optimal position to fight from\n :param start: This is the starting position of the unit with more range\n :param target: Target that the optimal position should be optimized for\n :param distance: This should represent the firing distance of the unit with more range\n :return: Tuple for position and influence distance to reach the destination\n \"\"\"\n # start_int = (floor(start[0]), floor(start[1]))\n # target_int = (floor(target[0]), floor(target[1]))\n return self._path_find.find_low_inside_walk(start, target, distance)\n\n def plot(self, path: List[Tuple[int, int]], image_name: str = \"map\", resize: int = 4):\n \"\"\"\n Uses cv2 to draw current pathing grid.\n \n requires opencv-python\n\n :param path: list of points to colorize\n :param image_name: name of the window to show the image in. Unique names update only when used multiple times.\n :param resize: multiplier for resizing the image\n :return: None\n \"\"\"\n import cv2\n\n image = np.array(self._path_find.map, dtype=np.uint8)\n for point in path:\n image[point] = 255\n image = np.rot90(image, 1)\n resized = cv2.resize(image, dsize=None, fx=resize, fy=resize)\n cv2.imshow(image_name, resized)\n cv2.waitKey(1)\n"
] | [
[
"numpy.rot90",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
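A minimal usage sketch for the PathFinder wrapper in the sc2-pathlib record above, assuming the compiled sc2pathlib extension behind sc2pathlibp/path_finder.py is built and importable; the 4x4 grid, the influence value and the coordinates are illustrative, and treating 1 as pathable is an assumption based only on the initializer docstring ("values of 0 and 1").

    from sc2pathlibp.path_finder import PathFinder

    # 0/1 grid as required by the PathFinder initializer (assumed here: 1 = pathable).
    grid = [
        [1, 1, 1, 1],
        [1, 0, 0, 1],
        [1, 0, 0, 1],
        [1, 1, 1, 1],
    ]

    pf = PathFinder(grid)
    pf.normalize_influence(10)

    # find_path floors the float coordinates to grid cells and returns (points, distance).
    path, distance = pf.find_path((0.5, 0.5), (3.5, 3.5))
    print(path, distance)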
videetparekh/model-zoo-models | [
"431d6e8f04c343a2e6dbc140e1b060cc5f0a089d",
"431d6e8f04c343a2e6dbc140e1b060cc5f0a089d"
] | [
"ssd_mobilenetv2/Evaluator.py",
"ssd_mobilenetv2/ssd_layers.py"
] | [
"import sys\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom eval_utils import *\n\n\nclass Evaluator:\n def GetPascalVOCMetrics(self,\n boundingboxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation):\n \"\"\"Get the metrics used by the VOC Pascal 2012 challenge.\n Get\n Args:\n boundingboxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold: IOU threshold indicating which detections will be considered TP or FP\n (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation);\n Returns:\n A list of dictionaries. Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n ret = [] # list containing metrics (precision, recall, average precision) of each class\n # List with all ground truths (Ex: [imageName,class,confidence=1, (bb coordinates XYX2Y2)])\n groundTruths = []\n # List with all detections (Ex: [imageName,class,confidence,(bb coordinates XYX2Y2)])\n detections = []\n # Get all classes\n classes = []\n # Loop through all bounding boxes and separate them into GTs and detections\n for bb in boundingboxes.getBoundingBoxes():\n # [imageName, class, confidence, (bb coordinates XYX2Y2)]\n if bb.getBBType() == BBType.GroundTruth:\n groundTruths.append([\n bb.getImageName(),\n bb.getClassId(), 1,\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n ])\n else:\n detections.append([\n bb.getImageName(),\n bb.getClassId(),\n bb.getConfidence(),\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n ])\n # get class\n if bb.getClassId() not in classes:\n classes.append(bb.getClassId())\n classes = sorted(classes)\n # Precision x Recall is obtained individually by each class\n # Loop through by classes\n for c in classes:\n # Get only detection of class c\n dects = []\n [dects.append(d) for d in detections if d[1] == c]\n # Get only ground truths of class c\n gts = []\n [gts.append(g) for g in groundTruths if g[1] == c]\n npos = len(gts)\n # sort detections by decreasing confidence\n dects = sorted(dects, key=lambda conf: conf[2], reverse=True)\n TP = np.zeros(len(dects))\n FP = np.zeros(len(dects))\n # create dictionary with amount of gts for each image\n det = Counter([cc[0] for cc in gts])\n for key, val in det.items():\n det[key] = np.zeros(val)\n # print(\"Evaluating class: %s (%d detections)\" % (str(c), len(dects)))\n # Loop through detections\n for d in range(len(dects)):\n # print('dect %s => %s' % (dects[d][0], dects[d][3],))\n # Find ground truth image\n gt = [gt for gt in gts if gt[0] == dects[d][0]]\n iouMax = sys.float_info.min\n for j in range(len(gt)):\n # print('Ground truth gt => %s' % (gt[j][3],))\n iou = 
Evaluator.iou(dects[d][3], gt[j][3])\n if iou > iouMax:\n iouMax = iou\n jmax = j\n # Assign detection as true positive/don't care/false positive\n if iouMax >= IOUThreshold:\n if det[dects[d][0]][jmax] == 0:\n TP[d] = 1 # count as true positive\n # print(\"TP\")\n det[dects[d][0]][jmax] = 1 # flag as already 'seen'\n # - A detected \"cat\" is overlaped with a GT \"cat\" with IOU >= IOUThreshold.\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # compute precision, recall and average precision\n acc_FP = np.cumsum(FP)\n acc_TP = np.cumsum(TP)\n rec = acc_TP / npos\n prec = np.divide(acc_TP, (acc_FP + acc_TP))\n # Depending on the method, call the right implementation\n if method == MethodAveragePrecision.EveryPointInterpolation:\n [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)\n else:\n [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)\n # add class result in the dictionary to be returned\n r = {\n 'class': c,\n 'precision': prec,\n 'recall': rec,\n 'AP': ap,\n 'interpolated precision': mpre,\n 'interpolated recall': mrec,\n 'total positives': npos,\n 'total TP': np.sum(TP),\n 'total FP': np.sum(FP)\n }\n ret.append(r)\n return ret\n\n def PlotPrecisionRecallCurve(self,\n classId,\n boundingBoxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation,\n showAP=False,\n showInterpolatedPrecision=False,\n savePath=None,\n showGraphic=True):\n \"\"\"PlotPrecisionRecallCurve\n Plot the Precision x Recall curve for a given class.\n Args:\n classId: The class that will be plot;\n boundingBoxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold (optional): IOU threshold indicating which detections will be considered\n TP or FP (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation).\n showAP (optional): if True, the average precision value will be shown in the title of\n the graph (default = False);\n showInterpolatedPrecision (optional): if True, it will show in the plot the interpolated\n precision (default = False);\n savePath (optional): if informed, the plot will be saved as an image in this path\n (ex: /home/mywork/ap.png) (default = None);\n showGraphic (optional): if True, the plot will be shown (default = True)\n Returns:\n A dictionary containing information and metric about the class. The keys of the\n dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)\n result = None\n for res in results:\n if res['class'] == classId:\n result = res\n break\n if result is None:\n raise IOError('Error: Class %d could not be found.' 
% classId)\n\n precision = result['precision']\n recall = result['recall']\n average_precision = result['AP']\n mpre = result['interpolated precision']\n mrec = result['interpolated recall']\n npos = result['total positives']\n total_tp = result['total TP']\n total_fp = result['total FP']\n\n if showInterpolatedPrecision:\n if method == MethodAveragePrecision.EveryPointInterpolation:\n plt.plot(mrec, mpre, '--r', label='Interpolated precision (every point)')\n elif method == MethodAveragePrecision.ElevenPointInterpolation:\n # Uncomment the line below if you want to plot the area\n # plt.plot(mrec, mpre, 'or', label='11-point interpolated precision')\n # Remove duplicates, getting only the highest precision of each recall value\n nrec = []\n nprec = []\n for idx in range(len(mrec)):\n r = mrec[idx]\n if r not in nrec:\n idxEq = np.argwhere(mrec == r)\n nrec.append(r)\n nprec.append(max([mpre[int(id)] for id in idxEq]))\n plt.plot(nrec, nprec, 'or', label='11-point interpolated precision')\n plt.plot(recall, precision, label='Precision')\n plt.xlabel('recall')\n plt.ylabel('precision')\n if showAP:\n ap_str = \"{0:.2f}%\".format(average_precision * 100)\n plt.title('Precision x Recall curve \\nClass: %s, AP: %s' % (str(classId), ap_str))\n # plt.title('Precision x Recall curve \\nClass: %s, AP: %.4f' % (str(classId),\n # average_precision))\n else:\n plt.title('Precision x Recall curve \\nClass: %d' % classId)\n plt.legend(shadow=True)\n plt.grid()\n ############################################################\n # Uncomment the following block to create plot with points #\n ############################################################\n # plt.plot(recall, precision, 'bo')\n # labels = ['R', 'Y', 'J', 'A', 'U', 'C', 'M', 'F', 'D', 'B', 'H', 'P', 'E', 'X', 'N', 'T',\n # 'K', 'Q', 'V', 'I', 'L', 'S', 'G', 'O']\n # dicPosition = {}\n # dicPosition['left_zero'] = (-30,0)\n # dicPosition['left_zero_slight'] = (-30,-10)\n # dicPosition['right_zero'] = (30,0)\n # dicPosition['left_up'] = (-30,20)\n # dicPosition['left_down'] = (-30,-25)\n # dicPosition['right_up'] = (20,20)\n # dicPosition['right_down'] = (20,-20)\n # dicPosition['up_zero'] = (0,30)\n # dicPosition['up_right'] = (0,30)\n # dicPosition['left_zero_long'] = (-60,-2)\n # dicPosition['down_zero'] = (-2,-30)\n # vecPositions = [\n # dicPosition['left_down'],\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['right_zero'], #'R', 'Y', 'J', 'A',\n # dicPosition['left_up'],\n # dicPosition['left_up'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'U', 'C', 'M', 'F',\n # dicPosition['left_zero'],\n # dicPosition['right_up'],\n # dicPosition['right_down'],\n # dicPosition['down_zero'], #'D', 'B', 'H', 'P'\n # dicPosition['left_up'],\n # dicPosition['up_zero'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'E', 'X', 'N', 'T',\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['left_zero_long'],\n # dicPosition['left_zero_slight'], # 'K', 'Q', 'V', 'I',\n # dicPosition['right_down'],\n # dicPosition['left_down'],\n # dicPosition['right_up'],\n # dicPosition['down_zero']\n # ] # 'L', 'S', 'G', 'O'\n # for idx in range(len(labels)):\n # box = dict(boxstyle='round,pad=.5',facecolor='yellow',alpha=0.5)\n # plt.annotate(labels[idx],\n # xy=(recall[idx],precision[idx]), xycoords='data',\n # xytext=vecPositions[idx], textcoords='offset points',\n # arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"),\n # bbox=box)\n if savePath is not None:\n plt.savefig(savePath)\n 
if showGraphic is True:\n plt.show()\n # plt.waitforbuttonpress()\n ret = {}\n ret['class'] = classId\n ret['precision'] = precision\n ret['recall'] = recall\n ret['AP'] = average_precision\n ret['interpolated precision'] = mpre\n ret['interpolated recall'] = mrec\n ret['total positives'] = npos\n ret['total TP'] = total_tp\n ret['total FP'] = total_fp\n return ret\n\n @staticmethod\n def CalculateAveragePrecision(rec, prec):\n mrec = []\n mrec.append(0)\n [mrec.append(e) for e in rec]\n mrec.append(1)\n mpre = []\n mpre.append(0)\n [mpre.append(e) for e in prec]\n mpre.append(0)\n for i in range(len(mpre) - 1, 0, -1):\n mpre[i - 1] = max(mpre[i - 1], mpre[i])\n ii = []\n for i in range(len(mrec) - 1):\n if mrec[1:][i] != mrec[0:-1][i]:\n ii.append(i + 1)\n ap = 0\n for i in ii:\n ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i])\n # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]\n return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii]\n\n @staticmethod\n # 11-point interpolated average precision\n def ElevenPointInterpolatedAP(rec, prec):\n # def CalculateAveragePrecision2(rec, prec):\n mrec = []\n # mrec.append(0)\n [mrec.append(e) for e in rec]\n # mrec.append(1)\n mpre = []\n # mpre.append(0)\n [mpre.append(e) for e in prec]\n # mpre.append(0)\n recallValues = np.linspace(0, 1, 11)\n recallValues = list(recallValues[::-1])\n rhoInterp = []\n recallValid = []\n # For each recallValues (0, 0.1, 0.2, ... , 1)\n for r in recallValues:\n # Obtain all recall values higher or equal than r\n argGreaterRecalls = np.argwhere(mrec[:-1] >= r)\n pmax = 0\n # If there are recalls above r\n if argGreaterRecalls.size != 0:\n pmax = max(mpre[argGreaterRecalls.min():])\n recallValid.append(r)\n rhoInterp.append(pmax)\n # By definition AP = sum(max(precision whose recall is above r))/11\n ap = sum(rhoInterp) / 11\n # Generating values for the plot\n rvals = []\n rvals.append(recallValid[0])\n [rvals.append(e) for e in recallValid]\n rvals.append(0)\n pvals = []\n pvals.append(0)\n [pvals.append(e) for e in rhoInterp]\n pvals.append(0)\n # rhoInterp = rhoInterp[::-1]\n cc = []\n for i in range(len(rvals)):\n p = (rvals[i], pvals[i - 1])\n if p not in cc:\n cc.append(p)\n p = (rvals[i], pvals[i])\n if p not in cc:\n cc.append(p)\n recallValues = [i[0] for i in cc]\n rhoInterp = [i[1] for i in cc]\n return [ap, rhoInterp, recallValues, None]\n\n # For each detections, calculate IOU with reference\n @staticmethod\n def _getAllIOUs(reference, detections):\n ret = []\n bbReference = reference.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n # img = np.zeros((200,200,3), np.uint8)\n for d in detections:\n bb = d.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n iou = Evaluator.iou(bbReference, bb)\n # Show blank image with the bounding boxes\n # img = add_bb_into_image(img, d, color=(255,0,0), thickness=2, label=None)\n # img = add_bb_into_image(img, reference, color=(0,255,0), thickness=2, label=None)\n ret.append((iou, reference, d)) # iou, reference, detection\n # cv2.imshow(\"comparing\",img)\n # cv2.waitKey(0)\n # cv2.destroyWindow(\"comparing\")\n return sorted(ret, key=lambda i: i[0], reverse=True) # sort by iou (from highest to lowest)\n\n @staticmethod\n def iou(boxA, boxB):\n # if boxes dont intersect\n if Evaluator._boxesIntersect(boxA, boxB) is False:\n return 0\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n union = Evaluator._getUnionAreas(boxA, boxB, interArea=interArea)\n # intersection over union\n iou = interArea / union\n assert iou >= 0\n return iou\n\n # boxA = 
(Ax1,Ay1,Ax2,Ay2)\n # boxB = (Bx1,By1,Bx2,By2)\n @staticmethod\n def _boxesIntersect(boxA, boxB):\n if boxA[0] > boxB[2]:\n return False # boxA is right of boxB\n if boxB[0] > boxA[2]:\n return False # boxA is left of boxB\n if boxA[3] < boxB[1]:\n return False # boxA is above boxB\n if boxA[1] > boxB[3]:\n return False # boxA is below boxB\n return True\n\n @staticmethod\n def _getIntersectionArea(boxA, boxB):\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n # intersection area\n return (xB - xA + 1) * (yB - yA + 1)\n\n @staticmethod\n def _getUnionAreas(boxA, boxB, interArea=None):\n area_A = Evaluator._getArea(boxA)\n area_B = Evaluator._getArea(boxB)\n if interArea is None:\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n return float(area_A + area_B - interArea)\n\n @staticmethod\n def _getArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)\n",
"\"\"\"Some special purpose layers for SSD.\"\"\"\n\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.layers import InputSpec\nfrom tensorflow.keras.layers import Layer\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nclass Normalize(Layer):\n \"\"\"Normalization layer as described in ParseNet paper.\n\n # Arguments\n scale: Default feature scale.\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if dim_ordering='th'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if dim_ordering='tf'.\n\n # Output shape\n Same as input\n\n # References\n http://cs.unc.edu/~wliu/papers/parsenet.pdf\n\n #TODO\n Add possibility to have one scale for all features.\n \"\"\"\n def __init__(self, scale, **kwargs):\n if tf.keras.backend.image_data_format() == 'channels_last':\n self.axis = 3\n else:\n self.axis = 1\n self.scale = scale\n super(Normalize, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n shape = (input_shape[self.axis],)\n init_gamma = self.scale * np.ones(shape)\n self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))\n self.trainable_weights = [self.gamma]\n\n def call(self, x, mask=None):\n output = K.l2_normalize(x, self.axis)\n output *= self.gamma\n return output\n\n def get_config(self):\n config = super().get_config().copy()\n config['name'] = 'Normalize'\n return config\n\n\nclass PriorBox(Layer):\n \"\"\"Generate the prior boxes of designated sizes and aspect ratios.\n\n # Arguments\n img_size: Size of the input image as tuple (w, h).\n min_size: Minimum box size in pixels.\n max_size: Maximum box size in pixels.\n aspect_ratios: List of aspect ratios of boxes.\n flip: Whether to consider reverse aspect ratios.\n variances: List of variances for x, y, w, h.\n clip: Whether to clip the prior's coordinates\n such that they are within [0, 1].\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if dim_ordering='th'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if dim_ordering='tf'.\n\n # Output shape\n 3D tensor with shape:\n (samples, num_boxes, 8)\n\n # References\n https://arxiv.org/abs/1512.02325\n\n #TODO\n Add possibility not to have variances.\n Add Theano support\n \"\"\"\n def __init__(self, img_size, min_size=None, max_size=None, aspect_ratios=None,\n flip=True, variances=[0.1], clip=True, **kwargs):\n if tf.keras.backend.image_data_format() == 'channels_last':\n self.waxis = 2\n self.haxis = 1\n else:\n self.waxis = 3\n self.haxis = 2\n self.img_size = img_size\n if min_size <= 0:\n raise Exception('min_size must be positive.')\n self.min_size = min_size\n self.flip = flip\n self.max_size = max_size\n self.aspect_ratios = [1.0]\n if max_size:\n if max_size < min_size:\n raise Exception('max_size must be greater than min_size.')\n self.aspect_ratios.append(1.0)\n if aspect_ratios:\n for ar in aspect_ratios:\n if ar in self.aspect_ratios:\n continue\n self.aspect_ratios.append(ar)\n if flip:\n self.aspect_ratios.append(1.0 / ar)\n self.variances = np.array(variances)\n self.clip = clip\n super(PriorBox, self).__init__(**kwargs)\n\n def get_config(self):\n config = super().get_config().copy()\n config['img_size'] = self.img_size\n config['min_size'] = self.min_size\n config['max_size'] = self.max_size\n config['aspect_ratios'] = self.aspect_ratios\n config['flip'] = self.flip\n config['variances'] = self.variances\n config['clip'] = self.clip\n\n 
return config\n\n def compute_output_shape(self, input_shape):\n num_priors_ = len(self.aspect_ratios)\n layer_width = input_shape[self.waxis]\n layer_height = input_shape[self.haxis]\n num_boxes = num_priors_ * layer_width * layer_height\n return (input_shape[0], num_boxes, 8)\n\n def call(self, x, mask=None):\n input_shape = K.int_shape(x)\n layer_width = input_shape[self.waxis]\n layer_height = input_shape[self.haxis]\n img_width = self.img_size[0]\n img_height = self.img_size[1]\n # define prior boxes shapes\n box_widths = []\n box_heights = []\n for ar in self.aspect_ratios:\n if ar == 1 and len(box_widths) == 0:\n box_widths.append(self.min_size)\n box_heights.append(self.min_size)\n elif ar == 1 and len(box_widths) > 0:\n box_widths.append(np.sqrt(self.min_size * self.max_size))\n box_heights.append(np.sqrt(self.min_size * self.max_size))\n elif ar != 1:\n box_widths.append(self.min_size * np.sqrt(ar))\n box_heights.append(self.min_size / np.sqrt(ar))\n box_widths = 0.5 * np.array(box_widths)\n box_heights = 0.5 * np.array(box_heights)\n # define centers of prior boxes\n step_x = img_width / layer_width\n step_y = img_height / layer_height\n linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x,\n layer_width)\n liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,\n layer_height)\n centers_x, centers_y = np.meshgrid(linx, liny)\n centers_x = centers_x.reshape(-1, 1)\n centers_y = centers_y.reshape(-1, 1)\n # define xmin, ymin, xmax, ymax of prior boxes\n num_priors_ = len(self.aspect_ratios)\n prior_boxes = np.concatenate((centers_x, centers_y), axis=1)\n prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_))\n prior_boxes[:, ::4] -= box_widths\n prior_boxes[:, 1::4] -= box_heights\n prior_boxes[:, 2::4] += box_widths\n prior_boxes[:, 3::4] += box_heights\n prior_boxes[:, ::2] /= img_width\n prior_boxes[:, 1::2] /= img_height\n prior_boxes = prior_boxes.reshape(-1, 4)\n if self.clip:\n prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)\n # define variances\n num_boxes = len(prior_boxes)\n if len(self.variances) == 1:\n variances = np.ones((num_boxes, 4)) * self.variances[0]\n elif len(self.variances) == 4:\n variances = np.tile(self.variances, (num_boxes, 1))\n else:\n raise Exception('Must provide one or four variances.')\n prior_boxes = np.concatenate((prior_boxes, variances), axis=1)\n prior_boxes_tensor = K.expand_dims(K.variable(prior_boxes), 0)\n\n prior_boxes_tensor = tf.ones([tf.shape(x)[0], 1, 1]) * prior_boxes_tensor\n\n return prior_boxes_tensor"
] | [
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.cumsum",
"matplotlib.pyplot.savefig",
"numpy.argwhere",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"numpy.divide",
"matplotlib.pyplot.ylabel"
],
[
"numpy.maximum",
"tensorflow.compat.v1.keras.backend.image_data_format",
"numpy.linspace",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.meshgrid",
"tensorflow.keras.backend.int_shape",
"numpy.sqrt",
"numpy.tile",
"numpy.ones",
"numpy.concatenate",
"tensorflow.keras.backend.l2_normalize",
"tensorflow.compat.v1.shape",
"numpy.array",
"tensorflow.keras.layers.InputSpec",
"tensorflow.keras.backend.variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
carsault/chord_sequence_prediction | [
"6eb539a963ca6350bcf0c88b8d8756775ad7c488"
] | [
"utilities/modelsGen.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 9 18:00:12 2019\n\n@author: carsault\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utilities import utils\nfrom utilities.utils import *\n#%%\nclass ModelFamily(nn.Module):\n def __init__(self):\n super(ModelFamily, self).__init__()\n self.models = nn.ModuleDict()\n self.decim = []\n\n def addModel(self, model, decim):\n self.models[decim] = model\n self.decim.append(decim)\n \n def forward(self, x, args, bornInf, bornSup):\n out = []\n i = 0\n for d in self.decim:\n if d != str(1) :\n data = x[:,bornInf[int(d)]:bornSup[int(d)],:].to(args.device)\n out.append(self.models[d].encoder(data))\n i += 1\n out = torch.cat(out, 1)\n data = x[:,bornInf[1]:bornSup[1],:].to(args.device)\n #print(data)\n y = self.models[\"1\"](data,out)\n return y\n \n def train_epoch(self, training_generator, enc_optimizer, dec_optimizer, criterion, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if args.alphaRep == \"alphaRep\":\n local_batch = nn.functional.one_hot(local_batch.long(),self.encoder.n_categories) \n local_labels = nn.functional.one_hot(local_labels.long(),self.encoder.n_categories)\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n \n self.train() \n self.zero_grad()\n if args.alphaRep == \"alphaRep\":\n output = self(local_batch.float(), args, bornInf, bornSup)\n else:\n output = self(local_batch, args, bornInf, bornSup)\n #print(output.size())\n if args.decimList[0] != 1:\n loss = criterion(output, local_labels)\n else:\n output = output.transpose(1,2)\n topv, topi = local_labels.topk(1)\n topi = topi[:,:,0]\n #loss = criterion(output, local_labels)\n loss = criterion(output, topi)\n output = output.transpose(1,2)\n #print(topi.size())\n #loss = criterion(output, local_labels)\n #loss = criterion(output, topi)\n loss.backward()\n \n enc_optimizer.step()\n dec_optimizer.step()\n train_total_loss += loss\n return train_total_loss \n \nclass MlpTensorFamily(nn.Module):\n def __init__(self):\n super(MlpTensorFamily, self).__init__()\n self.encoderTensor = nn.ModuleDict()\n self.decim = []\n \n def addEncoder(self, enc):\n self.encoder = enc\n \n def addDecoder(self, dec):\n self.decoder = dec\n\n def addEncoderTensor(self, model, decim):\n self.encoderTensor[str(decim)] = model\n self.decim.append(decim)\n \n def forward(self, x, u, args):\n out = []\n out.append(self.encoder(x))\n for d in range(args.lenSeq-1):\n data = u[d]\n out.append(self.encoderTensor[str(d)](data))\n out = torch.cat(out, 1)\n y = self.decoder(out)\n return y\n \n def train_epoch(self, training_generator, optimizer, criterion, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if args.alphaRep == \"alphaRep\":\n local_batch = nn.functional.one_hot(local_batch.long(),self.encoder.n_categories) \n local_labels = nn.functional.one_hot(local_labels.long(),self.encoder.n_categories)\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = 
local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n \n self.train() \n self.zero_grad()\n if args.alphaRep == \"alphaRep\":\n output = self(local_batch.float())\n else:\n output = self(local_batch)\n #print(output.size())\n if args.decimList[0] != 1:\n loss = criterion(output, local_labels)\n else:\n output = output.transpose(1,2)\n topv, topi = local_labels.topk(1)\n topi = topi[:,:,0]\n #loss = criterion(output, local_labels)\n loss = criterion(output, topi)\n output = output.transpose(1,2)\n #print(topi.size())\n #loss = criterion(output, local_labels)\n #loss = criterion(output, topi)\n loss.backward()\n \n optimizer.step()\n train_total_loss += loss\n return train_total_loss \n \nclass ModelFamilySum(nn.Module):\n def __init__(self):\n super(ModelFamilySum, self).__init__()\n self.models = nn.ModuleDict()\n self.decim = []\n\n def addModel(self, model, decim):\n self.models[decim] = model\n self.decim.append(decim)\n \n def forward(self, x, args):\n out = []\n i = 0\n for d in self.decim:\n if d != str(1) :\n data = x[i].to(args.device)\n data = self.models[d](data)\n data = data.repeat(1,int(d),1)\n data = data.div(int(d))\n out.append(data)\n i += 1\n data = x[0].to(args.device)\n out.append(self.models[\"1\"](data))\n out = torch.stack(out)\n y = torch.sum(out, dim = 0)\n return y\n\n def train_epoch(self, training_generator, optimizer, criterion, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if args.alphaRep == \"alphaRep\":\n local_batch = nn.functional.one_hot(local_batch.long(),self.encoder.n_categories) \n local_labels = nn.functional.one_hot(local_labels.long(),self.encoder.n_categories)\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n \n self.train() \n self.zero_grad()\n if args.alphaRep == \"alphaRep\":\n output = self(local_batch.float())\n else:\n output = self(local_batch)\n #print(output.size())\n if args.decimList[0] != 1:\n loss = criterion(output, local_labels)\n else:\n output = output.transpose(1,2)\n topv, topi = local_labels.topk(1)\n topi = topi[:,:,0]\n #loss = criterion(output, local_labels)\n loss = criterion(output, topi)\n output = output.transpose(1,2)\n #print(topi.size())\n #loss = criterion(output, local_labels)\n #loss = criterion(output, topi)\n loss.backward()\n \n optimizer.step()\n train_total_loss += loss\n return train_total_loss \n \n \nclass InOutModel(nn.Module):\n def __init__(self, encoder, decoder):\n super(InOutModel, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n \n def forward(self, x):\n y = self.encoder(x)\n y = self.decoder(y)\n return y\n \n def train_epoch(self, training_generator, optimizer, criterion, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if args.alphaRep == \"alphaRep\":\n local_batch = nn.functional.one_hot(local_batch.long(),self.encoder.n_categories)\n local_labels = nn.functional.one_hot(local_labels.long(),self.encoder.n_categories)\n if 
len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n \n self.train() \n self.zero_grad()\n if args.alphaRep == \"alphaRep\":\n output = self(local_batch.float())\n else:\n output = self(local_batch)\n #print(output.size())\n if args.decimList[0] != 1:\n loss = criterion(output, local_labels)\n else:\n output = output.transpose(1,2)\n topv, topi = local_labels.topk(1)\n topi = topi[:,:,0]\n #loss = criterion(output, local_labels)\n loss = criterion(output, topi)\n output = output.transpose(1,2)\n #print(topi.size())\n #loss = criterion(output, local_labels)\n #loss = criterion(output, topi)\n loss.backward()\n \n optimizer.step()\n train_total_loss += loss\n return train_total_loss\n \n \nclass InOutModelDouble(nn.Module):\n def __init__(self, encoder, encoderTensor, decoder):\n super(InOutModelDouble, self).__init__()\n self.encoder = encoder\n self.encoderTensor = encoderTensor\n self.decoder = decoder\n \n def forward(self, x, u):\n out = []\n y1 = self.encoder(x)\n out.append(y1)\n y2 = self.encoderTensor(u)\n out.append(y2)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n return y\n \n def train_epoch(self, training_generator, optimizer, criterion, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n \n self.train() \n self.zero_grad()\n tensorSim = computeTensor(local_batch, tf_mappingR.float())\n output = self(local_batch, tensorSim)\n loss = criterion(output, local_labels)\n loss.backward()\n \n optimizer.step()\n train_total_loss += loss\n return train_total_loss\n\nclass InOutModelDoubleKey(nn.Module):\n def __init__(self, encoder, encoderKey, decoder):\n super(InOutModelDoubleKey, self).__init__()\n self.encoder = encoder\n self.encoderKey = encoderKey\n self.decoder = decoder\n \n def forward(self, x, u):\n out = []\n y1 = self.encoder(x)\n out.append(y1)\n y2 = self.encoderKey(u)\n out.append(y2)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n return y\n \n def train_epoch(self, training_generator, optimizer, criterion, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n local_beat = local_beat.to(args.device,non_blocking=True)\n local_key = local_key.to(args.device,non_blocking=True)\n self.train() \n self.zero_grad()\n #tensorSim = computeTensor(local_batch, tf_mappingR.float())\n local_key = keyToOneHot(local_key)\n output = self(local_batch, local_key)\n #print(output.size())\n output = 
output.transpose(1,2)\n topv, topi = local_labels.topk(1)\n topi = topi[:,:,0]\n #print(topi.size())\n #loss = criterion(output, local_labels)\n loss = criterion(output, topi)\n loss.backward()\n \n optimizer.step()\n train_total_loss += loss\n return train_total_loss\n\nclass InOutModelDoubleBeat(nn.Module):\n def __init__(self, encoder, encoderBeat, decoder):\n super(InOutModelDoubleBeat, self).__init__()\n self.encoder = encoder\n self.encoderBeat = encoderBeat\n self.decoder = decoder\n \n def forward(self, x, u):\n out = []\n y1 = self.encoder(x)\n out.append(y1)\n y2 = self.encoderBeat(u)\n out.append(y2)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n return y\n \n def train_epoch(self, training_generator, optimizer, criterion, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n local_beat = local_beat.to(args.device,non_blocking=True)\n local_key = local_key.to(args.device,non_blocking=True) \n self.train() \n self.zero_grad()\n #tensorSim = computeTensor(local_batch, tf_mappingR.float())\n local_beat = beatToOneHot(local_beat)\n output = self(local_batch, local_beat)\n #print(output.size())\n output = output.transpose(1,2)\n topv, topi = local_labels.topk(1)\n topi = topi[:,:,0]\n #print(topi.size())\n #loss = criterion(output, local_labels)\n loss = criterion(output, topi)\n loss.backward()\n \n optimizer.step()\n train_total_loss += loss\n return train_total_loss\n \nclass InOutModelTripleKeyBeat(nn.Module):\n def __init__(self, encoder, encoderBeat, encoderKey, decoder):\n super(InOutModelTripleKeyBeat, self).__init__()\n self.encoder = encoder\n self.encoderKey = encoderKey\n self.encoderBeat = encoderBeat\n self.decoder = decoder\n \n def forward(self, x, u, v):\n out = []\n y1 = self.encoder(x)\n out.append(y1)\n y2 = self.encoderKey(u)\n out.append(y2)\n y3 = self.encoderBeat(v)\n out.append(y3)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n return y\n \n def train_epoch(self, training_generator, optimizer, criterion, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n local_beat = local_beat.to(args.device,non_blocking=True)\n local_key = local_key.to(args.device,non_blocking=True) \n self.train() \n self.zero_grad()\n #tensorSim = computeTensor(local_batch, tf_mappingR.float())\n local_key = keyToOneHot(local_key)\n local_beat = beatToOneHot(local_beat)\n output = self(local_batch, local_key, local_beat)\n #print(output.size())\n output = output.transpose(1,2)\n topv, topi = local_labels.topk(1)\n topi = topi[:,:,0]\n #print(topi.size())\n #loss = criterion(output, local_labels)\n loss = criterion(output, topi)\n loss.backward()\n \n optimizer.step()\n train_total_loss += loss\n return 
train_total_loss\n\n\nclass InOutModelTripleRawData(nn.Module):\n def __init__(self, encTensor1,encTensor2, decoder, args):\n super(InOutModelTripleRawData, self).__init__()\n self.encoderTensor1 = encTensor1\n self.encoderTensor2 = encTensor2\n self.decoder = decoder\n self.args = args\n \n def forward(self, x):\n out = []\n u = x.view(-1, int(self.args.lenSeq * self.args.n_categories))\n out.append(u)\n y2 = self.encoderTensor1(x)\n out.append(y2)\n y3 = self.encoderTensor2(x)\n out.append(y3)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n y2 = nn.Softmax(dim=1)(y2)\n y3 = nn.Softmax(dim=1)(y3)\n return y, y2, y3\n #return y\n \n def train_epoch(self, training_generator, optimizerRecEnc, optimizerRecDec, optimizerKey, optimizerBeat, criterion, criterionKey, criterionBeat, bornInf, bornSup, tensorSim, args):\n train_total_loss = 0\n train_keytotal_loss = 0\n train_beattotal_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n local_beat = local_beat.to(args.device,non_blocking=True)\n local_key = local_key.to(args.device,non_blocking=True)\n self.train()\n \n if args.key == True:\n output, beat, key = self(local_batch)\n self.zero_grad()\n local_key = keyToOneHot(local_key)\n local_key = local_key[:,0,:]\n losskey = criterionKey(key, local_key)\n losskey.backward()\n train_keytotal_loss += losskey\n optimizerKey.step()\n \n if args.beat == True:\n output, beat, key = self(local_batch)\n self.zero_grad()\n local_beat = beatToOneHot(local_beat)\n local_beat = local_beat[:,0,:]\n lossbeat = criterionBeat(beat, local_beat)\n lossbeat.backward()\n train_beattotal_loss += lossbeat \n optimizerBeat.step()\n \n if args.rec == True:\n output, beat, key = self(local_batch)\n self.zero_grad()\n loss = criterion(output, local_labels)\n loss.backward()\n train_total_loss += loss\n #optimizerRecEnc.step()\n optimizerRecDec.step()\n \n return train_total_loss, train_keytotal_loss, train_beattotal_loss\n \nclass InOutModelTriple(nn.Module):\n def __init__(self, encoder, encTensor1,encTensor2, decoder):\n super(InOutModelTriple, self).__init__()\n self.encoder = encoder\n self.encoderTensor1 = encTensor1\n self.encoderTensor2 = encTensor2\n self.decoder = decoder\n \n def forward(self, x):\n out = []\n y1 = self.encoder(x)\n out.append(y1)\n y2 = self.encoderTensor1(x)\n out.append(y2)\n y3 = self.encoderTensor2(x)\n out.append(y3)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n y2 = nn.Softmax(dim=1)(y2)\n y3 = nn.Softmax(dim=1)(y3)\n return y, y2, y3\n #return y\n \n def train_epoch(self, training_generator, optimizerRecEnc, optimizerRecDec, optimizerKey, optimizerBeat, criterion, criterionKey, criterionBeat, bornInf, bornSup, tensorSim, args):\n train_total_loss = 0\n train_keytotal_loss = 0\n train_beattotal_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if args.alphaRep == \"alphaRep\":\n local_batch = nn.functional.one_hot(local_batch.long(),args.n_categories) \n local_labels = nn.functional.one_hot(local_labels.long(),args.n_categories)\n if len(args.decimList) == 1:\n local_batch = 
local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True)\n local_beat = local_beat.to(args.device,non_blocking=True)\n local_key = local_key.to(args.device,non_blocking=True)\n self.train()\n \n if args.key == True:\n #output, beat, key = self(local_batch)\n if args.alphaRep == \"alphaRep\":\n output, beat, key = self(local_batch.float())\n else:\n output, beat, key = self(local_batch)\n self.zero_grad()\n #local_key = keyToOneHot(local_key)\n #local_key = local_key[:,0,:]\n #print(key.size())\n #print(local_key.size())\n losskey = criterionKey(key, local_key[:,0].long())\n losskey.backward()\n train_keytotal_loss += losskey\n optimizerKey.step()\n \n if args.beat == True:\n #output, beat, key = self(local_batch)\n if args.alphaRep == \"alphaRep\":\n output, beat, key = self(local_batch.float())\n else:\n output, beat, key = self(local_batch)\n self.zero_grad()\n #local_beat = beatToOneHot(local_beat)\n #local_beat = local_beat[:,0,:]\n lossbeat = criterionBeat(beat, local_beat[:,0].long())\n lossbeat.backward()\n train_beattotal_loss += lossbeat \n optimizerBeat.step()\n \n if args.rec == True:\n #output, beat, key = self(local_batch)\n if args.alphaRep == \"alphaRep\":\n output, beat, key = self(local_batch.float())\n else:\n output, beat, key = self(local_batch)\n self.zero_grad()\n output = output.transpose(1,2)\n topv, topi = local_labels.topk(1)\n topi = topi[:,:,0]\n #loss = criterion(output, local_labels)\n loss = criterion(output, topi)\n #output = output.transpose(1,2)\n #loss = criterion(output, local_labels)\n loss.backward()\n train_total_loss += loss\n optimizerRecEnc.step()\n optimizerRecDec.step()\n \n return train_total_loss, train_keytotal_loss, train_beattotal_loss\n \nclass InOutModelTripleMatrix(nn.Module):\n def __init__(self, encoder, encTensor1,encTensor2, decoder):\n super(InOutModelTripleMatrix, self).__init__()\n self.encoder = encoder\n self.encoderTensor1 = encTensor1\n self.encoderTensor2 = encTensor2\n self.decoder = decoder\n \n def forward(self, x, m):\n out = []\n y1 = self.encoder(x, m)\n out.append(y1)\n y2 = self.encoderTensor1(x, m)\n out.append(y2)\n y3 = self.encoderTensor2(x, m)\n out.append(y3)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n y2 = nn.Softmax(dim=1)(y2)\n y3 = nn.Softmax(dim=1)(y3)\n return y, y2, y3\n #return y\n \n def train_epoch(self, training_generator, optimizerRecEnc, optimizerRecDec, optimizerKey, optimizerBeat, criterion, criterionKey, criterionBeat, bornInf, bornSup, tf_mappingR, args):\n train_total_loss = 0\n train_keytotal_loss = 0\n train_beattotal_loss = 0\n for local_batch, local_labels, local_key, local_beat in training_generator:\n if len(args.decimList) == 1:\n local_batch = local_batch[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous()\n local_labels = local_labels[:,bornInf[args.decimList[0]]:bornSup[args.decimList[0]],:].contiguous() \n local_batch, local_labels = local_batch.to(args.device,non_blocking=True), local_labels.to(args.device,non_blocking=True) \n local_beat = local_beat.to(args.device,non_blocking=True)\n local_key = local_key.to(args.device,non_blocking=True)\n self.train()\n tensorSim = computeTensor(local_batch, tf_mappingR.float())\n \n if args.key == True:\n output, beat, key = self(local_batch, tensorSim)\n 
self.zero_grad()\n local_key = keyToOneHot(local_key)\n local_key = local_key[:,0,:]\n losskey = criterionKey(key, local_key)\n losskey.backward()\n train_keytotal_loss += losskey\n optimizerKey.step()\n \n if args.beat == True:\n output, beat, key = self(local_batch, tensorSim)\n self.zero_grad()\n local_beat = beatToOneHot(local_beat)\n local_beat = local_beat[:,0,:]\n lossbeat = criterionBeat(beat, local_beat)\n lossbeat.backward()\n train_beattotal_loss += lossbeat \n optimizerBeat.step()\n \n if args.rec == True:\n output, beat, key = self(local_batch, tensorSim)\n self.zero_grad()\n loss = criterion(output, local_labels)\n loss.backward()\n train_total_loss += loss\n optimizerRecEnc.step()\n optimizerRecDec.step()\n \n return train_total_loss, train_keytotal_loss, train_beattotal_loss\n \n \n \nclass InOutModelTripleMatrixBis(nn.Module):\n def __init__(self, encoder, encTensor1,encTensor2, decoder):\n super(InOutModelTripleMatrixBis, self).__init__()\n self.encoder = encoder\n self.encoderTensor1 = encTensor1\n self.encoderTensor2 = encTensor2\n self.decoder = decoder\n \n def forward(self, x, m):\n out = []\n y1 = self.encoder(x, m)\n out.append(y1)\n y2 = self.encoderTensor1(x)\n out.append(y2)\n y3 = self.encoderTensor2(x)\n out.append(y3)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n y2 = nn.Softmax(dim=1)(y2)\n y3 = nn.Softmax(dim=1)(y3)\n return y, y2, y3\n #return y\n \nclass InOutModelTripleOLD1402(nn.Module):\n def __init__(self, encoder, encTensor1,encTensor2, decoder):\n super(InOutModelTripleOLD1402, self).__init__()\n self.encoder = encoder\n self.encoderTensor1 = encTensor1\n self.encoderTensor2 = encTensor2\n self.decoder = decoder\n \n def forward(self, x, u,v):\n out = []\n y1 = self.encoder(x)\n out.append(y1)\n y2 = self.encoderTensor1(u)\n out.append(y2)\n y3 = self.encoderTensor2(v)\n out.append(y3)\n out = torch.cat(out, 1)\n y = self.decoder(out)\n return y, y2, y3\n #return y\n \nclass FinalModel(nn.Module):\n def __init__(self, encoder, decoder):\n super(FinalModel, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n \n def forward(self, x, out):\n y = self.encoder(x)\n y = self.decoder(y, out)\n return y\n \nclass EncoderMLP(nn.Module):\n def __init__(self, lenSeq, n_categories, n_hidden, n_latent, decimRatio,n_layer = 1, dropRatio = 0.5):\n super(EncoderMLP, self).__init__()\n self.fc1 = nn.Linear(int(lenSeq * n_categories / decimRatio), n_hidden)\n self.bn1 = nn.BatchNorm1d(n_hidden)\n self.fc2 = nn.ModuleList()\n self.bn2 = nn.ModuleList()\n for i in range(n_layer):\n self.fc2.append(nn.Linear(n_hidden, n_hidden))\n self.bn2.append(nn.BatchNorm1d(n_hidden))\n self.fc3 = nn.Linear(n_hidden, n_latent)\n self.drop_layer = nn.Dropout(p=dropRatio)\n self.n_categories = n_categories\n self.decimRatio = decimRatio\n self.lenSeq = lenSeq\n self.n_layer = n_layer\n def forward(self, x):\n x = x.view(-1, int(self.lenSeq * self.n_categories/ self.decimRatio))\n x = F.relu(self.bn1(self.fc1(x)))\n for i in range(self.n_layer):\n x = self.drop_layer(x)\n x = F.relu(self.bn2[i](self.fc2[i](x)))\n x = self.fc3(x)\n return x\n \n \nclass NetConv(nn.Module):\n def __init__(self):\n super(NetConv, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=4,padding=True)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=3,padding=True)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(20, 50)\n self.fc2 = nn.Linear(50, 5)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x.view(-1,1,8,8)), 2))\n x = 
F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 20)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n #return F.log_softmax(x)\n return x\n\nclass GaussianNoise(nn.Module):\n \"\"\"Gaussian noise regularizer.\n\n Args:\n sigma (float, optional): relative standard deviation used to generate the\n noise. Relative means that it will be multiplied by the magnitude of\n the value your are adding the noise to. This means that sigma can be\n the same regardless of the scale of the vector.\n is_relative_detach (bool, optional): whether to detach the variable before\n computing the scale of the noise. If `False` then the scale of the noise\n won't be seen as a constant but something to optimize: this will bias the\n network to generate vectors with smaller values.\n \"\"\"\n\n def __init__(self, args, sigma=0.1, is_relative_detach=True):\n super().__init__()\n self.sigma = sigma\n self.is_relative_detach = is_relative_detach\n self.noise = torch.tensor(0.0).to(args.device)\n\n def forward(self, x):\n if self.training and self.sigma != 0:\n scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x\n sampled_noise = self.noise.repeat(*x.size()).normal_() * scale\n x = x + sampled_noise\n return x \n\n# Convolutional neural network (two convolutional layers) \nclass ConvNet(nn.Module):\n def __init__(self, args, num_classes=25, drop_outRate = 0.6):\n super(ConvNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.BatchNorm2d(1),\n GaussianNoise(args, 0.3),\n nn.Conv2d(1, 64, kernel_size=(3,25), stride=1, padding=0),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Dropout(drop_outRate),\n #nn.MaxPool2d(kernel_size=(3,1), stride=1),\n nn.Conv2d(64, 32, kernel_size=(6,1), stride=1, padding=0),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.Dropout(drop_outRate),\n nn.Conv2d(32, 200, kernel_size=(1,1), stride=1, padding=0),\n nn.BatchNorm2d(200),\n nn.ReLU(),\n nn.Dropout(drop_outRate))\n self.layer5 = nn.Sequential(nn.Linear(200,200), nn.Linear(200,50))\n \n def forward(self, x):\n out = self.layer1(x) \n out = out.view(out.size(0), -1)\n out = self.layer5(out)\n return out\n\nclass dilatConv(nn.Module):\n def __init__(self, lenSeq, lenPred, n_categories, latent):\n super(dilatConv, self).__init__()\n self.conv1 = nn.Conv2d(1, 250, 2, 1, dilation = (2,1))\n self.conv2 = nn.Conv2d(250, 25, 2, 1, dilation = (4,1))\n self.fc1 = nn.Linear(1150, 500)\n self.fc2 = nn.Linear(500, latent)\n self.lenPred = lenPred\n self.lenSeq = lenSeq\n self.n_categories = n_categories\n self.latent = latent\n\n def forward(self, x):\n x = x.view(-1, 1, self.lenSeq, self.n_categories)\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = x.view(-1, 1150)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n\n return x\n \n def name(self):\n return \"dilatConv\"\n \nclass dilatConvBatch(nn.Module):\n def __init__(self, lenSeq, lenPred, n_categories, latent, drop_outRate = 0.6):\n super(dilatConvBatch, self).__init__()\n self.batch0 = nn.BatchNorm2d(1)\n self.conv1 = nn.Conv2d(1, 250, 2, 1, dilation = (2,1))\n self.batch1 = nn.BatchNorm2d(250)\n self.do1 = nn.Dropout(drop_outRate)\n self.conv2 = nn.Conv2d(250, 25, 2, 1, dilation = (4,1))\n self.batch2 = nn.BatchNorm2d(25)\n self.do2 = nn.Dropout(drop_outRate)\n self.fc1 = nn.Linear(1150, 500)\n self.fc2 = nn.Linear(500, latent)\n self.lenPred = lenPred\n self.lenSeq = lenSeq\n self.n_categories = n_categories\n self.latent = latent\n\n def forward(self, x):\n x = self.batch0(x.view(-1, 1, self.lenSeq, 
self.n_categories))\n x = self.do1(F.relu(self.batch1(self.conv1(x))))\n x = self.do2(F.relu(self.batch2(self.conv2(x))))\n x = x.view(-1, 1150)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n\n return x\n \nclass dilatConvBatchV2(nn.Module):\n def __init__(self, lenSeq, lenPred, n_categories, latent, drop_outRate = 0.6):\n super(dilatConvBatchV2, self).__init__()\n self.batch0 = nn.BatchNorm2d(1)\n self.conv1 = nn.Conv2d(1, 50, (2,25), 1, dilation = (2,1))\n self.batch1 = nn.BatchNorm2d(50)\n self.do1 = nn.Dropout(drop_outRate)\n self.conv2 = nn.Conv2d(50, 30, (2,1) , 1, dilation = (4,1))\n self.batch2 = nn.BatchNorm2d(30)\n self.do2 = nn.Dropout(drop_outRate)\n #self.conv3 = nn.Conv2d(30, 10, 2, 1, dilation = (8,1))\n #self.batch3 = nn.BatchNorm2d(10)\n #self.do3 = nn.Dropout(drop_outRate)\n self.fc1 = nn.Linear(60, 100)\n self.fc2 = nn.Linear(100, latent)\n self.lenPred = lenPred\n self.lenSeq = lenSeq\n self.n_categories = n_categories\n self.latent = latent\n\n def forward(self, x):\n x = self.batch0(x.view(-1, 1, self.lenSeq, self.n_categories))\n x = self.do1(F.relu(self.batch1(self.conv1(x))))\n x = self.do2(F.relu(self.batch2(self.conv2(x))))\n #x = self.do3(F.relu(self.batch3(self.conv3(x))))\n x = x.view(-1, 60)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n\n return x\n \n def name(self):\n return \"dilatConv\"\n \nclass DecoderMLP(nn.Module):\n def __init__(self, lenPred, n_categories, n_hidden, n_latent, decimRatio, n_layer = 1, dropRatio = 0.5):\n super(DecoderMLP, self).__init__()\n self.fc1 = nn.Linear(n_latent , n_hidden)\n self.bn1 = nn.BatchNorm1d(n_hidden)\n self.fc2 = nn.ModuleList()\n self.bn2 = nn.ModuleList()\n for i in range(n_layer):\n self.fc2.append(nn.Linear(n_hidden, n_hidden))\n self.bn2.append(nn.BatchNorm1d(n_hidden))\n self.fc3 = nn.Linear(n_hidden, int(lenPred * n_categories / decimRatio))\n self.drop_layer = nn.Dropout(p=dropRatio)\n self.n_categories = n_categories\n self.decimRatio = decimRatio\n self.lenPred = lenPred\n self.n_layer = n_layer\n def forward(self, x):\n x = F.relu(self.bn1(self.fc1(x)))\n for i in range(self.n_layer):\n x = self.drop_layer(x)\n x = F.relu(self.bn2[i](self.fc2[i](x)))\n x = self.fc3(x)\n x = x.view(-1, int(self.lenPred / self.decimRatio), self.n_categories)\n #if self.decimRatio == 1 :\n # x = nn.Softmax(dim=2)(x)\n if self.decimRatio != 1 :\n x = F.relu(x)\n return x\n \nclass DecoderMLPKey(nn.Module):\n def __init__(self, lenPred, n_categories, n_hidden, n_latent, decimRatio, n_layer = 1, dropRatio = 0.5):\n super(DecoderMLPKey, self).__init__()\n self.fc1 = nn.Linear(n_latent , n_hidden)\n self.bn1 = nn.BatchNorm1d(n_hidden)\n self.fc2 = nn.ModuleList()\n self.bn2 = nn.ModuleList()\n for i in range(n_layer):\n self.fc2.append(nn.Linear(n_hidden, n_hidden))\n self.bn2.append(nn.BatchNorm1d(n_hidden))\n self.fc3 = nn.Linear(n_hidden, int(lenPred * n_categories / decimRatio))\n self.drop_layer = nn.Dropout(p=dropRatio)\n self.n_categories = n_categories\n self.decimRatio = decimRatio\n self.lenPred = lenPred\n self.n_layer = n_layer\n def forward(self, x):\n x = F.relu(self.bn1(self.fc1(x)))\n for i in range(self.n_layer):\n x = self.drop_layer(x)\n x = F.relu(self.bn2[i](self.fc2[i](x)))\n x = self.fc3(x)\n x = x.view(-1, self.n_categories)\n #if self.decimRatio == 1 :\n # x = nn.Softmax(dim=1)(x)\n #else:\n # x = F.relu(x)\n #x = F.sigmoid(x)\n return x \n \nclass DecoderFinal(nn.Module):\n def __init__(self, lenSeq, lenPred, n_categories, n_hidden, n_latent, n_layer = 1, dropRatio = 0.5):\n super(DecoderFinal, 
self).__init__()\n self.fc1 = nn.Linear(n_latent , n_hidden)\n self.bn1 = nn.BatchNorm1d(n_hidden)\n self.fc2 = nn.ModuleList()\n self.bn2 = nn.ModuleList()\n for i in range(n_layer):\n self.fc2.append(nn.Linear(n_hidden, n_hidden))\n self.bn2.append(nn.BatchNorm1d(n_hidden))\n self.fc3 = nn.Linear(n_hidden, lenPred * n_categories)\n self.drop_layer = nn.Dropout(p=dropRatio)\n self.n_categories = n_categories\n self.lenPred = lenPred\n self.n_layer = n_layer\n def forward(self, x, out):\n x = torch.cat((x,out), 1) \n x = F.relu(self.bn1(self.fc1(x)))\n for i in range(self.n_layer):\n x = self.drop_layer(x)\n x = F.relu(self.bn2[i](self.fc2[i](x)))\n x = self.fc3(x)\n x = x.view(-1, self.lenPred, self.n_categories)\n #x = nn.Softmax(dim=2)(x)\n return x\n\n#%%\n#%%\nclass VAEModelFamily(nn.Module):\n def __init__(self):\n super(VAEModelFamily, self).__init__()\n self.models = nn.ModuleDict()\n self.decim = []\n\n def addModel(self, model, decim):\n self.models[decim] = model\n self.decim.append(decim) \n \n def reparametrize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std \n def forward(self, x, args):\n out = []\n i = 0\n for d in self.decim:\n if d != str(1) :\n data = x[i].to(args.device)\n out1, out2 = self.models[d].encoder(data)\n out.append(self.reparametrize(out1,out2))\n i += 1\n out = torch.cat(out, 1)\n data = x[0].to(args.device)\n #print(data)\n y = self.models[\"1\"](data,out)\n return y\n \nclass VAEInOutModel(nn.Module):\n def __init__(self, encoder, decoder):\n super(VAEInOutModel, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n \n def forward(self, x):\n y1, y2 = self.encoder(x)\n y = self.decoder(y1, y2)\n return y\n \nclass VAEFinalModel(nn.Module):\n def __init__(self, encoder, decoder):\n super(VAEFinalModel, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n \n def forward(self, x, out):\n y1, y2 = self.encoder(x)\n y = self.decoder(y1, y2, out)\n return y\n \nclass VAEEncoderMLP(nn.Module):\n def __init__(self, lenSeq, n_categories, n_hidden, n_latent, decimRatio, n_layer = 1, dropRatio = 0.5):\n super(VAEEncoderMLP, self).__init__()\n self.fc1 = nn.Linear(int(lenSeq * n_categories / decimRatio), n_hidden)\n self.fc2 = nn.ModuleList()\n for i in range(n_layer):\n self.fc2.append(nn.Linear(n_hidden, n_hidden))\n self.fc31 = nn.Linear(n_hidden, n_latent)\n self.fc32 = nn.Linear(n_hidden, n_latent)\n self.drop_layer = nn.Dropout(p=dropRatio)\n self.n_categories = n_categories\n self.decimRatio = decimRatio\n self.lenSeq = lenSeq\n self.n_layer = n_layer\n def forward(self, x):\n x = x.view(-1, int(self.lenSeq * self.n_categories/ self.decimRatio))\n x = F.relu(self.fc1(x))\n for i in range(self.n_layer):\n x = self.drop_layer(x)\n x = F.relu(self.fc2[i](x))\n x1 = self.fc31(x)\n x2 = self.fc32(x)\n return x1, x2\n \nclass VAEDecoderMLP(nn.Module):\n def __init__(self, lenPred, n_categories, n_hidden, n_latent, decimRatio, n_layer = 1, dropRatio = 0.5):\n super(VAEDecoderMLP, self).__init__()\n self.fc1 = nn.Linear(n_latent , n_hidden)\n self.fc2 = nn.ModuleList()\n for i in range(n_layer):\n self.fc2.append(nn.Linear(n_hidden, n_hidden))\n self.fc3 = nn.Linear(n_hidden, int(lenPred * n_categories / decimRatio))\n self.drop_layer = nn.Dropout(p=dropRatio)\n self.n_categories = n_categories\n self.decimRatio = decimRatio\n self.lenPred = lenPred\n self.n_layer = n_layer\n def reparametrize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + 
eps*std \n def forward(self, x1, x2):\n z = self.reparametrize(x1, x2)\n x = F.relu(self.fc1(z))\n for i in range(self.n_layer):\n x = self.drop_layer(x)\n x = F.relu(self.fc2[i](x))\n x = self.fc3(x)\n x = x.view(-1, int(self.lenPred / self.decimRatio), self.n_categories)\n if self.decimRatio == 1 :\n x = nn.Softmax(dim=2)(x)\n return x, x1, x2\n \nclass VAEDecoderFinal(nn.Module):\n def __init__(self, lenSeq, lenPred, n_categories, n_hidden, n_latent, n_layer = 1, dropRatio = 0.5):\n super(VAEDecoderFinal, self).__init__()\n self.fc1 = nn.Linear(n_latent , n_hidden)\n self.fc2 = nn.ModuleList()\n for i in range(n_layer):\n self.fc2.append(nn.Linear(n_hidden, n_hidden))\n self.fc3 = nn.Linear(n_hidden, lenPred * n_categories)\n self.drop_layer = nn.Dropout(p=dropRatio)\n self.n_categories = n_categories\n self.lenPred = lenPred\n self.n_layer = n_layer\n def reparametrize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n def forward(self, x1, x2, out):\n x = self.reparametrize(x1, x2)\n x = torch.cat([x,out], 1) \n x = F.relu(self.fc1(x))\n for i in range(self.n_layer):\n x = self.drop_layer(x)\n x = F.relu(self.fc2[i](x))\n x = self.fc3(x)\n x = x.view(-1, self.lenPred, self.n_categories)\n x = nn.Softmax(dim=2)(x)\n return x, x1, x2\n#%%\nclass MLPNet(nn.Module):\n def __init__(self, lenSeq, lenPred, n_categories):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(lenSeq * n_categories, 1000)\n self.fc2 = nn.Linear(1000, 1000)\n self.fc3 = nn.Linear(1000, lenPred * n_categories)\n self.lenPred = lenPred\n self.n_categories = n_categories\n def forward(self, x):\n x = x.view(-1, 16*25)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n x = x.view(-1, self.lenPred, self.n_categories)\n x = nn.Softmax(dim=2)(x)\n return x\n \n#%%\nclass LeNet(nn.Module):\n def __init__(self, lenSeq, lenPred, n_categories):\n super(LeNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(8*17*50, 500)\n self.fc2 = nn.Linear(500, lenPred * n_categories)\n self.lenPred = lenPred\n self.lenSeq = lenSeq\n self.n_categories = n_categories\n\n def forward(self, x):\n x = x.view(-1, 1, self.lenSeq, self.n_categories)\n x = F.relu(self.conv1(x))\n #x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n #x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 8*17*50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n x = x.view(-1, self.lenPred, self.n_categories)\n x = nn.Softmax(dim=2)(x)\n return x\n \n def name(self):\n return \"LeNet\"\n \n#%%\n\n#%%\nn_inputs = 25\nn_hidden = 128\nbatch_size = 500\nlenSeq = 16\nn_categories=25\nclass MockupModel(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.model = nn.ModuleDict({\n 'lstm': nn.LSTM(\n input_size=n_inputs, # 45, see the data definition\n hidden_size=n_hidden, # Can vary\n num_layers = 3,\n dropout = 0.6, #0.6\n batch_first = True\n ),\n 'linear': nn.Linear(\n in_features=n_hidden,\n out_features=n_categories)\n })\n \n def forward(self, x):\n\n # From [batches, seqs, seq len, features]\n # to [seq len, batch data, features]\n # Data is fed to the LSTM\n out, _ = self.model['lstm'](x)\n #print(f'lstm output={out.size()}')\n\n # From [seq len, batch, num_directions * hidden_size]\n # to [batches, seqs, seq_len,prediction]\n out = out.view(batch_size, lenSeq, -1)\n #print(f'transformed output={out.size()}')\n\n # Data is fed to the Linear layer\n out = self.model['linear'](out)\n #print(f'linear 
output={out.size()}')\n\n # The prediction utilizing the whole sequence is the last one\n #y_pred = nn.Softmax()(y_pred)\n y_pred = out[:, -1]\n y_pred = nn.Softmax()(y_pred)\n \n #print(f'y_pred={y_pred.size()}')\n\n return y_pred\n#%% \nclass MockupModelMask(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.model = nn.ModuleDict({\n 'lstm': nn.LSTM(\n input_size=n_inputs, # 45, see the data definition\n hidden_size=n_hidden, # Can vary\n num_layers = 3,\n dropout = 0.6, #0.6\n batch_first = True\n ),\n 'linear': nn.Linear(\n in_features=n_hidden,\n out_features=n_categories)\n })\n \n def forward(self, x, nbZero, mask = False):\n\n # From [batches, seqs, seq len, features]\n # to [seq len, batch data, features]\n if mask == True:\n for i in range(x.size()[0]):\n for j in range(nbZero):\n x[i][randint(0,15)] = torch.zeros(n_inputs)\n # Data is fed to the LSTM\n out, _ = self.model['lstm'](x)\n #print(f'lstm output={out.size()}')\n\n # From [seq len, batch, num_directions * hidden_size]\n # to [batches, seqs, seq_len,prediction]\n out = out.view(batch_size, lenSeq, -1)\n #print(f'transformed output={out.size()}')\n\n # Data is fed to the Linear layer\n out = self.model['linear'](out)\n #print(f'linear output={out.size()}')\n\n # The prediction utilizing the whole sequence is the last one\n #y_pred = nn.Softmax()(y_pred)\n y_pred = out[:, -1]\n y_pred = nn.Softmax()(y_pred)\n \n #print(f'y_pred={y_pred.size()}')\n\n return y_pred\n#%%\nclass ResBlock(nn.Module):\n def __init__(self, dim, dim_res=32):\n super().__init__()\n self.block = nn.Sequential(\n nn.ReLU(True),\n nn.Conv2d(dim, dim_res, 3, 1, 1),\n nn.BatchNorm2d(dim_res),\n nn.ReLU(True),\n nn.Conv2d(dim_res, dim, 1),\n nn.BatchNorm2d(dim),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n return x + self.block(x)\n \nclass View1(nn.Module):\n def __init__(self):\n super(View1, self).__init__()\n \n def forward(self, x):\n return x.view(-1,16*24)\n \nclass View2(nn.Module):\n def __init__(self):\n super(View2, self).__init__()\n \n def forward(self, x):\n return x.view(-1,1,16,25) #make it with lenPred\n#%%\n# Construct encoders and decoders for different types\ndef construct_enc_dec(input_dim, dim, embed_dim = 64):\n encoder, decoder = None, None\n # Image data\n encoder = nn.Sequential(\n nn.Conv2d(input_dim, int(dim / 2), 4, 2, 1),\n #nn.BatchNorm2d(dim),\n nn.ReLU(True),\n nn.Conv2d(int(dim / 2), dim, 4, 2, 1),\n #nn.BatchNorm2d(dim),\n nn.ReLU(True),\n nn.Conv2d(dim, dim, 3, 1, 1),\n ResBlock(dim),\n ResBlock(dim),\n nn.Conv2d(dim, embed_dim, 1)\n )\n decoder = nn.Sequential(\n nn.ConvTranspose2d(embed_dim, dim, 3, 1, 1),\n ResBlock(dim),\n ResBlock(dim),\n nn.ConvTranspose2d(dim, int(dim / 2), 4, 2, 1),\n #nn.BatchNorm2d(dim),\n nn.ReLU(True),\n nn.ConvTranspose2d(int(dim / 2), input_dim, 4, 2, 1),\n View1(),\n nn.Linear(16*24,16*25), #make it with lenPred\n View2()\n #nn.Tanh()\n )\n return encoder, decoder\n\n#%% Seq 2 Seq from https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html -> see also attention is page\nclass EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size, device):\n super(EncoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n \n self.device = device\n \n def forward(self, input, hidden):\n embedded = self.embedding(input).view(1, 1, -1)\n output = embedded\n output, hidden = self.gru(output, hidden)\n return output, hidden\n\n def initHidden(self):\n return 
torch.zeros(1, 1, self.hidden_size, device=self.device)\n \nclass DecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size, device):\n super(DecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n self.out = nn.Linear(hidden_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n \n self.device = device\n\n def forward(self, input, hidden):\n output = self.embedding(input).view(1, 1, -1)\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n output = self.softmax(self.out(output[0]))\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=self.device)\n \n\n \n"
] | [
[
"torch.randn_like",
"torch.nn.Softmax",
"torch.nn.Dropout2d",
"torch.cat",
"torch.nn.functional.dropout",
"torch.zeros",
"torch.nn.GRU",
"torch.sum",
"torch.nn.Embedding",
"torch.nn.Dropout",
"torch.nn.ModuleDict",
"torch.tensor",
"torch.nn.functional.relu",
"torch.nn.BatchNorm1d",
"torch.nn.LogSoftmax",
"torch.nn.ConvTranspose2d",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.exp",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.stack",
"torch.nn.LSTM",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kuo1220/verbose-barnacle | [
"7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae",
"0a1b9ed01e48092f4167e366cf7496c2b111ef6d"
] | [
"tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py",
"tensorflow/python/ops/variables.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for GBDT train function.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom google.protobuf import text_format\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.boosted_trees.proto import learner_pb2\nfrom tensorflow.contrib.boosted_trees.proto import tree_config_pb2\nfrom tensorflow.contrib.boosted_trees.python.ops import model_ops\nfrom tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch\nfrom tensorflow.contrib.boosted_trees.python.utils import losses\nfrom tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn\nfrom tensorflow.python.feature_column import feature_column_lib as core_feature_column\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resources\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\n\n\ndef _squared_loss(label, unused_weights, predictions):\n \"\"\"Unweighted loss implementation.\"\"\"\n loss = math_ops.reduce_sum(\n math_ops.square(predictions - label), 1, keepdims=True)\n return loss\n\n\ndef _append_to_leaf(leaf, c_id, w):\n \"\"\"Helper method for building tree leaves.\n\n Appends weight contributions for the given class index to a leaf node.\n\n Args:\n leaf: leaf node to append to.\n c_id: class Id for the weight update.\n w: weight contribution value.\n \"\"\"\n leaf.sparse_vector.index.append(c_id)\n leaf.sparse_vector.value.append(w)\n\n\ndef _set_float_split(split, feat_col, thresh, l_id, r_id):\n \"\"\"Helper method for building tree float splits.\n\n Sets split feature column, threshold and children.\n\n Args:\n split: split node to update.\n feat_col: feature column for the split.\n thresh: threshold to split on forming rule x <= thresh.\n l_id: left child Id.\n r_id: right child Id.\n \"\"\"\n split.feature_column = feat_col\n split.threshold = thresh\n split.left_id = l_id\n split.right_id = r_id\n\n\nclass GbdtTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n super(GbdtTest, self).setUp()\n\n def testExtractFeatures(self):\n \"\"\"Tests feature extraction.\"\"\"\n with self.test_session():\n features = {}\n features[\"dense_float\"] = array_ops.zeros([2, 1], dtypes.float32)\n features[\"sparse_float\"] = sparse_tensor.SparseTensor(\n array_ops.zeros([2, 2], dtypes.int64),\n array_ops.zeros([2], dtypes.float32),\n array_ops.zeros([2], dtypes.int64))\n features[\"sparse_int\"] = sparse_tensor.SparseTensor(\n 
array_ops.zeros([2, 2], dtypes.int64),\n array_ops.zeros([2], dtypes.int64), array_ops.zeros([2],\n dtypes.int64))\n (fc_names, dense_floats, sparse_float_indices, sparse_float_values,\n sparse_float_shapes, sparse_int_indices, sparse_int_values,\n sparse_int_shapes) = (\n gbdt_batch.extract_features(features, None, use_core_columns=False))\n self.assertEqual(len(fc_names), 3)\n self.assertAllEqual(fc_names,\n [\"dense_float\", \"sparse_float\", \"sparse_int\"])\n self.assertEqual(len(dense_floats), 1)\n self.assertEqual(len(sparse_float_indices), 1)\n self.assertEqual(len(sparse_float_values), 1)\n self.assertEqual(len(sparse_float_shapes), 1)\n self.assertEqual(len(sparse_int_indices), 1)\n self.assertEqual(len(sparse_int_values), 1)\n self.assertEqual(len(sparse_int_shapes), 1)\n self.assertAllEqual(dense_floats[0].eval(),\n features[\"dense_float\"].eval())\n self.assertAllEqual(sparse_float_indices[0].eval(),\n features[\"sparse_float\"].indices.eval())\n self.assertAllEqual(sparse_float_values[0].eval(),\n features[\"sparse_float\"].values.eval())\n self.assertAllEqual(sparse_float_shapes[0].eval(),\n features[\"sparse_float\"].dense_shape.eval())\n self.assertAllEqual(sparse_int_indices[0].eval(),\n features[\"sparse_int\"].indices.eval())\n self.assertAllEqual(sparse_int_values[0].eval(),\n features[\"sparse_int\"].values.eval())\n self.assertAllEqual(sparse_int_shapes[0].eval(),\n features[\"sparse_int\"].dense_shape.eval())\n\n def testExtractFeaturesWithTransformation(self):\n \"\"\"Tests feature extraction.\"\"\"\n with self.test_session():\n features = {}\n features[\"dense_float\"] = array_ops.zeros([2, 1], dtypes.float32)\n features[\"sparse_float\"] = sparse_tensor.SparseTensor(\n array_ops.zeros([2, 2], dtypes.int64),\n array_ops.zeros([2], dtypes.float32),\n array_ops.zeros([2], dtypes.int64))\n features[\"sparse_categorical\"] = sparse_tensor.SparseTensor(\n array_ops.zeros([2, 2], dtypes.int64),\n array_ops.zeros([2], dtypes.string), array_ops.zeros([2],\n dtypes.int64))\n feature_columns = set()\n feature_columns.add(layers.real_valued_column(\"dense_float\"))\n feature_columns.add(\n layers.feature_column._real_valued_var_len_column(\n \"sparse_float\", is_sparse=True))\n feature_columns.add(\n feature_column_lib.sparse_column_with_hash_bucket(\n \"sparse_categorical\", hash_bucket_size=1000000))\n (fc_names, dense_floats, sparse_float_indices, sparse_float_values,\n sparse_float_shapes, sparse_int_indices, sparse_int_values,\n sparse_int_shapes) = (\n gbdt_batch.extract_features(\n features, feature_columns, use_core_columns=False))\n self.assertEqual(len(fc_names), 3)\n self.assertAllEqual(fc_names,\n [\"dense_float\", \"sparse_float\", \"sparse_categorical\"])\n self.assertEqual(len(dense_floats), 1)\n self.assertEqual(len(sparse_float_indices), 1)\n self.assertEqual(len(sparse_float_values), 1)\n self.assertEqual(len(sparse_float_shapes), 1)\n self.assertEqual(len(sparse_int_indices), 1)\n self.assertEqual(len(sparse_int_values), 1)\n self.assertEqual(len(sparse_int_shapes), 1)\n self.assertAllEqual(dense_floats[0].eval(),\n features[\"dense_float\"].eval())\n self.assertAllEqual(sparse_float_indices[0].eval(),\n features[\"sparse_float\"].indices.eval())\n self.assertAllEqual(sparse_float_values[0].eval(),\n features[\"sparse_float\"].values.eval())\n self.assertAllEqual(sparse_float_shapes[0].eval(),\n features[\"sparse_float\"].dense_shape.eval())\n self.assertAllEqual(sparse_int_indices[0].eval(),\n features[\"sparse_categorical\"].indices.eval())\n 
self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])\n self.assertAllEqual(sparse_int_shapes[0].eval(),\n features[\"sparse_categorical\"].dense_shape.eval())\n\n def testExtractFeaturesFromCoreFeatureColumns(self):\n \"\"\"Tests feature extraction when using core columns.\"\"\"\n with self.test_session():\n features = {}\n # Sparse float column does not exist in core, so only dense numeric and\n # categorical.\n features[\"dense_float\"] = array_ops.zeros([2, 1], dtypes.float32)\n features[\"sparse_categorical\"] = sparse_tensor.SparseTensor(\n array_ops.zeros([2, 2], dtypes.int64),\n array_ops.zeros([2], dtypes.string), array_ops.zeros([2],\n dtypes.int64))\n\n feature_columns = set()\n feature_columns.add(core_feature_column.numeric_column(\"dense_float\"))\n feature_columns.add(\n core_feature_column.categorical_column_with_hash_bucket(\n \"sparse_categorical\", hash_bucket_size=1000000))\n (fc_names, dense_floats, _, _, _, sparse_int_indices, sparse_int_values,\n sparse_int_shapes) = (\n gbdt_batch.extract_features(\n features, feature_columns, use_core_columns=True))\n self.assertEqual(len(fc_names), 2)\n self.assertAllEqual(fc_names, [\"dense_float\", \"sparse_categorical\"])\n self.assertEqual(len(dense_floats), 1)\n self.assertEqual(len(sparse_int_indices), 1)\n self.assertEqual(len(sparse_int_values), 1)\n self.assertEqual(len(sparse_int_shapes), 1)\n self.assertAllEqual(dense_floats[0].eval(),\n features[\"dense_float\"].eval())\n self.assertAllEqual(sparse_int_indices[0].eval(),\n features[\"sparse_categorical\"].indices.eval())\n self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])\n self.assertAllEqual(sparse_int_shapes[0].eval(),\n features[\"sparse_categorical\"].dense_shape.eval())\n\n def testTrainFnChiefNoBiasCentering(self):\n \"\"\"Tests the train function running on chief without bias centering.\"\"\"\n with self.test_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 12,\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n 
resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n\n # On second run, expect a trivial split to be chosen to basically\n # predict the average.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 1)\n self.assertAllClose(output.tree_weights, [0.1])\n self.assertEquals(stamp_token.eval(), 2)\n expected_tree = \"\"\"\n nodes {\n dense_float_binary_split {\n threshold: 1.0\n left_id: 1\n right_id: 2\n }\n node_metadata {\n gain: 0\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.0\n }\n }\n }\"\"\"\n self.assertProtoEquals(expected_tree, output.trees[0])\n\n def testTrainFnChiefSparseAndDense(self):\n \"\"\"Tests the train function with sparse and dense features.\"\"\"\n with self.test_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n features[\"sparse_float\"] = sparse_tensor.SparseTensor(\n array_ops.zeros([2, 2], dtypes.int64),\n array_ops.zeros([2], dtypes.float32),\n array_ops.constant([4, 1], dtypes.int64))\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 12,\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output 
= tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 1)\n self.assertAllClose(output.tree_weights, [0.1])\n self.assertEquals(stamp_token.eval(), 2)\n expected_tree = \"\"\"\n nodes {\n sparse_float_binary_split_default_right {\n split{\n left_id: 1\n right_id: 2\n }\n }\n node_metadata {\n gain: 1.125\n }\n }\n nodes {\n leaf {\n vector {\n value: 1.0\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: -0.5\n }\n }\n }\"\"\"\n self.assertProtoEquals(expected_tree, output.trees[0])\n\n def testTrainFnChiefScalingNumberOfExamples(self):\n \"\"\"Tests the train function running on chief without bias centering.\"\"\"\n with self.test_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n num_examples_fn = (\n lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1)\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=num_examples_fn,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 12,\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n\n # On second run, expect a trivial split to be chosen to basically\n # predict the 
average.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 1)\n self.assertAllClose(output.tree_weights, [0.1])\n self.assertEquals(stamp_token.eval(), 2)\n expected_tree = \"\"\"\n nodes {\n dense_float_binary_split {\n threshold: 1.0\n left_id: 1\n right_id: 2\n }\n node_metadata {\n gain: 0\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.0\n }\n }\n }\"\"\"\n self.assertProtoEquals(expected_tree, output.trees[0])\n\n def testTrainFnChiefWithBiasCentering(self):\n \"\"\"Tests the train function running on chief with bias centering.\"\"\"\n with self.test_session():\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=True,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 12,\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect bias to be centered.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n expected_tree = \"\"\"\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\"\"\"\n self.assertEquals(len(output.trees), 1)\n self.assertAllEqual(output.tree_weights, [1.0])\n self.assertProtoEquals(expected_tree, output.trees[0])\n self.assertEquals(stamp_token.eval(), 1)\n\n def testTrainFnNonChiefNoBiasCentering(self):\n \"\"\"Tests the train function running on worker without bias centering.\"\"\"\n with self.test_session():\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n 
learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=False,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Regardless of how many times the train op is run, a non-chief worker\n # can only accumulate stats so the tree ensemble never changes.\n for _ in range(5):\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 0)\n\n def testTrainFnNonChiefWithCentering(self):\n \"\"\"Tests the train function running on worker with bias centering.\"\"\"\n with self.test_session():\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=False,\n num_ps_replicas=0,\n center_bias=True,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Regardless of how many 
times the train op is run, a non-chief worker\n # can only accumulate stats so the tree ensemble never changes.\n for _ in range(5):\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 0)\n\n def testPredictFn(self):\n \"\"\"Tests the predict function.\"\"\"\n with self.test_session() as sess:\n # Create ensemble with one bias node.\n ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n text_format.Merge(\n \"\"\"\n trees {\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\n }\n tree_weights: 1.0\n tree_metadata {\n num_tree_weight_updates: 1\n num_layers_grown: 1\n is_finalized: true\n }\"\"\", ensemble_config)\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=3,\n tree_ensemble_config=ensemble_config.SerializeToString(),\n name=\"tree_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=False,\n num_ps_replicas=0,\n center_bias=True,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n # Create predict op.\n mode = model_fn.ModeKeys.EVAL\n predictions_dict = sess.run(gbdt_model.predict(mode))\n self.assertEquals(predictions_dict[\"ensemble_stamp\"], 3)\n self.assertAllClose(predictions_dict[\"predictions\"],\n [[0.25], [0.25], [0.25], [0.25]])\n self.assertAllClose(predictions_dict[\"partition_ids\"], [0, 0, 0, 0])\n\n def testPredictFnWithLeafIndexAdvancedLeft(self):\n \"\"\"Tests the predict function with output leaf ids.\"\"\"\n with self.test_session() as sess:\n # Create ensemble with one bias node.\n ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n text_format.Merge(\n \"\"\"\n trees {\n nodes {\n dense_float_binary_split {\n threshold: 1.0\n left_id: 1\n right_id: 2\n }\n node_metadata {\n gain: 0\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.15\n }\n }\n }\n }\n trees {\n nodes {\n dense_float_binary_split {\n threshold: 0.99\n left_id: 1\n right_id: 2\n }\n node_metadata {\n gain: 00\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.23\n }\n }\n }\n }\n tree_weights: 1.0\n tree_weights: 1.0\n tree_metadata {\n num_tree_weight_updates: 1\n num_layers_grown: 1\n is_finalized: true\n }\n tree_metadata {\n num_tree_weight_updates: 1\n num_layers_grown: 1\n is_finalized: true\n }\"\"\", ensemble_config)\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=3,\n tree_ensemble_config=ensemble_config.SerializeToString(),\n name=\"tree_ensemble\")\n resources.initialize_resources(resources.shared_resources()).run()\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n 
learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float\"] = array_ops.constant(\n [[0.0], [1.0], [1.1], [2.0]], dtype=dtypes.float32)\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=False,\n num_ps_replicas=0,\n center_bias=True,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features,\n output_leaf_index=True)\n\n # Create predict op.\n mode = model_fn.ModeKeys.INFER\n predictions_dict = sess.run(gbdt_model.predict(mode))\n self.assertEquals(predictions_dict[\"ensemble_stamp\"], 3)\n # here are how the numbers in expected results are calculated,\n # 0.5 = 0.25 + 0.25\n # 0.48 = 0.25 + 0.23\n # 0.38 = 0.15 + 0.23\n # 0.38 = 0.15 + 0.23\n self.assertAllClose(predictions_dict[\"predictions\"],\n [[0.5], [0.48], [0.38], [0.38]])\n self.assertAllClose(predictions_dict[\"partition_ids\"], [0, 0, 0, 0])\n self.assertAllClose(predictions_dict[\"leaf_index\"],\n [[1, 1], [1, 2], [2, 2], [2, 2]])\n\n def testTrainFnMulticlassFullHessian(self):\n \"\"\"Tests the GBDT train for multiclass full hessian.\"\"\"\n with self.test_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 1\n # Use full hessian multiclass strategy.\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.FULL_HESSIAN)\n learner_config.num_classes = 5\n learner_config.regularization.l1 = 0\n # To make matrix inversible.\n learner_config.regularization.l2 = 1e-5\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n batch_size = 3\n features[\"dense_float\"] = array_ops.constant(\n [0.3, 1.5, 1.1], dtype=dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=5,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],\n [0.0, 0.0, 0.0, 0.0, 1.2]],\n dtype=dtypes.float32)\n\n labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)\n weights = array_ops.ones([batch_size, 1], dtypes.float32)\n\n partition_ids = array_ops.zeros([batch_size], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 0,\n }\n\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n losses.per_example_maxent_loss(\n labels,\n weights,\n predictions,\n num_classes=learner_config.num_classes)[0]),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = 
tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n # On second run, expect a trivial split to be chosen to basically\n # predict the average.\n train_op.run()\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output.ParseFromString(serialized.eval())\n self.assertEqual(len(output.trees), 1)\n # We got 3 nodes: one parent and 2 leafs.\n self.assertEqual(len(output.trees[0].nodes), 3)\n self.assertAllClose(output.tree_weights, [1])\n self.assertEquals(stamp_token.eval(), 2)\n\n # Leafs should have a dense vector of size 5.\n expected_leaf_1 = [-3.4480, -3.4429, 13.8490, -3.45, -3.4508]\n expected_leaf_2 = [-1.2547, -1.3145, 1.52, 2.3875, -1.3264]\n self.assertArrayNear(expected_leaf_1,\n output.trees[0].nodes[1].leaf.vector.value, 1e-3)\n self.assertArrayNear(expected_leaf_2,\n output.trees[0].nodes[2].leaf.vector.value, 1e-3)\n\n def testTrainFnMulticlassDiagonalHessian(self):\n \"\"\"Tests the GBDT train for multiclass diagonal hessian.\"\"\"\n with self.test_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 1\n # Use full hessian multiclass strategy.\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)\n learner_config.num_classes = 5\n learner_config.regularization.l1 = 0\n # To make matrix inversible.\n learner_config.regularization.l2 = 1e-5\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n batch_size = 3\n features = {}\n features[\"dense_float\"] = array_ops.constant(\n [0.3, 1.5, 1.1], dtype=dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=5,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],\n [0.0, 0.0, 0.0, 0.0, 1.2]],\n dtype=dtypes.float32)\n\n labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)\n weights = array_ops.ones([batch_size, 1], dtypes.float32)\n\n partition_ids = array_ops.zeros([batch_size], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 0,\n }\n\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n losses.per_example_maxent_loss(\n labels,\n weights,\n predictions,\n num_classes=learner_config.num_classes)[0]),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n 
stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEqual(len(output.trees), 0)\n self.assertEqual(len(output.tree_weights), 0)\n self.assertEqual(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n # On second run, expect a trivial split to be chosen to basically\n # predict the average.\n train_op.run()\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output.ParseFromString(serialized.eval())\n self.assertEqual(len(output.trees), 1)\n # We got 3 nodes: one parent and 2 leafs.\n self.assertEqual(len(output.trees[0].nodes), 3)\n self.assertAllClose(output.tree_weights, [1])\n self.assertEqual(stamp_token.eval(), 2)\n\n # Leafs should have a dense vector of size 5.\n expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]\n expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]\n self.assertArrayNear(expected_leaf_1,\n output.trees[0].nodes[1].leaf.vector.value, 1e-3)\n self.assertArrayNear(expected_leaf_2,\n output.trees[0].nodes[2].leaf.vector.value, 1e-3)\n\n def testTrainFnMulticlassTreePerClass(self):\n \"\"\"Tests the GBDT train for multiclass tree per class strategy.\"\"\"\n with self.test_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 1\n # Use full hessian multiclass strategy.\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.TREE_PER_CLASS)\n learner_config.num_classes = 5\n learner_config.regularization.l1 = 0\n # To make matrix inversible.\n learner_config.regularization.l2 = 1e-5\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {\n \"dense_float\":\n array_ops.constant([[1.0], [1.5], [2.0]], dtypes.float32),\n }\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=5,\n features=features)\n\n batch_size = 3\n predictions = array_ops.constant(\n [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],\n [0.0, 0.0, 0.0, 2.0, 1.2]],\n dtype=dtypes.float32)\n\n labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)\n weights = array_ops.ones([batch_size, 1], dtypes.float32)\n\n partition_ids = array_ops.zeros([batch_size], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n # This should result in a tree built for a class 2.\n \"num_trees\": 13,\n }\n\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n losses.per_example_maxent_loss(\n labels,\n weights,\n predictions,\n num_classes=learner_config.num_classes)[0]),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n 
resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEqual(len(output.trees), 0)\n self.assertEqual(len(output.tree_weights), 0)\n self.assertEqual(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n # On second run, expect a trivial split to be chosen to basically\n # predict the average.\n train_op.run()\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output.ParseFromString(serialized.eval())\n self.assertEqual(len(output.trees), 1)\n self.assertAllClose(output.tree_weights, [1])\n self.assertEqual(stamp_token.eval(), 2)\n\n # One node for a split, two children nodes.\n self.assertEqual(3, len(output.trees[0].nodes))\n\n # Leafs will have a sparse vector for class 3.\n self.assertEqual(1,\n len(output.trees[0].nodes[1].leaf.sparse_vector.index))\n self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])\n self.assertAlmostEqual(\n -1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0])\n\n self.assertEqual(1,\n len(output.trees[0].nodes[2].leaf.sparse_vector.index))\n self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])\n self.assertAllClose(\n 0.893284678459,\n output.trees[0].nodes[2].leaf.sparse_vector.value[0],\n atol=1e-4,\n rtol=1e-4)\n\n def testTrainFnChiefFeatureSelectionReachedLimitNoGoodSplit(self):\n \"\"\"Tests the train function running on chief with feature selection.\"\"\"\n with self.test_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.max_number_of_unique_feature_columns = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float_0\"] = array_ops.ones([4, 1], dtypes.float32)\n # Feature 1 is predictive but it won't be used because we have reached the\n # limit of num_used_handlers >= max_number_of_unique_feature_columns\n features[\"dense_float_1\"] = array_ops.constant([0, 0, 1, 1],\n dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\":\n predictions,\n \"predictions_no_dropout\":\n predictions,\n \"partition_ids\":\n partition_ids,\n \"ensemble_stamp\":\n ensemble_stamp,\n \"num_trees\":\n 12,\n \"num_used_handlers\":\n array_ops.constant(1, 
dtype=dtypes.int64),\n \"used_handlers_mask\":\n array_ops.constant([True, False], dtype=dtypes.bool),\n }\n\n labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n\n # On second run, expect a trivial split to be chosen to basically\n # predict the average.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 1)\n self.assertAllClose(output.tree_weights, [0.1])\n self.assertEquals(stamp_token.eval(), 2)\n expected_tree = \"\"\"\n nodes {\n dense_float_binary_split {\n feature_column: 0\n threshold: 1.0\n left_id: 1\n right_id: 2\n }\n node_metadata {\n gain: 0\n }\n }\n nodes {\n leaf {\n vector {\n value: -0.25\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.0\n }\n }\n }\"\"\"\n self.assertProtoEquals(expected_tree, output.trees[0])\n\n def testTrainFnChiefFeatureSelectionWithGoodSplits(self):\n \"\"\"Tests the train function running on chief with feature selection.\"\"\"\n with self.test_session() as sess:\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.max_number_of_unique_feature_columns = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float_0\"] = array_ops.ones([4, 1], dtypes.float32)\n # Feature 1 is predictive and is in our selected features so it will be\n # used even when we're at the limit.\n features[\"dense_float_1\"] = array_ops.constant([0, 0, 1, 1],\n dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\":\n predictions,\n \"predictions_no_dropout\":\n predictions,\n \"partition_ids\":\n partition_ids,\n \"ensemble_stamp\":\n ensemble_stamp,\n \"num_trees\":\n 12,\n 
\"num_used_handlers\":\n array_ops.constant(1, dtype=dtypes.int64),\n \"used_handlers_mask\":\n array_ops.constant([False, True], dtype=dtypes.bool),\n }\n\n labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n\n self.assertEquals(len(output.trees), 1)\n self.assertAllClose(output.tree_weights, [0.1])\n self.assertEquals(stamp_token.eval(), 2)\n expected_tree = \"\"\"\n nodes {\n dense_float_binary_split {\n feature_column: 1\n left_id: 1\n right_id: 2\n }\n node_metadata {\n gain: 0.5\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.0\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: -0.5\n }\n }\n }\"\"\"\n self.assertProtoEquals(expected_tree, output.trees[0])\n\n def testTrainFnChiefFeatureSelectionReachedLimitIncrementAttemptedLayer(self):\n \"\"\"Tests the train function running on chief with feature selection.\"\"\"\n with self.test_session() as sess:\n tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n tree = tree_ensemble_config.trees.add()\n\n _set_float_split(\n tree.nodes.add().sparse_float_binary_split_default_right.split, 2,\n 4.0, 1, 2)\n _append_to_leaf(tree.nodes.add().leaf, 0, 0.5)\n _append_to_leaf(tree.nodes.add().leaf, 1, 1.2)\n tree_ensemble_config.tree_weights.append(1.0)\n metadata = tree_ensemble_config.tree_metadata.add()\n metadata.is_finalized = False\n metadata.num_layers_grown = 1\n tree_ensemble_config = tree_ensemble_config.SerializeToString()\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=tree_ensemble_config,\n name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.max_number_of_unique_feature_columns = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n # Both features will be disabled since the feature selection limit is\n # already reached.\n features[\"dense_float_0\"] = array_ops.ones([4, 1], dtypes.float32)\n features[\"dense_float_1\"] = array_ops.constant([0, 0, 1, 1],\n dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n 
features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = variables.Variable(\n initial_value=0,\n name=\"ensemble_stamp\",\n trainable=False,\n dtype=dtypes.int64)\n\n predictions_dict = {\n \"predictions\":\n predictions,\n \"predictions_no_dropout\":\n predictions,\n \"partition_ids\":\n partition_ids,\n \"ensemble_stamp\":\n ensemble_stamp,\n \"num_trees\":\n 12,\n # We have somehow reached our limit 1. Both of the handlers will be\n # disabled.\n \"num_used_handlers\":\n array_ops.constant(1, dtype=dtypes.int64),\n \"used_handlers_mask\":\n array_ops.constant([False, False], dtype=dtypes.bool),\n }\n\n labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n # Create train op.\n train_op = gbdt_model.train(\n loss=math_ops.reduce_mean(\n _squared_loss(labels, weights, predictions)),\n predictions_dict=predictions_dict,\n labels=labels)\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect no splits to be chosen because the quantile\n # buckets will not be ready.\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 1)\n self.assertEquals(output.growing_metadata.num_layers_attempted, 1)\n self.assertEquals(stamp_token.eval(), 1)\n\n # Update the stamp to be able to run a second time.\n sess.run([ensemble_stamp.assign_add(1)])\n\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n # Make sure the trees are not modified, but the num_layers_attempted is\n # incremented so that eventually the training stops.\n self.assertEquals(len(output.trees), 1)\n self.assertEquals(len(output.trees[0].nodes), 3)\n\n self.assertEquals(output.growing_metadata.num_layers_attempted, 2)\n\n def testResetModelBeforeAndAfterSplit(self):\n \"\"\"Tests whether resetting works.\"\"\"\n with self.test_session():\n # First build a small tree and train it to verify training works.\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth = 1\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 12,\n \"max_tree_depth\": 4,\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = 
array_ops.ones([4, 1], dtypes.float32)\n loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))\n\n # Create train op.\n update_op, reset_op, training_state = gbdt_model.update_stats(\n loss, predictions_dict)\n with ops.control_dependencies(update_op):\n train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble(\n predictions_dict, training_state)\n\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n original_stamp = ensemble_stamp.eval()\n expected_tree = \"\"\"\n nodes {\n dense_float_binary_split {\n threshold: 1.0\n left_id: 1\n right_id: 2\n }\n node_metadata {\n gain: 0\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\n nodes {\n leaf {\n vector {\n value: 0.0\n }\n }\n }\"\"\"\n\n def _train_once_and_check(expect_split):\n stamp = ensemble_stamp.eval()\n train_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(stamp_token.eval(), stamp + 1)\n if expect_split:\n # State of the ensemble after a split occurs.\n self.assertEquals(len(output.trees), 1)\n self.assertProtoEquals(expected_tree, output.trees[0])\n else:\n # State of the ensemble after a single accumulation but before any\n # splitting occurs\n self.assertEquals(len(output.trees), 0)\n self.assertProtoEquals(\"\"\"\n growing_metadata {\n num_trees_attempted: 1\n num_layers_attempted: 1\n }\"\"\", output)\n\n def _run_reset():\n stamp_before_reset = ensemble_stamp.eval()\n reset_op.run()\n stamp_after_reset = ensemble_stamp.eval()\n self.assertNotEquals(stamp_after_reset, stamp_before_reset)\n\n _, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertProtoEquals(\"\", output)\n\n return stamp_after_reset\n\n # Exit after one train_op, so no new layer are created but the handlers\n # contain enough information to split on the next call to train.\n _train_once_and_check(expect_split=False)\n self.assertEquals(ensemble_stamp.eval(), original_stamp + 1)\n\n # Reset the handlers so it still requires two training calls to split.\n stamp_after_reset = _run_reset()\n\n _train_once_and_check(expect_split=False)\n _train_once_and_check(expect_split=True)\n self.assertEquals(ensemble_stamp.eval(), stamp_after_reset + 2)\n\n # This time, test that the reset_op works right after splitting.\n stamp_after_reset = _run_reset()\n\n # Test that after resetting, the tree can be trained as normal.\n _train_once_and_check(expect_split=False)\n _train_once_and_check(expect_split=True)\n self.assertEquals(ensemble_stamp.eval(), stamp_after_reset + 2)\n\n def testResetModelNonChief(self):\n \"\"\"Tests the reset function on a non-chief worker.\"\"\"\n with self.test_session():\n # Create ensemble with one bias node.\n ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()\n text_format.Merge(\n \"\"\"\n trees {\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\n }\n tree_weights: 1.0\n tree_metadata {\n num_tree_weight_updates: 1\n num_layers_grown: 1\n is_finalized: false\n }\"\"\", ensemble_config)\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0,\n tree_ensemble_config=ensemble_config.SerializeToString(),\n name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n 
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth = 1\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=False,\n num_ps_replicas=0,\n center_bias=False,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))\n\n # Create reset op.\n _, reset_op, _ = gbdt_model.update_stats(\n loss, predictions_dict)\n\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n\n # Reset op doesn't do anything because this is a non-chief worker.\n reset_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 1)\n self.assertEquals(len(output.tree_weights), 1)\n self.assertEquals(stamp_token.eval(), 0)\n\n def testResetModelWithCenterBias(self):\n \"\"\"Tests the reset function running on chief with bias centering.\"\"\"\n with self.test_session():\n ensemble_handle = model_ops.tree_ensemble_variable(\n stamp_token=0, tree_ensemble_config=\"\", name=\"tree_ensemble\")\n learner_config = learner_pb2.LearnerConfig()\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n learner_config.num_classes = 2\n learner_config.regularization.l1 = 0\n learner_config.regularization.l2 = 0\n learner_config.constraints.max_tree_depth = 1\n learner_config.constraints.min_node_weight = 0\n features = {}\n features[\"dense_float\"] = array_ops.ones([4, 1], dtypes.float32)\n\n gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(\n is_chief=True,\n num_ps_replicas=0,\n center_bias=True,\n ensemble_handle=ensemble_handle,\n examples_per_layer=1,\n learner_config=learner_config,\n logits_dimension=1,\n features=features)\n\n predictions = array_ops.constant(\n [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)\n partition_ids = array_ops.zeros([4], dtypes.int32)\n ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)\n\n predictions_dict = {\n \"predictions\": predictions,\n \"predictions_no_dropout\": predictions,\n \"partition_ids\": partition_ids,\n \"ensemble_stamp\": ensemble_stamp,\n \"num_trees\": 12,\n }\n\n labels = array_ops.ones([4, 1], dtypes.float32)\n weights = array_ops.ones([4, 1], dtypes.float32)\n loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))\n\n # Create train op.\n update_op, reset_op, training_state = gbdt_model.update_stats(\n loss, predictions_dict)\n with ops.control_dependencies(update_op):\n train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble(\n predictions_dict, training_state)\n\n variables.global_variables_initializer().run()\n 
resources.initialize_resources(resources.shared_resources()).run()\n\n # On first run, expect bias to be centered.\n def train_and_check():\n train_op.run()\n _, serialized = model_ops.tree_ensemble_serialize(ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n expected_tree = \"\"\"\n nodes {\n leaf {\n vector {\n value: 0.25\n }\n }\n }\"\"\"\n self.assertEquals(len(output.trees), 1)\n self.assertAllEqual(output.tree_weights, [1.0])\n self.assertProtoEquals(expected_tree, output.trees[0])\n\n train_and_check()\n self.assertEquals(ensemble_stamp.eval(), 1)\n\n reset_op.run()\n stamp_token, serialized = model_ops.tree_ensemble_serialize(\n ensemble_handle)\n output = tree_config_pb2.DecisionTreeEnsembleConfig()\n output.ParseFromString(serialized.eval())\n self.assertEquals(len(output.trees), 0)\n self.assertEquals(len(output.tree_weights), 0)\n self.assertEquals(stamp_token.eval(), 2)\n\n train_and_check()\n self.assertEquals(ensemble_stamp.eval(), 3)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Variable class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport enum # pylint: disable=g-bad-import-order\n\nimport six\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import variable_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_state_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.checkpointable import base as checkpointable\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import tf_should_use\nfrom tensorflow.python.util.deprecation import deprecated\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef default_variable_creator(_, **kwds):\n del kwds\n raise NotImplementedError(\"variable_scope needs to be imported\")\n\n\ndef _make_getter(captured_getter, captured_previous):\n \"\"\"To avoid capturing loop variables.\"\"\"\n def getter(**kwargs):\n return captured_getter(captured_previous, **kwargs)\n return getter\n\n\n@tf_export(\"VariableSynchronization\")\nclass VariableSynchronization(enum.Enum):\n \"\"\"Indicates when a distributed variable will be synced.\"\"\"\n\n # Indicates that the synchronization will be determined by the current\n # `DistributionStrategy` (eg. With `MirroredStrategy` this would be\n # `ON_WRITE`).\n AUTO = 0\n\n # Indicates that there will only be one copy of the variable, so there is no\n # need to sync.\n NONE = 1\n\n # Indicates that the variable will be aggregated across devices\n # every time it is updated.\n ON_WRITE = 2\n\n # Indicates that the variable will be aggregated across devices\n # when it is read (eg. 
when checkpointing or when evaluating an op that uses\n # the variable).\n ON_READ = 3\n\n\n@tf_export(\"VariableAggregation\")\nclass VariableAggregation(enum.Enum):\n \"\"\"Indicates how a distributed variable will be aggregated.\"\"\"\n NONE = 0\n SUM = 1\n MEAN = 2\n\n\nclass VariableMetaclass(type):\n \"\"\"Metaclass to allow construction of tf.Variable to be overridden.\"\"\"\n\n def _variable_call(cls,\n initial_value=None,\n trainable=None,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None,\n use_resource=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE):\n \"\"\"Call on Variable class. Useful to force the signature.\"\"\"\n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access\n previous_getter = _make_getter(getter, previous_getter)\n\n # Reset `aggregation` that is explicitly set as `None` to the enum NONE.\n if aggregation is None:\n aggregation = VariableAggregation.NONE\n return previous_getter(\n initial_value=initial_value,\n trainable=trainable,\n collections=collections,\n validate_shape=validate_shape,\n caching_device=caching_device,\n name=name,\n variable_def=variable_def,\n dtype=dtype,\n expected_shape=expected_shape,\n import_scope=import_scope,\n constraint=constraint,\n use_resource=use_resource,\n synchronization=synchronization,\n aggregation=aggregation)\n\n def __call__(cls, *args, **kwargs):\n if cls is Variable:\n return cls._variable_call(*args, **kwargs)\n else:\n return super(VariableMetaclass, cls).__call__(*args, **kwargs)\n\n\n@tf_export(\"Variable\")\nclass Variable(six.with_metaclass(VariableMetaclass,\n checkpointable.CheckpointableBase)):\n \"\"\"See the [Variables Guide](https://tensorflow.org/guide/variables).\n\n A variable maintains state in the graph across calls to `run()`. You add a\n variable to the graph by constructing an instance of the class `Variable`.\n\n The `Variable()` constructor requires an initial value for the variable,\n which can be a `Tensor` of any type and shape. The initial value defines the\n type and shape of the variable. After construction, the type and shape of\n the variable are fixed. The value can be changed using one of the assign\n methods.\n\n If you want to change the shape of a variable later you have to use an\n `assign` Op with `validate_shape=False`.\n\n Just like any `Tensor`, variables created with `Variable()` can be used as\n inputs for other Ops in the graph. Additionally, all the operators\n overloaded for the `Tensor` class are carried over to variables, so you can\n also add nodes to the graph by just doing arithmetic on variables.\n\n ```python\n import tensorflow as tf\n\n # Create a variable.\n w = tf.Variable(<initial-value>, name=<optional-name>)\n\n # Use the variable in the graph like any Tensor.\n y = tf.matmul(w, ...another variable or tensor...)\n\n # The overloaded operators are available too.\n z = tf.sigmoid(w + y)\n\n # Assign a new value to the variable with `assign()` or a related method.\n w.assign(w + 1.0)\n w.assign_add(1.0)\n ```\n\n When you launch the graph, variables have to be explicitly initialized before\n you can run Ops that use their value. 
You can initialize a variable by\n running its *initializer op*, restoring the variable from a save file, or\n simply running an `assign` Op that assigns a value to the variable. In fact,\n the variable *initializer op* is just an `assign` Op that assigns the\n variable's initial value to the variable itself.\n\n ```python\n # Launch the graph in a session.\n with tf.Session() as sess:\n # Run the variable initializer.\n sess.run(w.initializer)\n # ...you now can run ops that use the value of 'w'...\n ```\n\n The most common initialization pattern is to use the convenience function\n `global_variables_initializer()` to add an Op to the graph that initializes\n all the variables. You then run that Op after launching the graph.\n\n ```python\n # Add an Op to initialize global variables.\n init_op = tf.global_variables_initializer()\n\n # Launch the graph in a session.\n with tf.Session() as sess:\n # Run the Op that initializes global variables.\n sess.run(init_op)\n # ...you can now run any Op that uses variable values...\n ```\n\n If you need to create a variable with an initial value dependent on another\n variable, use the other variable's `initialized_value()`. This ensures that\n variables are initialized in the right order.\n\n All variables are automatically collected in the graph where they are\n created. By default, the constructor adds the new variable to the graph\n collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function\n `global_variables()` returns the contents of that collection.\n\n When building a machine learning model it is often convenient to distinguish\n between variables holding the trainable model parameters and other variables\n such as a `global step` variable used to count training steps. To make this\n easier, the variable constructor supports a `trainable=<bool>` parameter. If\n `True`, the new variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. The convenience function\n `trainable_variables()` returns the contents of this collection. The\n various `Optimizer` classes use this collection as the default list of\n variables to optimize.\n\n WARNING: tf.Variable objects by default have a non-intuitive memory model. A\n Variable is represented internally as a mutable Tensor which can\n non-deterministically alias other Tensors in a graph. The set of operations\n which consume a Variable and can lead to aliasing is undetermined and can\n change across TensorFlow versions. Avoid writing code which relies on the\n value of a Variable either changing or not changing as other operations\n happen. 
For example, using Variable objects or simple functions thereof as\n predicates in a `tf.cond` is dangerous and error-prone:\n\n ```\n v = tf.Variable(True)\n tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken.\n ```\n\n Here replacing adding `use_resource=True` when constructing the variable will\n fix any nondeterminism issues:\n ```\n v = tf.Variable(True, use_resource=True)\n tf.cond(v, lambda: v.assign(False), my_false_fn)\n ```\n\n To use the replacement for variables which does\n not have these issues:\n\n * Add `use_resource=True` when constructing `tf.Variable`;\n * Call `tf.get_variable_scope().set_use_resource(True)` inside a\n `tf.variable_scope` before the `tf.get_variable()` call.\n \"\"\"\n\n def __init__(self,\n initial_value=None,\n trainable=True,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None,\n use_resource=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE):\n \"\"\"Creates a new variable with value `initial_value`.\n\n The new variable is added to the graph collections listed in `collections`,\n which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\n If `trainable` is `True` the variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`.\n\n This constructor creates both a `variable` Op and an `assign` Op to set the\n variable to its initial value.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n trainable: If `True`, the default, also adds the variable to the graph\n collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as\n the default list of variables to use by the `Optimizer` classes.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device.\n If not `None`, caches on another device. Typical use is to cache\n on the device where the Ops using the Variable reside, to deduplicate\n copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n variable_def: `VariableDef` protocol buffer. If not `None`, recreates\n the Variable object with its contents, referencing the variable's nodes\n in the graph, which must already exist. The graph is not changed.\n `variable_def` and the other arguments are mutually exclusive.\n dtype: If set, initial_value will be converted to the given type.\n If `None`, either the datatype will be kept (if `initial_value` is\n a Tensor), or `convert_to_tensor` will decide.\n expected_shape: A TensorShape. If set, initial_value is expected\n to have this shape.\n import_scope: Optional `string`. 
Name scope to add to the\n `Variable.` Only used when initializing from protocol buffer.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n use_resource: if True, a ResourceVariable is created; otherwise an\n old-style ref-based variable is created. When eager execution is enabled\n a resource variable is always created.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize. If `synchronization` is set to `ON_READ`,\n `trainable` must not be set to `True`.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n\n Raises:\n ValueError: If both `variable_def` and initial_value are specified.\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self):\n raise NotImplementedError\n\n def value(self):\n \"\"\"Returns the last snapshot of this variable.\n\n You usually do not need to call this method as all ops that need the value\n of the variable call it automatically through a `convert_to_tensor()` call.\n\n Returns a `Tensor` which holds the value of the variable. You can not\n assign a new value to this tensor as it is not a reference to the variable.\n\n To avoid copies, if the consumer of the returned value is on the same device\n as the variable, this actually returns the live value of the variable, not\n a copy. Updates to the variable are seen by the consumer. If the consumer\n is on a different device it will get a copy of the variable.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n raise NotImplementedError\n\n def read_value(self):\n \"\"\"Returns the value of this variable, read in the current context.\n\n Can be different from value() if it's on another device, with control\n dependencies, etc.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n raise NotImplementedError\n\n def set_shape(self, shape):\n \"\"\"Overrides the shape for this variable.\n\n Args:\n shape: the `TensorShape` representing the overridden shape.\n \"\"\"\n raise NotImplementedError\n\n @property\n def trainable(self):\n raise NotImplementedError\n\n def eval(self, session=None):\n \"\"\"In a session, computes and returns the value of this variable.\n\n This is not a graph construction method, it does not add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. 
See `tf.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n print(v.eval(sess))\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n print(v.eval())\n ```\n\n Args:\n session: The session to use to evaluate this variable. If\n none, the default session is used.\n\n Returns:\n A numpy `ndarray` with a copy of the value of this variable.\n \"\"\"\n raise NotImplementedError\n\n def initialized_value(self):\n \"\"\"Returns the value of the initialized variable.\n\n You should use this instead of the variable itself to initialize another\n variable with a value that depends on the value of this variable.\n\n ```python\n # Initialize 'v' with a random tensor.\n v = tf.Variable(tf.truncated_normal([10, 40]))\n # Use `initialized_value` to guarantee that `v` has been\n # initialized before its value is used to initialize `w`.\n # The random values are picked only once.\n w = tf.Variable(v.initialized_value() * 2.0)\n ```\n\n Returns:\n A `Tensor` holding the value of this variable after its initializer\n has run.\n \"\"\"\n raise NotImplementedError\n\n @property\n def initial_value(self):\n \"\"\"Returns the Tensor used as the initial value for the variable.\n\n Note that this is different from `initialized_value()` which runs\n the op that initializes the variable before returning its value.\n This method returns the tensor that is used by the op that initializes\n the variable.\n\n Returns:\n A `Tensor`.\n \"\"\"\n raise NotImplementedError\n\n @property\n def constraint(self):\n \"\"\"Returns the constraint function associated with this variable.\n\n Returns:\n The constraint function that was passed to the variable constructor.\n Can be `None` if no constraint was passed.\n \"\"\"\n raise NotImplementedError\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n \"\"\"Assigns a new value to the variable.\n\n This is essentially a shortcut for `assign(self, value)`.\n\n Args:\n value: A `Tensor`. The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the assignment has completed.\n \"\"\"\n raise NotImplementedError\n\n def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Adds a value to this variable.\n\n This is essentially a shortcut for `assign_add(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to add to this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the addition has completed.\n \"\"\"\n raise NotImplementedError\n\n def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Subtracts a value from this variable.\n\n This is essentially a shortcut for `assign_sub(self, delta)`.\n\n Args:\n delta: A `Tensor`. 
The value to subtract from this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the subtraction has completed.\n \"\"\"\n raise NotImplementedError\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Subtracts `IndexedSlices` from this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be subtracted from this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Adds `IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Assigns `IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_sub(self, indices, updates, name=None):\n \"\"\"Applies sparse subtraction to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_sub(indices, updates)\n with tf.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, -9, 3, -6, -6, 6, 7, -4]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_add(self, indices, updates, name=None):\n \"\"\"Applies sparse addition to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n add = ref.scatter_nd_add(indices, updates)\n with tf.Session() as sess:\n print sess.run(add)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_update(self, indices, updates, name=None):\n \"\"\"Applies sparse assignment to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_assign(indices, updates)\n with tf.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def count_up_to(self, limit):\n \"\"\"Increments this variable until it reaches `limit`.\n\n When that Op is run it tries to increment the variable by `1`. If\n incrementing the variable would bring it above `limit` then the Op raises\n the exception `OutOfRangeError`.\n\n If no error is raised, the Op outputs the value of the variable before\n the increment.\n\n This is essentially a shortcut for `count_up_to(self, limit)`.\n\n Args:\n limit: value at which incrementing the variable raises an error.\n\n Returns:\n A `Tensor` that will hold the variable value before the increment. If no\n other Op modifies this variable, the values produced will all be\n distinct.\n \"\"\"\n raise NotImplementedError\n\n def load(self, value, session=None):\n \"\"\"Load new value into this variable.\n\n Writes new value to variable's memory. Doesn't add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n v.load([2, 3], sess)\n print(v.eval(sess)) # prints [2 3]\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n v.load([3, 4], sess)\n print(v.eval()) # prints [3 4]\n ```\n\n Args:\n value: New variable value\n session: The session to use to evaluate this variable. 
If\n none, the default session is used.\n\n Raises:\n ValueError: Session is not passed and no default session\n \"\"\"\n raise NotImplementedError\n\n # Conversion to tensor.\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name\n \"\"\"Utility function for converting a Variable to a Tensor.\"\"\"\n _ = name\n if dtype and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n return v._ref() # pylint: disable=protected-access\n else:\n return v.value()\n\n @staticmethod\n def _OverloadAllOperators(): # pylint: disable=invalid-name\n \"\"\"Register overloads for all operators.\"\"\"\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n Variable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(Variable, \"__getitem__\", array_ops._SliceHelperVar)\n\n @staticmethod\n def _OverloadOperator(operator): # pylint: disable=invalid-name\n \"\"\"Defer an operator overload to `ops.Tensor`.\n\n We pull the operator out of ops.Tensor dynamically to avoid ordering issues.\n\n Args:\n operator: string. The operator name.\n \"\"\"\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n return getattr(ops.Tensor, operator)(a._AsTensor(), *args)\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = getattr(ops.Tensor, operator).__doc__\n except AttributeError:\n pass\n\n setattr(Variable, operator, _run_op)\n\n # NOTE(mrry): This enables the Variable's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Variable class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Variables interact\n # with ndarrays.\n __array_priority__ = 100\n\n @property\n def name(self):\n \"\"\"The name of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def initializer(self):\n \"\"\"The initializer operation for this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def device(self):\n \"\"\"The device of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def dtype(self):\n \"\"\"The `DType` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def op(self):\n \"\"\"The `Operation` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def graph(self):\n \"\"\"The `Graph` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def shape(self):\n \"\"\"The `TensorShape` of this variable.\n\n Returns:\n A `TensorShape`.\n \"\"\"\n raise NotImplementedError\n\n def get_shape(self):\n \"\"\"Alias of Variable.shape.\"\"\"\n raise NotImplementedError\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts a `Variable` to a `VariableDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. 
Name scope to remove.\n\n Returns:\n A `VariableDef` protocol buffer, or `None` if the `Variable` is not\n in the specified name scope.\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def from_proto(variable_def, import_scope=None):\n \"\"\"Returns a `Variable` object created from `variable_def`.\"\"\"\n return RefVariable(variable_def=variable_def,\n import_scope=import_scope)\n\n class SaveSliceInfo(object):\n \"\"\"Information on how to save this Variable as a slice.\n\n Provides internal support for saving variables as slices of a larger\n variable. This API is not public and is subject to change.\n\n Available properties:\n\n * full_name\n * full_shape\n * var_offset\n * var_shape\n \"\"\"\n\n def __init__(self,\n full_name=None,\n full_shape=None,\n var_offset=None,\n var_shape=None,\n save_slice_info_def=None,\n import_scope=None):\n \"\"\"Create a `SaveSliceInfo`.\n\n Args:\n full_name: Name of the full variable of which this `Variable` is a\n slice.\n full_shape: Shape of the full variable, as a list of int.\n var_offset: Offset of this `Variable` into the full variable, as a\n list of int.\n var_shape: Shape of this `Variable`, as a list of int.\n save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,\n recreates the SaveSliceInfo object its contents.\n `save_slice_info_def` and other arguments are mutually\n exclusive.\n import_scope: Optional `string`. Name scope to add. Only used\n when initializing from protocol buffer.\n \"\"\"\n if save_slice_info_def:\n assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)\n self.full_name = ops.prepend_name_scope(\n save_slice_info_def.full_name, import_scope=import_scope)\n self.full_shape = [i for i in save_slice_info_def.full_shape]\n self.var_offset = [i for i in save_slice_info_def.var_offset]\n self.var_shape = [i for i in save_slice_info_def.var_shape]\n else:\n self.full_name = full_name\n self.full_shape = full_shape\n self.var_offset = var_offset\n self.var_shape = var_shape\n\n @property\n def spec(self):\n \"\"\"Computes the spec string used for saving.\"\"\"\n full_shape_str = \" \".join([\"%d\" % d for d in self.full_shape]) + \" \"\n sl_spec = \":\".join([\n \"%d,%d\" % (o, s) for o, s in zip(self.var_offset, self.var_shape)\n ])\n return full_shape_str + sl_spec\n\n def to_proto(self, export_scope=None):\n \"\"\"Returns a SaveSliceInfoDef() proto.\n\n Args:\n export_scope: Optional `string`. 
Name scope to remove.\n\n Returns:\n A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not\n in the specified name scope.\n \"\"\"\n if (export_scope is None or\n self.full_name.startswith(export_scope)):\n save_slice_info_def = variable_pb2.SaveSliceInfoDef()\n save_slice_info_def.full_name = ops.strip_name_scope(\n self.full_name, export_scope)\n for i in self.full_shape:\n save_slice_info_def.full_shape.append(i)\n for i in self.var_offset:\n save_slice_info_def.var_offset.append(i)\n for i in self.var_shape:\n save_slice_info_def.var_shape.append(i)\n return save_slice_info_def\n else:\n return None\n\n def __iadd__(self, other):\n raise NotImplementedError\n\n def __isub__(self, other):\n raise NotImplementedError\n\n def __imul__(self, other):\n raise NotImplementedError\n\n def __idiv__(self, other):\n raise NotImplementedError\n\n def __itruediv__(self, other):\n raise NotImplementedError\n\n def __irealdiv__(self, other):\n raise NotImplementedError\n\n def __ipow__(self, other):\n raise NotImplementedError\n\n\n# TODO(apassos): do not repeat all comments here\nclass RefVariable(Variable):\n \"\"\"Ref-based implementation of variables.\"\"\"\n\n def __init__(self,\n initial_value=None,\n trainable=True,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None):\n \"\"\"Creates a new variable with value `initial_value`.\n\n The new variable is added to the graph collections listed in `collections`,\n which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\n If `trainable` is `True` the variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`.\n\n This constructor creates both a `variable` Op and an `assign` Op to set the\n variable to its initial value.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n trainable: If `True`, the default, also adds the variable to the graph\n collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as\n the default list of variables to use by the `Optimizer` classes.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device.\n If not `None`, caches on another device. Typical use is to cache\n on the device where the Ops using the Variable reside, to deduplicate\n copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n variable_def: `VariableDef` protocol buffer. If not `None`, recreates\n the Variable object with its contents, referencing the variable's nodes\n in the graph, which must already exist. 
The graph is not changed.\n `variable_def` and the other arguments are mutually exclusive.\n dtype: If set, initial_value will be converted to the given type.\n If `None`, either the datatype will be kept (if `initial_value` is\n a Tensor), or `convert_to_tensor` will decide.\n expected_shape: A TensorShape. If set, initial_value is expected\n to have this shape.\n import_scope: Optional `string`. Name scope to add to the\n `Variable.` Only used when initializing from protocol buffer.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n\n Raises:\n ValueError: If both `variable_def` and initial_value are specified.\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n self._in_graph_mode = True\n if variable_def:\n # If variable_def is provided, recreates the variable from its fields.\n if initial_value:\n raise ValueError(\"variable_def and initial_value are mutually \"\n \"exclusive.\")\n self._init_from_proto(variable_def, import_scope=import_scope)\n else:\n # Create from initial_value.\n self._init_from_args(\n initial_value=initial_value,\n trainable=trainable,\n collections=collections,\n validate_shape=validate_shape,\n caching_device=caching_device,\n name=name,\n dtype=dtype,\n expected_shape=expected_shape,\n constraint=constraint)\n\n def __repr__(self):\n if context.executing_eagerly() and not self._in_graph_mode:\n return \"<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>\" % (\n self.name, self.get_shape(), self.dtype.name,\n ops.numpy_text(self.read_value(), is_repr=True))\n else:\n return \"<tf.Variable '%s' shape=%s dtype=%s>\" % (\n self.name, self.get_shape(), self.dtype.name)\n\n def _init_from_args(self,\n initial_value=None,\n trainable=True,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n dtype=None,\n expected_shape=None,\n constraint=None):\n \"\"\"Creates a new variable from arguments.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called.\n (Note that initializer functions from init_ops.py must first be bound\n to a shape before being used here.)\n trainable: If `True`, the default, also adds the variable to the graph\n collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as\n the default list of variables to use by the `Optimizer` classes.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. 
Typical use is to\n cache on the device where the Ops using the Variable reside, to\n deduplicate copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n dtype: If set, initial_value will be converted to the given type.\n If None, either the datatype will be kept (if initial_value is\n a Tensor) or float32 will be used (if it is a Python object convertible\n to a Tensor).\n expected_shape: Deprecated. Ignored.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n\n Raises:\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If lifted into the eager context.\n \"\"\"\n _ = expected_shape\n if initial_value is None:\n raise ValueError(\"initial_value must be specified.\")\n init_from_fn = callable(initial_value)\n\n if collections is None:\n collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n if not isinstance(collections, (list, tuple, set)):\n raise ValueError(\n \"collections argument to Variable constructor must be a list, tuple, \"\n \"or set. Got %s of type %s\" % (collections, type(collections)))\n if constraint is not None and not callable(constraint):\n raise ValueError(\"The `constraint` argument must be a callable.\")\n\n # Store the graph key so optimizers know how to only retrieve variables from\n # this graph.\n self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access\n if isinstance(initial_value, checkpointable.CheckpointInitialValue):\n self._maybe_initialize_checkpointable()\n self._update_uid = initial_value.checkpoint_position.restore_uid\n initial_value = initial_value.wrapped_value\n\n self._trainable = trainable\n if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:\n collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]\n with ops.init_scope():\n # Ensure that we weren't lifted into the eager context.\n if context.executing_eagerly():\n raise RuntimeError(\n \"RefVariable not supported when eager execution is enabled. 
\")\n with ops.name_scope(name, \"Variable\", [] if init_from_fn else\n [initial_value]) as name:\n\n if init_from_fn:\n # Use attr_scope and device(None) to simulate the behavior of\n # colocate_with when the variable we want to colocate with doesn't\n # yet exist.\n true_name = ops._name_from_scope_name(name) # pylint: disable=protected-access\n attr = attr_value_pb2.AttrValue(\n list=attr_value_pb2.AttrValue.ListValue(\n s=[compat.as_bytes(\"loc:@%s\" % true_name)]))\n # pylint: disable=protected-access\n with ops.get_default_graph()._attr_scope({\"_class\": attr}):\n with ops.name_scope(\"Initializer\"), ops.device(None):\n self._initial_value = ops.convert_to_tensor(\n initial_value(), name=\"initial_value\", dtype=dtype)\n shape = (self._initial_value.get_shape()\n if validate_shape else tensor_shape.unknown_shape())\n self._variable = state_ops.variable_op_v2(\n shape,\n self._initial_value.dtype.base_dtype,\n name=name)\n # pylint: enable=protected-access\n\n # Or get the initial value from a Tensor or Python object.\n else:\n self._initial_value = ops.convert_to_tensor(\n initial_value, name=\"initial_value\", dtype=dtype)\n # pylint: disable=protected-access\n if self._initial_value.op._get_control_flow_context() is not None:\n raise ValueError(\n \"Initializer for variable %s is from inside a control-flow \"\n \"construct, such as a loop or conditional. When creating a \"\n \"variable inside a loop or conditional, use a lambda as the \"\n \"initializer.\" % name)\n # pylint: enable=protected-access\n shape = (self._initial_value.get_shape()\n if validate_shape else tensor_shape.unknown_shape())\n # In this case, the variable op can't be created until after the\n # initial_value has been converted to a Tensor with a known type.\n self._variable = state_ops.variable_op_v2(\n shape,\n self._initial_value.dtype.base_dtype,\n name=name)\n\n # Manually overrides the variable's shape with the initial value's.\n if validate_shape:\n initial_value_shape = self._initial_value.get_shape()\n if not initial_value_shape.is_fully_defined():\n raise ValueError(\"initial_value must have a shape specified: %s\" %\n self._initial_value)\n\n # If 'initial_value' makes use of other variables, make sure we don't\n # have an issue if these other variables aren't initialized first by\n # using their initialized_value() method.\n self._initializer_op = state_ops.assign(\n self._variable,\n self._try_guard_against_uninitialized_dependencies(\n self._initial_value),\n validate_shape=validate_shape).op\n\n # TODO(vrv): Change this class to not take caching_device, but\n # to take the op to colocate the snapshot with, so we can use\n # colocation rather than devices.\n if caching_device is not None:\n with ops.device(caching_device):\n self._snapshot = array_ops.identity(self._variable, name=\"read\")\n else:\n with ops.colocate_with(self._variable.op):\n self._snapshot = array_ops.identity(self._variable, name=\"read\")\n ops.add_to_collections(collections, self)\n\n self._caching_device = caching_device\n self._save_slice_info = None\n self._constraint = constraint\n\n def _init_from_proto(self, variable_def, import_scope=None):\n \"\"\"Recreates the Variable object from a `VariableDef` protocol buffer.\n\n Args:\n variable_def: `VariableDef` protocol buffer, describing a variable\n whose nodes already exists in the graph.\n import_scope: Optional `string`. 
Name scope to add.\n \"\"\"\n assert isinstance(variable_def, variable_pb2.VariableDef)\n # Create from variable_def.\n g = ops.get_default_graph()\n self._variable = g.as_graph_element(\n ops.prepend_name_scope(variable_def.variable_name,\n import_scope=import_scope))\n self._initializer_op = g.as_graph_element(\n ops.prepend_name_scope(variable_def.initializer_name,\n import_scope=import_scope))\n # Tests whether initial_value_name exists first for backwards compatibility.\n if (hasattr(variable_def, \"initial_value_name\") and\n variable_def.initial_value_name):\n self._initial_value = g.as_graph_element(\n ops.prepend_name_scope(variable_def.initial_value_name,\n import_scope=import_scope))\n else:\n self._initial_value = None\n self._trainable = getattr(variable_def, \"trainable\", True)\n self._snapshot = g.as_graph_element(\n ops.prepend_name_scope(variable_def.snapshot_name,\n import_scope=import_scope))\n if variable_def.HasField(\"save_slice_info_def\"):\n self._save_slice_info = Variable.SaveSliceInfo(\n save_slice_info_def=variable_def.save_slice_info_def,\n import_scope=import_scope)\n else:\n self._save_slice_info = None\n self._caching_device = None\n self._constraint = None\n\n def _as_graph_element(self):\n \"\"\"Conversion function for Graph.as_graph_element().\"\"\"\n return self._variable\n\n def _AsTensor(self): # pylint: disable=invalid-name\n \"\"\"Converts this variable to a Tensor.\n\n See `tf.Variable.value`.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n return self._snapshot\n\n def __iter__(self):\n \"\"\"Dummy method to prevent iteration. Do not call.\n\n NOTE(mrry): If we register __getitem__ as an overloaded operator,\n Python will valiantly attempt to iterate over the variable's Tensor from 0\n to infinity. Declaring this method prevents this unintended behavior.\n\n Raises:\n TypeError: when invoked.\n \"\"\"\n raise TypeError(\"'Variable' object is not iterable.\")\n\n def value(self):\n \"\"\"Returns the last snapshot of this variable.\n\n You usually do not need to call this method as all ops that need the value\n of the variable call it automatically through a `convert_to_tensor()` call.\n\n Returns a `Tensor` which holds the value of the variable. You can not\n assign a new value to this tensor as it is not a reference to the variable.\n\n To avoid copies, if the consumer of the returned value is on the same device\n as the variable, this actually returns the live value of the variable, not\n a copy. Updates to the variable are seen by the consumer. If the consumer\n is on a different device it will get a copy of the variable.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n return self._snapshot\n\n def read_value(self):\n \"\"\"Returns the value of this variable, read in the current context.\n\n Can be different from value() if it's on another device, with control\n dependencies, etc.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n return array_ops.identity(self._variable, name=\"read\")\n\n def _ref(self):\n \"\"\"Returns a reference to this variable.\n\n You usually do not need to call this method as all ops that need a reference\n to the variable call it automatically.\n\n Returns is a `Tensor` which holds a reference to the variable. 
You can\n assign a new value to the variable by passing the tensor to an assign op.\n See `tf.Variable.value` if you want to get the value of the\n variable.\n\n Returns:\n A `Tensor` that is a reference to the variable.\n \"\"\"\n return self._variable\n\n def set_shape(self, shape):\n \"\"\"Overrides the shape for this variable.\n\n Args:\n shape: the `TensorShape` representing the overridden shape.\n \"\"\"\n self._ref().set_shape(shape)\n self.value().set_shape(shape)\n\n @property\n def trainable(self):\n return self._trainable\n\n def eval(self, session=None):\n \"\"\"In a session, computes and returns the value of this variable.\n\n This is not a graph construction method, it does not add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n print(v.eval(sess))\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n print(v.eval())\n ```\n\n Args:\n session: The session to use to evaluate this variable. If\n none, the default session is used.\n\n Returns:\n A numpy `ndarray` with a copy of the value of this variable.\n \"\"\"\n return self._variable.eval(session=session)\n\n def initialized_value(self):\n \"\"\"Returns the value of the initialized variable.\n\n You should use this instead of the variable itself to initialize another\n variable with a value that depends on the value of this variable.\n\n ```python\n # Initialize 'v' with a random tensor.\n v = tf.Variable(tf.truncated_normal([10, 40]))\n # Use `initialized_value` to guarantee that `v` has been\n # initialized before its value is used to initialize `w`.\n # The random values are picked only once.\n w = tf.Variable(v.initialized_value() * 2.0)\n ```\n\n Returns:\n A `Tensor` holding the value of this variable after its initializer\n has run.\n \"\"\"\n with ops.init_scope():\n return control_flow_ops.cond(is_variable_initialized(self),\n self.read_value,\n lambda: self.initial_value)\n\n @property\n def initial_value(self):\n \"\"\"Returns the Tensor used as the initial value for the variable.\n\n Note that this is different from `initialized_value()` which runs\n the op that initializes the variable before returning its value.\n This method returns the tensor that is used by the op that initializes\n the variable.\n\n Returns:\n A `Tensor`.\n \"\"\"\n return self._initial_value\n\n @property\n def constraint(self):\n \"\"\"Returns the constraint function associated with this variable.\n\n Returns:\n The constraint function that was passed to the variable constructor.\n Can be `None` if no constraint was passed.\n \"\"\"\n return self._constraint\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n \"\"\"Assigns a new value to the variable.\n\n This is essentially a shortcut for `assign(self, value)`.\n\n Args:\n value: A `Tensor`. 
The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the assignment has completed.\n \"\"\"\n assign = state_ops.assign(self._variable, value, use_locking=use_locking,\n name=name)\n if read_value:\n return assign\n return assign.op\n\n def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Adds a value to this variable.\n\n This is essentially a shortcut for `assign_add(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to add to this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the addition has completed.\n \"\"\"\n assign = state_ops.assign_add(\n self._variable, delta, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op\n\n def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Subtracts a value from this variable.\n\n This is essentially a shortcut for `assign_sub(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to subtract from this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the subtraction has completed.\n \"\"\"\n assign = state_ops.assign_sub(\n self._variable, delta, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Subtracts `IndexedSlices` from this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be subtracted from this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise ValueError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_sub(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Adds `IndexedSlices` from this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be added to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise ValueError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_add(\n self._variable,\n sparse_delta.indices,\n 
sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Assigns `IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise ValueError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_update(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_nd_sub(self, indices, updates, name=None):\n \"\"\"Applies sparse subtraction to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_sub(indices, updates)\n with tf.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, -9, 3, -6, -6, 6, 7, -4]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n return gen_state_ops.scatter_nd_sub(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def scatter_nd_add(self, indices, updates, name=None):\n \"\"\"Applies sparse addition to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n add = ref.scatter_nd_add(indices, updates)\n with tf.Session() as sess:\n print sess.run(add)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n return gen_state_ops.scatter_nd_add(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def scatter_nd_update(self, indices, updates, name=None):\n \"\"\"Applies sparse assignment to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_update(indices, updates)\n with tf.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n return gen_state_ops.scatter_nd_update(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def _strided_slice_assign(self,\n begin,\n end,\n strides,\n value,\n name,\n begin_mask,\n end_mask,\n ellipsis_mask,\n new_axis_mask,\n shrink_axis_mask):\n return gen_array_ops.strided_slice_assign(ref=self._ref(),\n begin=begin,\n end=end,\n strides=strides,\n value=value,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n def count_up_to(self, limit):\n \"\"\"Increments this variable until it reaches `limit`.\n\n When that Op is run it tries to increment the variable by `1`. 
If\n incrementing the variable would bring it above `limit` then the Op raises\n the exception `OutOfRangeError`.\n\n If no error is raised, the Op outputs the value of the variable before\n the increment.\n\n This is essentially a shortcut for `count_up_to(self, limit)`.\n\n Args:\n limit: value at which incrementing the variable raises an error.\n\n Returns:\n A `Tensor` that will hold the variable value before the increment. If no\n other Op modifies this variable, the values produced will all be\n distinct.\n \"\"\"\n return state_ops.count_up_to(self._variable, limit=limit)\n\n def load(self, value, session=None):\n \"\"\"Load new value into this variable.\n\n Writes new value to variable's memory. Doesn't add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n v.load([2, 3], sess)\n print(v.eval(sess)) # prints [2 3]\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n v.load([3, 4], sess)\n print(v.eval()) # prints [3 4]\n ```\n\n Args:\n value: New variable value\n session: The session to use to evaluate this variable. If\n none, the default session is used.\n\n Raises:\n ValueError: Session is not passed and no default session\n \"\"\"\n if context.executing_eagerly():\n self.assign(value)\n else:\n session = session or ops.get_default_session()\n if session is None:\n raise ValueError(\n \"Either session argument should be provided or default session \"\n \"should be established\")\n session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})\n\n # Conversion to tensor.\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name\n \"\"\"Utility function for converting a Variable to a Tensor.\"\"\"\n _ = name\n if dtype and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n return v._ref() # pylint: disable=protected-access\n else:\n return v.value()\n\n @staticmethod\n def _OverloadAllOperators(): # pylint: disable=invalid-name\n \"\"\"Register overloads for all operators.\"\"\"\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n Variable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(Variable, \"__getitem__\", array_ops._SliceHelperVar)\n\n @staticmethod\n def _OverloadOperator(operator): # pylint: disable=invalid-name\n \"\"\"Defer an operator overload to `ops.Tensor`.\n\n We pull the operator out of ops.Tensor dynamically to avoid ordering issues.\n\n Args:\n operator: string. The operator name.\n \"\"\"\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n return getattr(ops.Tensor, operator)(a._AsTensor(), *args)\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = getattr(ops.Tensor, operator).__doc__\n except AttributeError:\n pass\n\n setattr(Variable, operator, _run_op)\n\n def _gather_saveables_for_checkpoint(self):\n \"\"\"For implementing `Checkpointable`. 
This object is saveable on its own.\"\"\"\n return {checkpointable.VARIABLE_VALUE_KEY: self}\n\n def _try_guard_against_uninitialized_dependencies(self, initial_value):\n \"\"\"Attempt to guard against dependencies on uninitialized variables.\n\n Replace references to variables in `initial_value` with references to the\n variable's initialized values. The initialized values are essentially\n conditional TensorFlow graphs that return a variable's value if it is\n initialized or its `initial_value` if it hasn't been initialized. This\n replacement is done on a best effort basis:\n\n - If the `initial_value` graph contains cycles, we don't do any\n replacements for that graph.\n - If the variables that `initial_value` depends on are not present in the\n `GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.\n\n In these cases, it is up to the caller to ensure that the `initial_value`\n graph uses initialized variables or that they guard access to variables\n using their `initialized_value` method.\n\n Args:\n initial_value: `Tensor`. The initial value.\n Returns:\n A `Tensor` suitable to initialize a variable.\n Raises:\n TypeError: If `initial_value` is not a `Tensor`.\n \"\"\"\n if not isinstance(initial_value, ops.Tensor):\n raise TypeError(\"initial_value needs to be a Tensor: %s\" % initial_value)\n\n # Don't modify initial_value if it contains any cyclic dependencies.\n def has_cycle(op, path):\n \"\"\"Detect cycles in the dependencies of `initial_value`.\"\"\"\n if op.name in path:\n return True\n path.add(op.name)\n for op_input in op.inputs:\n if has_cycle(op_input.op, path):\n return True\n for op_control_input in op.control_inputs:\n if has_cycle(op_control_input, path):\n return True\n path.remove(op.name)\n return False\n if has_cycle(initial_value.op, path=set()):\n return initial_value\n\n return self._safe_initial_value_from_tensor(initial_value, op_cache={})\n\n def _safe_initial_value_from_tensor(self, tensor, op_cache):\n \"\"\"Replace dependencies on variables with their initialized values.\n\n Args:\n tensor: A `Tensor`. The tensor to replace.\n op_cache: A dict mapping operation names to `Operation`s. Used to memoize\n the results so as to avoid creating redundant operations.\n Returns:\n A `Tensor` compatible with `tensor`. Any inputs that lead to variable\n values will be replaced with a corresponding graph that uses the\n variable's initialized values. This is done on a best-effort basis. If no\n modifications need to be made then `tensor` will be returned unchanged.\n \"\"\"\n op = tensor.op\n new_op = op_cache.get(op.name)\n if new_op is None:\n new_op = self._safe_initial_value_from_op(op, op_cache)\n op_cache[op.name] = new_op\n return new_op.outputs[tensor.value_index]\n\n def _safe_initial_value_from_op(self, op, op_cache):\n \"\"\"Replace dependencies on variables with their initialized values.\n\n Args:\n op: An `Operation`. The operation to replace.\n op_cache: A dict mapping operation names to `Operation`s. Used to memoize\n the results so as to avoid creating redundant operations.\n Returns:\n An `Operation` compatible with `op`. Any inputs that lead to variable\n values will be replaced with a corresponding graph that uses the\n variable's initialized values. This is done on a best-effort basis. 
If no\n modifications need to be made then `op` will be returned unchanged.\n \"\"\"\n op_type = op.node_def.op\n if op_type in (\"IsVariableInitialized\", \"VarIsInitializedOp\",\n \"ReadVariableOp\"):\n return op\n\n # Attempt to find the initialized_value of any variable reference / handles.\n # TODO(b/70206927): Fix handling of ResourceVariables.\n if op_type in (\"Variable\", \"VariableV2\", \"VarHandleOp\"):\n initialized_value = self._find_initialized_value_for_variable(op)\n return op if initialized_value is None else initialized_value.op\n\n # Recursively build initializer expressions for inputs.\n modified = False\n new_op_inputs = []\n for op_input in op.inputs:\n new_op_input = self._safe_initial_value_from_tensor(op_input, op_cache)\n new_op_inputs.append(new_op_input)\n modified = modified or (new_op_input != op_input)\n\n # If at least one input was modified, replace the op.\n if modified:\n new_op_type = op_type\n if new_op_type == \"RefSwitch\":\n new_op_type = \"Switch\"\n new_op_name = op.node_def.name + \"_\" + self.name\n new_op_name = new_op_name.replace(\":\", \"_\")\n return self.graph.create_op(\n new_op_type, new_op_inputs,\n op._output_types, # pylint: disable=protected-access\n name=new_op_name, attrs=op.node_def.attr)\n\n return op\n\n def _find_initialized_value_for_variable(self, variable_op):\n \"\"\"Find the initialized value for a variable op.\n\n To do so, lookup the variable op in the variables collection.\n\n Args:\n variable_op: A variable `Operation`.\n Returns:\n A `Tensor` representing the initialized value for the variable or `None`\n if the initialized value could not be found.\n \"\"\"\n try:\n var_names = [variable_op.node_def.name, variable_op.node_def.name + \":0\"]\n for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.LOCAL_VARIABLES):\n for var in self.graph.get_collection(collection_name):\n if var.name in var_names:\n return var.initialized_value()\n except AttributeError:\n # Return None when an incomplete user-defined variable type was put in\n # the collection.\n return None\n return None\n\n # NOTE(mrry): This enables the Variable's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Variable class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Variables interact\n # with ndarrays.\n __array_priority__ = 100\n\n @property\n def name(self):\n \"\"\"The name of this variable.\"\"\"\n return self._variable.name\n\n @property\n def _shared_name(self):\n \"\"\"The shared name of the variable.\n\n Unlike name(), shared_name doesn't have \":0\" suffix. 
It is user-specified\n name with name scope prefix.\n\n Returns:\n variable name.\n \"\"\"\n return self.name[:-2]\n\n @property\n def initializer(self):\n \"\"\"The initializer operation for this variable.\"\"\"\n return self._initializer_op\n\n @property\n def device(self):\n \"\"\"The device of this variable.\"\"\"\n return self._variable.device\n\n @property\n def dtype(self):\n \"\"\"The `DType` of this variable.\"\"\"\n return self._variable.dtype\n\n @property\n def op(self):\n \"\"\"The `Operation` of this variable.\"\"\"\n return self._variable.op\n\n @property\n def graph(self):\n \"\"\"The `Graph` of this variable.\"\"\"\n return self._variable.graph\n\n @property\n def shape(self):\n \"\"\"The `TensorShape` of this variable.\n\n Returns:\n A `TensorShape`.\n \"\"\"\n return self._variable.get_shape()\n\n def get_shape(self):\n \"\"\"Alias of Variable.shape.\"\"\"\n return self.shape\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts a `Variable` to a `VariableDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `VariableDef` protocol buffer, or `None` if the `Variable` is not\n in the specified name scope.\n \"\"\"\n if (export_scope is None or\n self._variable.name.startswith(export_scope)):\n var_def = variable_pb2.VariableDef()\n var_def.variable_name = ops.strip_name_scope(\n self._variable.name, export_scope)\n if self._initial_value is not None:\n # For backwards compatibility.\n var_def.initial_value_name = ops.strip_name_scope(\n self._initial_value.name, export_scope)\n var_def.trainable = self.trainable\n var_def.initializer_name = ops.strip_name_scope(\n self.initializer.name, export_scope)\n var_def.snapshot_name = ops.strip_name_scope(\n self._snapshot.name, export_scope)\n if self._save_slice_info:\n var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(\n export_scope=export_scope))\n return var_def\n else:\n return None\n\n def __iadd__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable += will be deprecated. Use variable.assign_add\"\n \" if you want assignment to the variable value or 'x = x + y'\"\n \" if you want a new python Tensor object.\", 1)\n return self + other\n\n def __isub__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable -= will be deprecated. Use variable.assign_sub\"\n \" if you want assignment to the variable value or 'x = x - y'\"\n \" if you want a new python Tensor object.\", 1)\n return self - other\n\n def __imul__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable *= will be deprecated. Use `var.assign(var * other)`\"\n \" if you want assignment to the variable value or `x = x * y`\"\n \" if you want a new python Tensor object.\", 1)\n return self * other\n\n def __idiv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __itruediv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __irealdiv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. 
Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __ipow__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable **= will be deprecated. Use `var.assign(var ** other)`\"\n \" if you want assignment to the variable value or `x = x ** y`\"\n \" if you want a new python Tensor object.\", 1)\n return self ** other\n\n def _set_save_slice_info(self, save_slice_info):\n \"\"\"Sets the slice info for this `Variable`.\n\n Args:\n save_slice_info: A `Variable.SaveSliceInfo` object.\n \"\"\"\n self._save_slice_info = save_slice_info\n\n def _get_save_slice_info(self):\n return self._save_slice_info\n\n\nclass PartitionedVariable(object):\n \"\"\"A container for partitioned `Variable` objects.\n\n @compatibility(eager) `tf.PartitionedVariable` is not compatible with\n eager execution. Use `tf.Variable` instead which is compatible\n with both eager execution and graph construction. See [the\n TensorFlow Eager Execution\n guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)\n for details on how variables work in eager execution.\n @end_compatibility\n \"\"\"\n\n class PartitionedVariableIterator(object):\n \"\"\"An iterator that allows accessing the underlying `Variable` objects.\n\n This iterator is necessary to control order of access when Variables\n are not partitioned in a standard way along a single axis.\n\n Allows e.g. `list(partitioned_variable)` to return a proper list.\n \"\"\"\n\n def __init__(self, partitioned_variable):\n self._ix = 0\n self._partitioned_variable = partitioned_variable\n\n def __iter__(self):\n return self\n\n def __next__(self): # For python3 compatibility.\n return self.next()\n\n def next(self):\n # pylint: disable=protected-access\n if self._ix >= len(self._partitioned_variable._variable_list):\n raise StopIteration()\n variable = self._partitioned_variable._variable_list[self._ix]\n # pylint: enable=protected-access\n self._ix += 1\n return variable\n\n def __init__(self, name, shape, dtype, variable_list, partitions):\n \"\"\"Creates a new partitioned variable wrapper.\n\n Variables passed via the variable_list must contain a save_slice_info\n field. Concatenation and iteration is in lexicographic order according\n to the var_offset property of the save_slice_info.\n\n Args:\n name: String. Overall name of the variables.\n shape: List of integers. Overall shape of the variables.\n dtype: Type of the variables.\n variable_list: List of `Variable` that comprise this partitioned variable.\n partitions: List of integers. 
Number of partitions for each dimension.\n\n Raises:\n TypeError: If `variable_list` is not a list of `Variable` objects, or\n `partitions` is not a list.\n ValueError: If `variable_list` is empty, or the `Variable` shape\n information does not match `shape`, or `partitions` has invalid values.\n RuntimeError: If eager execution is enabled\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\n \"tf.PartitionedVariable not supported with eager execution enabled.\")\n if not isinstance(variable_list, (list, tuple)):\n raise TypeError(\n \"variable_list is not a list or tuple: %s\" % variable_list)\n if not isinstance(partitions, (list, tuple)):\n raise TypeError(\"partitions is not a list or tuple: %s\" % partitions)\n if not all([p >= 1 for p in partitions]):\n raise ValueError(\"partition values must be positive: %s\" % partitions)\n if not variable_list:\n raise ValueError(\"variable_list may not be empty\")\n # pylint: disable=protected-access\n for v in variable_list:\n # Sort the variable_list lexicographically according to var offset value.\n if not all([v._get_save_slice_info() is not None for v in variable_list]):\n raise ValueError(\n \"All variables must have a save_slice_info available: %s\"\n % [v.name for v in variable_list])\n if len(shape) != len(partitions):\n raise ValueError(\"len(shape) != len(partitions): %s vs. %s\"\n % (shape, partitions))\n if not all([v._get_save_slice_info().full_shape == shape]):\n raise ValueError(\n \"All variables' full shapes must match shape: %s; \"\n \"but full shapes were: %s\"\n % (shape, str([v._get_save_slice_info().full_shape])))\n self._variable_list = sorted(\n variable_list, key=lambda v: v._get_save_slice_info().var_offset)\n # pylint: enable=protected-access\n\n self._name = name\n self._shape = shape\n self._dtype = dtype\n self._partitions = partitions\n self._as_tensor = None\n\n def __iter__(self):\n \"\"\"Return an iterable for accessing the underlying partition Variables.\"\"\"\n return self.PartitionedVariableIterator(self)\n\n def __len__(self):\n num_partition_axes = len(self._partition_axes())\n if num_partition_axes > 1:\n raise ValueError(\"Cannot get a length for %d > 1 partition axes\"\n % num_partition_axes)\n return len(self._variable_list)\n\n def _partition_axes(self):\n if all([p == 1 for p in self._partitions]):\n return [0]\n else:\n return [i for i, p in enumerate(self._partitions) if p > 1]\n\n def _concat(self):\n \"\"\"Returns the overall concatenated value as a `Tensor`.\n\n This is different from using the partitioned variable directly as a tensor\n (through tensor conversion and `as_tensor`) in that it creates a new set of\n operations that keeps the control dependencies from its scope.\n\n Returns:\n `Tensor` containing the concatenated value.\n \"\"\"\n if len(self._variable_list) == 1:\n with ops.name_scope(None):\n return array_ops.identity(self._variable_list[0], name=self._name)\n\n partition_axes = self._partition_axes()\n\n if len(partition_axes) > 1:\n raise NotImplementedError(\n \"Cannot concatenate along more than one dimension: %s. 
\"\n \"Multi-axis partition concat is not supported\" % str(partition_axes))\n partition_ix = partition_axes[0]\n\n with ops.name_scope(self._name + \"/ConcatPartitions/\"):\n concatenated = array_ops.concat(self._variable_list, partition_ix)\n\n with ops.name_scope(None):\n return array_ops.identity(concatenated, name=self._name)\n\n def as_tensor(self):\n \"\"\"Returns the overall concatenated value as a `Tensor`.\n\n The returned tensor will not inherit the control dependencies from the scope\n where the value is used, which is similar to getting the value of\n `Variable`.\n\n Returns:\n `Tensor` containing the concatenated value.\n \"\"\"\n with ops.control_dependencies(None):\n return self._concat()\n\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):\n # pylint: disable=invalid-name\n _ = name\n if dtype is not None and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n raise NotImplementedError(\n \"PartitionedVariable doesn't support being used as a reference.\")\n else:\n return v.as_tensor()\n\n @property\n def name(self):\n return self._name\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def shape(self):\n return self.get_shape()\n\n def get_shape(self):\n return self._shape\n\n def _get_variable_list(self):\n return self._variable_list\n\n def _get_partitions(self):\n return self._partitions\n\n def assign(self, value, use_locking=False):\n _ = value, use_locking\n raise NotImplementedError(\n \"assign() has not been implemented for PartitionedVariable.\")\n\n\n@tf_export(\"global_variables\")\ndef global_variables(scope=None):\n \"\"\"Returns global variables.\n\n Global variables are variables that are shared across machines in a\n distributed environment. The `Variable()` constructor or `get_variable()`\n automatically adds new variables to the graph collection\n `GraphKeys.GLOBAL_VARIABLES`.\n This convenience function returns the contents of that collection.\n\n An alternative to global variables are local variables. See\n `tf.local_variables`\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of `Variable` objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)\n\n\n@tf_export(\"all_variables\")\n@deprecated(\"2017-03-02\", \"Please use tf.global_variables instead.\")\ndef all_variables():\n \"\"\"See `tf.global_variables`.\"\"\"\n return global_variables()\n\n\ndef _all_saveable_objects(scope=None):\n \"\"\"Returns all variables and `SaveableObject`s that must be checkpointed.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. 
The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of `Variable` and `SaveableObject` to be checkpointed\n \"\"\"\n # TODO(andreasst): make this function public once things are settled.\n return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) +\n ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope))\n\n\n@tf_export(\"local_variables\")\ndef local_variables(scope=None):\n \"\"\"Returns local variables.\n\n Local variables - per process variables, usually not saved/restored to\n checkpoint and used for temporary or intermediate values.\n For example, they can be used as counters for metrics computation or\n number of epochs this machine has read data.\n The `tf.contrib.framework.local_variable()` function automatically adds the\n new variable to `GraphKeys.LOCAL_VARIABLES`.\n This convenience function returns the contents of that collection.\n\n An alternative to local variables are global variables. See\n `tf.global_variables`\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of local `Variable` objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)\n\n\n@tf_export(\"model_variables\")\ndef model_variables(scope=None):\n \"\"\"Returns all variables in the MODEL_VARIABLES collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of local Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)\n\n\n@tf_export(\"trainable_variables\")\ndef trainable_variables(scope=None):\n \"\"\"Returns all variables created with `trainable=True`.\n\n When passed `trainable=True`, the `Variable()` constructor automatically\n adds new variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the\n contents of that collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)\n\n\n@tf_export(\"moving_average_variables\")\ndef moving_average_variables(scope=None):\n \"\"\"Returns all variables that maintain their moving averages.\n\n If an `ExponentialMovingAverage` object is created and the `apply()`\n method is called on a list of variables, these variables will\n be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.\n This convenience function returns the contents of that collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. 
Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)\n\n\n@tf_export(\"initializers.variables\", \"variables_initializer\")\ndef variables_initializer(var_list, name=\"init\"):\n \"\"\"Returns an Op that initializes a list of variables.\n\n After you launch the graph in a session, you can run the returned Op to\n initialize all the variables in `var_list`. This Op runs all the\n initializers of the variables in `var_list` in parallel.\n\n Calling `initialize_variables()` is equivalent to passing the list of\n initializers to `Group()`.\n\n If `var_list` is empty, however, the function still returns an Op that can\n be run. That Op just has no effect.\n\n Args:\n var_list: List of `Variable` objects to initialize.\n name: Optional name for the returned operation.\n\n Returns:\n An Op that run the initializers of all the specified variables.\n \"\"\"\n if var_list and not context.executing_eagerly():\n return control_flow_ops.group(*[v.initializer for v in var_list], name=name)\n return control_flow_ops.no_op(name=name)\n\n\n@tf_export(\"initialize_variables\")\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.variables_initializer` instead.\")\ndef initialize_variables(var_list, name=\"init\"):\n \"\"\"See `tf.variables_initializer`.\"\"\"\n return variables_initializer(var_list, name=name)\n\n\n@tf_export(\"initializers.global_variables\", \"global_variables_initializer\")\ndef global_variables_initializer():\n \"\"\"Returns an Op that initializes global variables.\n\n This is just a shortcut for `variables_initializer(global_variables())`\n\n Returns:\n An Op that initializes global variables in the graph.\n \"\"\"\n if context.executing_eagerly():\n return control_flow_ops.no_op(name=\"global_variables_initializer\")\n return variables_initializer(global_variables())\n\n\n@tf_export(\"initialize_all_variables\")\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.global_variables_initializer` instead.\")\ndef initialize_all_variables():\n \"\"\"See `tf.global_variables_initializer`.\"\"\"\n return global_variables_initializer()\n\n\n@tf_export(\"initializers.local_variables\", \"local_variables_initializer\")\ndef local_variables_initializer():\n \"\"\"Returns an Op that initializes all local variables.\n\n This is just a shortcut for `variables_initializer(local_variables())`\n\n Returns:\n An Op that initializes all local variables in the graph.\n \"\"\"\n if context.executing_eagerly():\n return control_flow_ops.no_op(name=\"local_variables_initializer\")\n return variables_initializer(local_variables())\n\n\n@tf_export(\"initialize_local_variables\")\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.local_variables_initializer` instead.\")\ndef initialize_local_variables():\n \"\"\"See `tf.local_variables_initializer`.\"\"\"\n return local_variables_initializer()\n\n\n@tf_export(\"is_variable_initialized\")\n@tf_should_use.should_use_result\ndef is_variable_initialized(variable):\n \"\"\"Tests if a variable has been initialized.\n\n Args:\n variable: A `Variable`.\n\n Returns:\n Returns a scalar boolean Tensor, `True` if the variable has been\n initialized, `False` otherwise.\n \"\"\"\n return 
state_ops.is_variable_initialized(variable)\n\n\n@tf_export(\"assert_variables_initialized\")\n@tf_should_use.should_use_result\ndef assert_variables_initialized(var_list=None):\n \"\"\"Returns an Op to check if variables are initialized.\n\n NOTE: This function is obsolete and will be removed in 6 months. Please\n change your implementation to use `report_uninitialized_variables()`.\n\n When run, the returned Op will raise the exception `FailedPreconditionError`\n if any of the variables has not yet been initialized.\n\n Note: This function is implemented by trying to fetch the values of the\n variables. If one of the variables is not initialized a message may be\n logged by the C++ runtime. This is expected.\n\n Args:\n var_list: List of `Variable` objects to check. Defaults to the\n value of `global_variables().`\n\n Returns:\n An Op, or None if there are no variables.\n \"\"\"\n if var_list is None:\n var_list = global_variables() + local_variables()\n # Backwards compatibility for old-style variables. TODO(touts): remove.\n if not var_list:\n var_list = []\n for op in ops.get_default_graph().get_operations():\n if op.type in [\"Variable\", \"VariableV2\", \"AutoReloadVariable\"]:\n var_list.append(op.outputs[0])\n if not var_list:\n return None\n else:\n ranks = []\n for var in var_list:\n with ops.colocate_with(var.op):\n ranks.append(array_ops.rank_internal(var, optimize=False))\n if len(ranks) == 1:\n return ranks[0]\n else:\n return array_ops.stack(ranks)\n\n\n@tf_export(\"report_uninitialized_variables\")\n@tf_should_use.should_use_result\ndef report_uninitialized_variables(var_list=None,\n name=\"report_uninitialized_variables\"):\n \"\"\"Adds ops to list the names of uninitialized variables.\n\n When run, it returns a 1-D tensor containing the names of uninitialized\n variables if there are any, or an empty array if there are none.\n\n Args:\n var_list: List of `Variable` objects to check. Defaults to the\n value of `global_variables() + local_variables()`\n name: Optional name of the `Operation`.\n\n Returns:\n A 1-D tensor containing names of the uninitialized variables, or an empty\n 1-D tensor if there are no variables or no uninitialized variables.\n \"\"\"\n if var_list is None:\n var_list = global_variables() + local_variables()\n # Backwards compatibility for old-style variables. 
TODO(touts): remove.\n if not var_list:\n var_list = []\n for op in ops.get_default_graph().get_operations():\n if op.type in [\"Variable\", \"VariableV2\", \"AutoReloadVariable\"]:\n var_list.append(op.outputs[0])\n with ops.name_scope(name):\n # Run all operations on CPU\n if var_list:\n init_vars = [state_ops.is_variable_initialized(v) for v in var_list]\n with ops.device(\"/cpu:0\"):\n if not var_list:\n # Return an empty tensor so we only need to check for returned tensor\n # size being 0 as an indication of model ready.\n return array_ops.constant([], dtype=dtypes.string)\n else:\n # Get a 1-D boolean tensor listing whether each variable is initialized.\n variables_mask = math_ops.logical_not(array_ops.stack(init_vars))\n # Get a 1-D string tensor containing all the variable names.\n variable_names_tensor = array_ops.constant(\n [s.op.name for s in var_list])\n # Return a 1-D tensor containing all the names of\n # uninitialized variables.\n return array_ops.boolean_mask(variable_names_tensor, variables_mask)\n\n# pylint: disable=protected-access\nVariable._OverloadAllOperators()\n\nops.register_tensor_conversion_function(\n PartitionedVariable, PartitionedVariable._TensorConversionFunction)\n# pylint: enable=protected-access\n\n\nops.register_dense_tensor_like_type(Variable)\n"
] | [
[
"tensorflow.python.feature_column.feature_column_lib.categorical_column_with_hash_bucket",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.contrib.boosted_trees.python.training.functions.gbdt_batch.GradientBoostedDecisionTreeModel",
"tensorflow.contrib.layers.python.layers.feature_column.sparse_column_with_hash_bucket",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_variable",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.contrib.layers.feature_column._real_valued_var_len_column",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.contrib.boosted_trees.proto.tree_config_pb2.DecisionTreeEnsembleConfig",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.resources.shared_resources",
"tensorflow.python.ops.math_ops.square",
"tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig",
"tensorflow.python.feature_column.feature_column_lib.numeric_column",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_serialize",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_stamp_token",
"tensorflow.contrib.layers.real_valued_column",
"tensorflow.contrib.boosted_trees.python.utils.losses.per_example_maxent_loss",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.contrib.boosted_trees.python.training.functions.gbdt_batch.extract_features"
],
[
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.gen_state_ops.scatter_nd_update",
"tensorflow.python.ops.state_ops.assign_sub",
"tensorflow.python.framework.ops.register_dense_tensor_like_type",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.state_ops.variable_op_v2",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.ops.gen_state_ops.scatter_add",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.platform.tf_logging.log_first_n",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.ops.gen_state_ops.scatter_sub",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.framework.ops.register_tensor_conversion_function",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.state_ops.count_up_to",
"tensorflow.core.framework.variable_pb2.SaveSliceInfoDef",
"tensorflow.python.ops.gen_state_ops.scatter_nd_sub",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.framework.ops.prepend_name_scope",
"tensorflow.python.ops.array_ops.rank_internal",
"tensorflow.python.framework.ops.strip_name_scope",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.framework.ops.add_to_collections",
"tensorflow.python.ops.gen_state_ops.scatter_update",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.state_ops.is_variable_initialized",
"tensorflow.python.ops.gen_state_ops.scatter_nd_add",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.framework.ops._name_from_scope_name",
"tensorflow.python.ops.array_ops.boolean_mask",
"tensorflow.core.framework.variable_pb2.VariableDef"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
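The record above lists, among its `apis`, graph-mode variable helpers such as `tensorflow.python.ops.state_ops.assign_add` (reached through `tf.Variable.assign_add`) and the variable-initializer utilities whose docstrings are reproduced in the `code` cell. Purely as an illustrative aside, and not part of the dataset row itself, a minimal TF 1.x sketch of that usage might look like the following; the variable and tensor names are hypothetical, and one of the 1.x versions listed above (e.g. 1.12/1.13) is assumed.

```python
# Minimal TF 1.x graph-mode sketch (assumes one of the 1.x versions listed
# above); names are illustrative only, not taken from the dataset.
import tensorflow as tf

counter = tf.Variable(0, name="counter")     # ref-style variable
update = counter.assign_add(tf.constant(5))  # Tensor holding the new value

init = tf.variables_initializer([counter])   # groups the listed initializers

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(update))  # 5
    print(sess.run(update))  # 10
```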
kevin1kevin1k/bokeh | [
"9f34b5b710e2748ec803c12918ec1706098a3477",
"9f34b5b710e2748ec803c12918ec1706098a3477",
"9f34b5b710e2748ec803c12918ec1706098a3477",
"9f34b5b710e2748ec803c12918ec1706098a3477",
"9f34b5b710e2748ec803c12918ec1706098a3477",
"9f34b5b710e2748ec803c12918ec1706098a3477",
"9f34b5b710e2748ec803c12918ec1706098a3477"
] | [
"examples/plotting/file/custom_datetime_axis.py",
"bokeh/util/serialization.py",
"examples/reference/models/Bezier.py",
"sphinx/source/docs/user_guide/examples/styling_legend_border.py",
"examples/integration/webgl/multiple_webgl_plots.py",
"sphinx/source/docs/user_guide/examples/styling_legend_location_outside.py",
"examples/reference/models/Hex.py"
] | [
"import pandas as pd\n\nfrom bokeh.io import show, output_file\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.stocks import MSFT\n\ndf = pd.DataFrame(MSFT)[:51]\ninc = df.close > df.open\ndec = df.open > df.close\n\np = figure(plot_width=1000, title=\"MSFT Candlestick with Custom X-Axis\")\n\n# map dataframe indices to date strings and use as label overrides\np.xaxis.major_label_overrides = {\n i: date.strftime('%b %d') for i, date in enumerate(pd.to_datetime(df[\"date\"]))\n}\np.xaxis.bounds = (0, df.index[-1])\np.x_range.range_padding = 0.05\n\np.segment(df.index, df.high, df.index, df.low, color=\"black\")\np.vbar(df.index[inc], 0.5, df.open[inc], df.close[inc], fill_color=\"#D5E1DD\", line_color=\"black\")\np.vbar(df.index[dec], 0.5, df.open[dec], df.close[dec], fill_color=\"#F2583E\", line_color=\"black\")\n\noutput_file(\"custom_datetime_axis.html\", title=\"custom_datetime_axis.py example\")\n\nshow(p)\n",
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n'''\nFunctions for helping with serialization and deserialization of\nBokeh objects.\n\nCertain NumPy array dtypes can be serialized to a binary format for\nperformance and efficiency. The list of supported dtypes is:\n\n{binary_array_types}\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport base64\nimport datetime as dt\nimport math\nimport sys\nfrom threading import Lock\nimport uuid\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom ..settings import settings\nfrom .string import format_docstring\nfrom .dependencies import import_optional\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\npd = import_optional('pandas')\n\nBINARY_ARRAY_TYPES = set([\n np.dtype(np.float32),\n np.dtype(np.float64),\n np.dtype(np.uint8),\n np.dtype(np.int8),\n np.dtype(np.uint16),\n np.dtype(np.int16),\n np.dtype(np.uint32),\n np.dtype(np.int32),\n])\n\nDATETIME_TYPES = set([\n dt.datetime,\n dt.date,\n dt.time,\n np.datetime64,\n])\n\nif pd:\n try:\n _pd_timestamp = pd.Timestamp\n except AttributeError:\n _pd_timestamp = pd.tslib.Timestamp\n DATETIME_TYPES.add(_pd_timestamp)\n DATETIME_TYPES.add(pd.Timedelta)\n DATETIME_TYPES.add(pd.Period)\n DATETIME_TYPES.add(type(pd.NaT))\n\nNP_EPOCH = np.datetime64(0, 'ms')\nNP_MS_DELTA = np.timedelta64(1, 'ms')\n\nDT_EPOCH = dt.datetime.utcfromtimestamp(0)\n\n__doc__ = format_docstring(__doc__, binary_array_types=\"\\n\".join(\"* ``np.\" + str(x) + \"``\" for x in BINARY_ARRAY_TYPES))\n\n__all__ = (\n 'array_encoding_disabled',\n 'convert_datetime_array',\n 'convert_datetime_type',\n 'convert_timedelta_type',\n 'decode_base64_dict',\n 'encode_binary_dict',\n 'encode_base64_dict',\n 'is_datetime_type',\n 'is_timedelta_type',\n 'make_globally_unique_id',\n 'make_id',\n 'serialize_array',\n 'transform_array',\n 'transform_array_to_list',\n 'transform_column_source_data',\n 'traverse_data',\n 'transform_series',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\ndef is_datetime_type(obj):\n ''' Whether an object is any date, time, or datetime type recognized by\n Bokeh.\n\n Arg:\n obj (object) : the object to test\n\n Returns:\n bool : True if ``obj`` is a datetime type\n\n '''\n return isinstance(obj, _dt_tuple)\n\ndef is_timedelta_type(obj):\n ''' Whether an object is any timedelta type recognized by Bokeh.\n\n Arg:\n obj (object) : the object to test\n\n Returns:\n bool : True if ``obj`` is a timedelta type\n\n '''\n return isinstance(obj, (dt.timedelta, np.timedelta64))\n\ndef 
convert_timedelta_type(obj):\n ''' Convert any recognized timedelta value to floating point absolute\n milliseconds.\n\n Arg:\n obj (object) : the object to convert\n\n Returns:\n float : milliseconds\n\n '''\n if isinstance(obj, dt.timedelta):\n return obj.total_seconds() * 1000.\n elif isinstance(obj, np.timedelta64):\n return (obj / NP_MS_DELTA)\n\ndef convert_datetime_type(obj):\n ''' Convert any recognized date, time, or datetime value to floating point\n milliseconds since epoch.\n\n Arg:\n obj (object) : the object to convert\n\n Returns:\n float : milliseconds\n\n '''\n # Pandas NaT\n if pd and obj is pd.NaT:\n return np.nan\n\n # Pandas Period\n if pd and isinstance(obj, pd.Period):\n return obj.to_timestamp().value / 10**6.0\n\n # Pandas Timestamp\n if pd and isinstance(obj, _pd_timestamp): return obj.value / 10**6.0\n\n # Pandas Timedelta\n elif pd and isinstance(obj, pd.Timedelta): return obj.value / 10**6.0\n\n # Datetime (datetime is a subclass of date)\n elif isinstance(obj, dt.datetime):\n diff = obj.replace(tzinfo=None) - DT_EPOCH\n return diff.total_seconds() * 1000.\n\n # Date\n elif isinstance(obj, dt.date):\n return (dt.datetime(*obj.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000\n\n # NumPy datetime64\n elif isinstance(obj, np.datetime64):\n epoch_delta = obj - NP_EPOCH\n return (epoch_delta / NP_MS_DELTA)\n\n # Time\n elif isinstance(obj, dt.time):\n return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.\n\ndef convert_datetime_array(array):\n ''' Convert NumPy datetime arrays to arrays to milliseconds since epoch.\n\n Args:\n array : (obj)\n A NumPy array of datetime to convert\n\n If the value passed in is not a NumPy array, it will be returned as-is.\n\n Returns:\n array\n\n '''\n\n if not isinstance(array, np.ndarray):\n return array\n\n try:\n dt2001 = np.datetime64('2001')\n legacy_datetime64 = (dt2001.astype('int64') ==\n dt2001.astype('datetime64[ms]').astype('int64'))\n except AttributeError as e:\n if e.args == (\"'module' object has no attribute 'datetime64'\",):\n # for compatibility with PyPy that doesn't have datetime64\n if 'PyPy' in sys.version:\n legacy_datetime64 = False\n pass\n else:\n raise e\n else:\n raise e\n\n # not quite correct, truncates to ms..\n if array.dtype.kind == 'M':\n if legacy_datetime64:\n if array.dtype == np.dtype('datetime64[ns]'):\n array = array.astype('int64') / 10**6.0\n else:\n array = array.astype('datetime64[us]').astype('int64') / 1000.\n\n elif array.dtype.kind == 'm':\n array = array.astype('timedelta64[us]').astype('int64') / 1000.\n\n return array\n\ndef make_id():\n ''' Return a new unique ID for a Bokeh object.\n\n Normally this function will return simple monotonically increasing integer\n IDs (as strings) for identifying Bokeh objects within a Document. However,\n if it is desirable to have globally unique for every object, this behavior\n can be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``.\n\n Returns:\n str\n\n '''\n global _simple_id\n\n if settings.simple_ids():\n with _simple_id_lock:\n _simple_id += 1\n return str(_simple_id)\n else:\n return make_globally_unique_id()\n\ndef make_globally_unique_id():\n ''' Return a globally unique UUID.\n\n Some situations, e.g. 
id'ing dynamically created Divs in HTML documents,\n always require globally unique IDs.\n\n Returns:\n str\n\n '''\n return str(uuid.uuid4())\n\ndef array_encoding_disabled(array):\n ''' Determine whether an array may be binary encoded.\n\n The NumPy array dtypes that can be encoded are:\n\n {binary_array_types}\n\n Args:\n array (np.ndarray) : the array to check\n\n Returns:\n bool\n\n '''\n\n # disable binary encoding for non-supported dtypes\n return array.dtype not in BINARY_ARRAY_TYPES\n\narray_encoding_disabled.__doc__ = format_docstring(array_encoding_disabled.__doc__,\n binary_array_types=\"\\n \".join(\"* ``np.\" + str(x) + \"``\"\n for x in BINARY_ARRAY_TYPES))\n\ndef transform_array(array, force_list=False, buffers=None):\n ''' Transform a NumPy arrays into serialized format\n\n Converts un-serializable dtypes and returns JSON serializable\n format\n\n Args:\n array (np.ndarray) : a NumPy array to be transformed\n force_list (bool, optional) : whether to only output to standard lists\n This function can encode some dtypes using a binary encoding, but\n setting this argument to True will override that and cause only\n standard Python lists to be emitted. (default: False)\n\n buffers (set, optional) :\n If binary buffers are desired, the buffers parameter may be\n provided, and any columns that may be sent as binary buffers\n will be added to the set. If None, then only base64 encoding\n will be used (default: None)\n\n If force_list is True, then this value will be ignored, and\n no buffers will be generated.\n\n **This is an \"out\" parameter**. The values it contains will be\n modified in-place.\n\n\n Returns:\n JSON\n\n '''\n\n array = convert_datetime_array(array)\n\n return serialize_array(array, force_list=force_list, buffers=buffers)\n\ndef transform_array_to_list(array):\n ''' Transforms a NumPy array into a list of values\n\n Args:\n array (np.nadarray) : the NumPy array series to transform\n\n Returns:\n list or dict\n\n '''\n if (array.dtype.kind in ('u', 'i', 'f') and (~np.isfinite(array)).any()):\n transformed = array.astype('object')\n transformed[np.isnan(array)] = 'NaN'\n transformed[np.isposinf(array)] = 'Infinity'\n transformed[np.isneginf(array)] = '-Infinity'\n return transformed.tolist()\n elif (array.dtype.kind == 'O' and pd and pd.isnull(array).any()):\n transformed = array.astype('object')\n transformed[pd.isnull(array)] = 'NaN'\n return transformed.tolist()\n return array.tolist()\n\ndef transform_series(series, force_list=False, buffers=None):\n ''' Transforms a Pandas series into serialized form\n\n Args:\n series (pd.Series) : the Pandas series to transform\n force_list (bool, optional) : whether to only output to standard lists\n This function can encode some dtypes using a binary encoding, but\n setting this argument to True will override that and cause only\n standard Python lists to be emitted. (default: False)\n\n buffers (set, optional) :\n If binary buffers are desired, the buffers parameter may be\n provided, and any columns that may be sent as binary buffers\n will be added to the set. If None, then only base64 encoding\n will be used (default: None)\n\n If force_list is True, then this value will be ignored, and\n no buffers will be generated.\n\n **This is an \"out\" parameter**. 
The values it contains will be\n modified in-place.\n\n Returns:\n list or dict\n\n '''\n # not checking for pd here, this function should only be called if it\n # is already known that series is a Pandas Series type\n if isinstance(series, pd.PeriodIndex):\n vals = series.to_timestamp().values\n else:\n vals = series.values\n return transform_array(vals, force_list=force_list, buffers=buffers)\n\ndef serialize_array(array, force_list=False, buffers=None):\n ''' Transforms a NumPy array into serialized form.\n\n Args:\n array (np.ndarray) : the NumPy array to transform\n force_list (bool, optional) : whether to only output to standard lists\n This function can encode some dtypes using a binary encoding, but\n setting this argument to True will override that and cause only\n standard Python lists to be emitted. (default: False)\n\n buffers (set, optional) :\n If binary buffers are desired, the buffers parameter may be\n provided, and any columns that may be sent as binary buffers\n will be added to the set. If None, then only base64 encoding\n will be used (default: None)\n\n If force_list is True, then this value will be ignored, and\n no buffers will be generated.\n\n **This is an \"out\" parameter**. The values it contains will be\n modified in-place.\n\n Returns:\n list or dict\n\n '''\n if isinstance(array, np.ma.MaskedArray):\n array = array.filled(np.nan) # Set masked values to nan\n if (array_encoding_disabled(array) or force_list):\n return transform_array_to_list(array)\n if not array.flags['C_CONTIGUOUS']:\n array = np.ascontiguousarray(array)\n if buffers is None:\n return encode_base64_dict(array)\n else:\n return encode_binary_dict(array, buffers)\n\ndef traverse_data(obj, use_numpy=True, buffers=None):\n ''' Recursively traverse an object until a flat list is found.\n\n If NumPy is available, the flat list is converted to a numpy array\n and passed to transform_array() to handle ``nan``, ``inf``, and\n ``-inf``.\n\n Otherwise, iterate through all items, converting non-JSON items\n\n Args:\n obj (list) : a list of values or lists\n use_numpy (bool, optional) toggle NumPy as a dependency for testing\n This argument is only useful for testing (default: True)\n '''\n if use_numpy and all(isinstance(el, np.ndarray) for el in obj):\n return [transform_array(el, buffers=buffers) for el in obj]\n obj_copy = []\n for item in obj:\n # Check the base/common case first for performance reasons\n # Also use type(x) is float because it's faster than isinstance\n if type(item) is float:\n if math.isnan(item):\n item = 'NaN'\n elif math.isinf(item):\n if item > 0:\n item = 'Infinity'\n else:\n item = '-Infinity'\n obj_copy.append(item)\n elif isinstance(item, (list, tuple)): # check less common type second\n obj_copy.append(traverse_data(item))\n else:\n obj_copy.append(item)\n return obj_copy\n\ndef transform_column_source_data(data, buffers=None, cols=None):\n ''' Transform ``ColumnSourceData`` data to a serialized format\n\n Args:\n data (dict) : the mapping of names to data columns to transform\n\n buffers (set, optional) :\n If binary buffers are desired, the buffers parameter may be\n provided, and any columns that may be sent as binary buffers\n will be added to the set. If None, then only base64 encoding\n will be used (default: None)\n\n **This is an \"out\" parameter**. The values it contains will be\n modified in-place.\n\n cols (list[str], optional) :\n Optional list of subset of columns to transform. 
If None, all\n columns will be transformed (default: None)\n\n Returns:\n JSON compatible dict\n\n '''\n to_transform = set(data) if cols is None else set(cols)\n\n data_copy = {}\n for key in to_transform:\n if pd and isinstance(data[key], (pd.Series, pd.Index)):\n data_copy[key] = transform_series(data[key], buffers=buffers)\n elif isinstance(data[key], np.ndarray):\n data_copy[key] = transform_array(data[key], buffers=buffers)\n else:\n data_copy[key] = traverse_data(data[key], buffers=buffers)\n\n return data_copy\n\ndef encode_binary_dict(array, buffers):\n ''' Send a numpy array as an unencoded binary buffer\n\n The encoded format is a dict with the following structure:\n\n .. code:: python\n\n {\n '__buffer__' : << an ID to locate the buffer >>,\n 'shape' : << array shape >>,\n 'dtype' : << dtype name >>,\n 'order' : << byte order at origin (little or big)>>\n }\n\n Args:\n array (np.ndarray) : an array to encode\n\n buffers (set) :\n Set to add buffers to\n\n **This is an \"out\" parameter**. The values it contains will be\n modified in-place.\n\n Returns:\n dict\n\n '''\n buffer_id = make_id()\n buf = (dict(id=buffer_id), array.tobytes())\n buffers.append(buf)\n\n return {\n '__buffer__' : buffer_id,\n 'shape' : array.shape,\n 'dtype' : array.dtype.name,\n 'order' : sys.byteorder\n }\n\ndef encode_base64_dict(array):\n ''' Encode a NumPy array using base64:\n\n The encoded format is a dict with the following structure:\n\n .. code:: python\n\n {\n '__ndarray__' : << base64 encoded array data >>,\n 'shape' : << array shape >>,\n 'dtype' : << dtype name >>,\n }\n\n Args:\n\n array (np.ndarray) : an array to encode\n\n Returns:\n dict\n\n '''\n return {\n '__ndarray__' : base64.b64encode(array.data).decode('utf-8'),\n 'shape' : array.shape,\n 'dtype' : array.dtype.name\n }\n\ndef decode_base64_dict(data):\n ''' Decode a base64 encoded array into a NumPy array.\n\n Args:\n data (dict) : encoded array data to decode\n\n Data should have the format encoded by :func:`encode_base64_dict`.\n\n Returns:\n np.ndarray\n\n '''\n b64 = base64.b64decode(data['__ndarray__'])\n array = np.copy(np.frombuffer(b64, dtype=data['dtype']))\n if len(data['shape']) > 1:\n array = array.reshape(data['shape'])\n return array\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n_simple_id = 999\n_simple_id_lock = Lock()\n\n_dt_tuple = tuple(DATETIME_TYPES)\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n",
"import numpy as np\n\nfrom bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid\nfrom bokeh.models.glyphs import Bezier\nfrom bokeh.io import curdoc, show\n\nN = 9\nx = np.linspace(-2, 2, N)\ny = x**2\n\nsource = ColumnDataSource(dict(\n x=x,\n y=y,\n xp02=x+0.4,\n xp01=x+0.1,\n xm01=x-0.1,\n yp01=y+0.2,\n ym01=y-0.2,\n )\n)\n\nplot = Plot(\n title=None, plot_width=300, plot_height=300,\n min_border=0, toolbar_location=None)\n\nglyph = Bezier(x0=\"x\", y0=\"y\", x1=\"xp02\", y1=\"y\", cx0=\"xp01\", cy0=\"yp01\", cx1=\"xm01\", cy1=\"ym01\", line_color=\"#d95f02\", line_width=2)\nplot.add_glyph(source, glyph)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis, 'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ncurdoc().add_root(plot)\n\nshow(plot)\n",
"import numpy as np\nfrom bokeh.plotting import output_file, figure, show\n\nx = np.linspace(0, 4*np.pi, 100)\ny = np.sin(x)\n\noutput_file(\"legend_border.html\")\n\np = figure()\n\np.circle(x, y, legend=\"sin(x)\")\np.line(x, y, legend=\"sin(x)\")\n\np.line(x, 2*y, legend=\"2*sin(x)\",\n line_dash=[4, 4], line_color=\"orange\", line_width=2)\n\np.square(x, 3*y, legend=\"3*sin(x)\", fill_color=None, line_color=\"green\")\np.line(x, 3*y, legend=\"3*sin(x)\", line_color=\"green\")\n\np.legend.border_line_width = 3\np.legend.border_line_color = \"navy\"\np.legend.border_line_alpha = 0.5\n\nshow(p)\n",
"import numpy as np\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\nN = 10000\n\nx = np.random.normal(0, np.pi, N)\ny = np.sin(x) + np.random.normal(0, 0.2, N)\n\np1 = figure(output_backend=\"webgl\")\np1.circle(x, y, fill_alpha=0.1)\n\np2 = figure(output_backend=\"webgl\")\np2.circle(x, y, fill_alpha=0.1)\n\nx = np.linspace(0, 10*np.pi, N)\ny = np.cos(x) + np.sin(2*x+1.25) + np.random.normal(0, 0.001, (N, ))\n\np3 = figure(output_backend=\"webgl\")\np3.line(x, y, color=\"#22aa22\", line_width=3)\n\np4 = figure(output_backend=\"webgl\")\np4.line(x, y, color=\"#22aa22\", line_width=3)\n\nsave(gridplot([[p1, p2], [p3, p4]]))\n",
"import numpy as np\nfrom bokeh.models import Legend\nfrom bokeh.plotting import figure, show, output_file\n\nx = np.linspace(0, 4*np.pi, 100)\ny = np.sin(x)\n\noutput_file(\"legend_labels.html\")\n\np = figure(toolbar_location=\"above\")\n\nr0 = p.circle(x, y)\nr1 = p.line(x, y)\n\nr2 = p.line(x, 2*y, line_dash=[4, 4], line_color=\"orange\", line_width=2)\n\nr3 = p.square(x, 3*y, fill_color=None, line_color=\"green\")\nr4 = p.line(x, 3*y, line_color=\"green\")\n\nlegend = Legend(items=[\n (\"sin(x)\" , [r0, r1]),\n (\"2*sin(x)\" , [r2]),\n (\"3*sin(x)\" , [r3, r4]),\n], location=\"center\")\n\np.add_layout(legend, 'right')\n\nshow(p)\n",
"import numpy as np\n\nfrom bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid\nfrom bokeh.models.markers import Hex\nfrom bokeh.io import curdoc, show\n\nN = 9\nx = np.linspace(-2, 2, N)\ny = x**2\nsizes = np.linspace(10, 20, N)\n\nsource = ColumnDataSource(dict(x=x, y=y, sizes=sizes))\n\nplot = Plot(\n title=None, plot_width=300, plot_height=300,\n min_border=0, toolbar_location=None)\n\nglyph = Hex(x=\"x\", y=\"y\", size=\"sizes\", fill_color=\"#f0027f\")\nplot.add_glyph(source, glyph)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis, 'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ncurdoc().add_root(plot)\n\nshow(plot)\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame"
],
[
"numpy.isfinite",
"numpy.ascontiguousarray",
"numpy.isnan",
"numpy.isposinf",
"numpy.dtype",
"numpy.datetime64",
"numpy.timedelta64",
"numpy.frombuffer",
"numpy.isneginf"
],
[
"numpy.linspace"
],
[
"numpy.linspace",
"numpy.sin"
],
[
"numpy.random.normal",
"numpy.cos",
"numpy.linspace",
"numpy.sin"
],
[
"numpy.linspace",
"numpy.sin"
],
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
korolm/kaggle-mlcourse | [
"94df0346cfcf9412cd150e1b3ca5239cbe6c9521"
] | [
"assignment2/alice/features/sites.py"
] | [
"from scipy.sparse import csr_matrix\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport numpy as np\nimport pickle\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport re\n\nfrom joblib import Memory\n\ncachedir = 'cache/'\nmemory = Memory(cachedir, verbose=0)\n\npath_to_site_dict = 'data/site_dic.pkl'\n\n\ndef load_site_dict():\n with open(path_to_site_dict, 'rb') as f:\n site2id = pickle.load(f)\n id2site = {v: k for (k, v) in site2id.items()}\n # we treat site with id 0 as \"unknown\"\n id2site[0] = 'unknown'\n return id2site\n\n\nsites = ['site%s' % i for i in range(1, 11)]\nid2site = load_site_dict()\n\n\ndef transform_to_txt_format(train_df, test_df):\n train_file = 'tmp/train_sessions_text.txt'\n test_file = 'tmp/test_sessions_text.txt'\n sites = ['site%s' % i for i in range(1, 11)]\n train_df[sites].fillna(0).astype('int').to_csv(train_file,\n sep=' ',\n index=None, header=None)\n test_df[sites].fillna(0).astype('int').to_csv(test_file,\n sep=' ',\n index=None, header=None)\n return train_file, test_file\n\n\[email protected]\ndef f_sites(train_df, test_df, ngram_range=(1, 3)):\n train_file, test_file = transform_to_txt_format(train_df, test_df)\n cv = CountVectorizer(ngram_range=ngram_range, max_features=50000)\n with open(train_file) as inp_train_file:\n X_train = cv.fit_transform(inp_train_file)\n with open(test_file) as inp_test_file:\n X_test = cv.transform(inp_test_file)\n return X_train, X_test#, cv.get_feature_names()\n\n\[email protected]\ndef f_tfidf_sites(train_df, test_df, ngram_range=(1, 5), sub=False, max_features=50000):\n def join_row(row):\n return ' '.join([id2site[i] for i in row])\n\n train_sessions = train_df[sites].fillna(0).astype('int').apply(join_row, axis=1)\n test_sessions = test_df[sites].fillna(0).astype('int').apply(join_row, axis=1)\n\n vectorizer = TfidfVectorizer(ngram_range=ngram_range,\n max_features=max_features,\n tokenizer=lambda s: s.split())\n X_train = vectorizer.fit_transform(train_sessions)\n X_test = vectorizer.transform(test_sessions)\n return X_train, X_test#, vectorizer.get_feature_names()\n\n\[email protected]\ndef time_sites(train_df, test_df, ngram_range=(1, 5), max_features=50000):\n time_diff = ['time_diff_%s' % i for i in range(1, 11)]\n\n def est_session_length(s):\n if s <= 5:\n return 'small'\n if 6 <= s <= 30:\n return 'medium'\n if 31 <= s <= 90:\n return 'large'\n if 91 <= s:\n return 'extra-large'\n\n def join_row_with_time(row):\n # str_sites = []\n # for i in range(1, 11):\n # site_id = row['site%s' % i]\n # if np.isnan(site_id):\n # site_str = 'no_site'\n # else:\n # site_str = str(id2site[row['site%s' % i]])\n # diff_str = str(row['time_diff_%s' % i])\n # str_sites.append(site_str + '_' + diff_str)\n return ' '.join(['no_site' + '_' + str(row['time_diff_%s' % i])\n if np.isnan(row['site%s' % i])\n else str(id2site[row['site%s' % i]]) + '_' + str(row['time_diff_%s' % i])\n for i in range(1, 11)])\n\n for t in range(1, 10):\n train_df['time_diff_' + str(t)] = (\n (train_df['time' + str(t + 1)] - train_df['time' + str(t)]) / np.timedelta64(1, 's')).apply(\n est_session_length)\n test_df['time_diff_' + str(t)] = (\n (test_df['time' + str(t + 1)] - test_df['time' + str(t)]) / np.timedelta64(1, 's')).apply(\n est_session_length)\n\n train_df['time_diff_10'] = None\n test_df['time_diff_10'] = None\n\n train_df[sites].fillna(0).astype('int')\n test_df[sites].fillna(0).astype('int')\n train_sessions = train_df[sites + time_diff].apply(join_row_with_time, axis=1)\n\n test_sessions = 
test_df[sites + time_diff].apply(join_row_with_time, axis=1)\n\n vectorizer = TfidfVectorizer(ngram_range=ngram_range,\n max_features=max_features,\n tokenizer=lambda s: s.split())\n X_train = vectorizer.fit_transform(train_sessions)\n X_test = vectorizer.transform(test_sessions)\n return X_train, X_test#, vectorizer.get_feature_names()\n\n\ndef count_not_zeros(x):\n unique = set(x)\n if 0 in unique:\n unique.discard(0)\n return len(unique)\n\n\nunique_sites = lambda df: np.array([count_not_zeros(x) for x in df[sites].values]).reshape(-1, 1)\n\n\ndef f_unique(traim_df, test_df):\n return unique_sites(traim_df), unique_sites(test_df), ['unique']\n\ndef extract_unique(df):\n data = df[sites].fillna(0).astype('int')\n return csr_matrix([[sum(1 for s in np.unique(row.values) if s != 0)] for _, row in data.iterrows()])\n"
] | [
[
"numpy.isnan",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.unique",
"numpy.timedelta64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jalexvig/imitating_optimizer | [
"c0a62869ae678a62df9d13d1007efa0e531c6c3c"
] | [
"src/models/mnist.py"
] | [
"import os\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\n\nfrom src.models.base import BaseModel\n\n\nclass MNIST(BaseModel):\n\n def _setup(self):\n\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n\n return x\n\n def get_criterion(self):\n\n return nn.CrossEntropyLoss()\n\n def get_data_gen(self, batch_size, train=True):\n\n dpath_data = os.path.join(\n os.path.dirname(__file__),\n '..',\n '..',\n 'data'\n )\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(dpath_data, train=train, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=batch_size, shuffle=True)\n\n return iter(train_loader)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Dropout2d",
"torch.nn.functional.dropout",
"torch.nn.Conv2d",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anzhao920/MicrosoftProject15_Invictus | [
"15f44eebb09561acbbe7b6730dfadf141e4c166d"
] | [
"COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/numpy/core/tests/test_dtype.py"
] | [
"import sys\r\nimport operator\r\nimport pytest\r\nimport ctypes\r\nimport gc\r\n\r\nimport numpy as np\r\nfrom numpy.core._rational_tests import rational\r\nfrom numpy.core._multiarray_tests import create_custom_field_dtype\r\nfrom numpy.testing import (\r\n assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)\r\nfrom numpy.compat import pickle\r\nfrom itertools import permutations\r\n\r\ndef assert_dtype_equal(a, b):\r\n assert_equal(a, b)\r\n assert_equal(hash(a), hash(b),\r\n \"two equivalent types do not hash to the same value !\")\r\n\r\ndef assert_dtype_not_equal(a, b):\r\n assert_(a != b)\r\n assert_(hash(a) != hash(b),\r\n \"two different types hash to the same value !\")\r\n\r\nclass TestBuiltin:\r\n @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,\r\n np.compat.unicode])\r\n def test_run(self, t):\r\n \"\"\"Only test hash runs at all.\"\"\"\r\n dt = np.dtype(t)\r\n hash(dt)\r\n\r\n @pytest.mark.parametrize('t', [int, float])\r\n def test_dtype(self, t):\r\n # Make sure equivalent byte order char hash the same (e.g. < and = on\r\n # little endian)\r\n dt = np.dtype(t)\r\n dt2 = dt.newbyteorder(\"<\")\r\n dt3 = dt.newbyteorder(\">\")\r\n if dt == dt2:\r\n assert_(dt.byteorder != dt2.byteorder, \"bogus test\")\r\n assert_dtype_equal(dt, dt2)\r\n else:\r\n assert_(dt.byteorder != dt3.byteorder, \"bogus test\")\r\n assert_dtype_equal(dt, dt3)\r\n\r\n def test_equivalent_dtype_hashing(self):\r\n # Make sure equivalent dtypes with different type num hash equal\r\n uintp = np.dtype(np.uintp)\r\n if uintp.itemsize == 4:\r\n left = uintp\r\n right = np.dtype(np.uint32)\r\n else:\r\n left = uintp\r\n right = np.dtype(np.ulonglong)\r\n assert_(left == right)\r\n assert_(hash(left) == hash(right))\r\n\r\n def test_invalid_types(self):\r\n # Make sure invalid type strings raise an error\r\n\r\n assert_raises(TypeError, np.dtype, 'O3')\r\n assert_raises(TypeError, np.dtype, 'O5')\r\n assert_raises(TypeError, np.dtype, 'O7')\r\n assert_raises(TypeError, np.dtype, 'b3')\r\n assert_raises(TypeError, np.dtype, 'h4')\r\n assert_raises(TypeError, np.dtype, 'I5')\r\n assert_raises(TypeError, np.dtype, 'e3')\r\n assert_raises(TypeError, np.dtype, 'f5')\r\n\r\n if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:\r\n assert_raises(TypeError, np.dtype, 'g12')\r\n elif np.dtype('g').itemsize == 12:\r\n assert_raises(TypeError, np.dtype, 'g16')\r\n\r\n if np.dtype('l').itemsize == 8:\r\n assert_raises(TypeError, np.dtype, 'l4')\r\n assert_raises(TypeError, np.dtype, 'L4')\r\n else:\r\n assert_raises(TypeError, np.dtype, 'l8')\r\n assert_raises(TypeError, np.dtype, 'L8')\r\n\r\n if np.dtype('q').itemsize == 8:\r\n assert_raises(TypeError, np.dtype, 'q4')\r\n assert_raises(TypeError, np.dtype, 'Q4')\r\n else:\r\n assert_raises(TypeError, np.dtype, 'q8')\r\n assert_raises(TypeError, np.dtype, 'Q8')\r\n\r\n @pytest.mark.parametrize(\"dtype\",\r\n ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',\r\n 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',\r\n 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',\r\n \"Float128\", \"Complex128\"])\r\n def test_numeric_style_types_are_invalid(self, dtype):\r\n with assert_raises(TypeError):\r\n np.dtype(dtype)\r\n\r\n @pytest.mark.parametrize(\r\n 'value',\r\n ['m8', 'M8', 'datetime64', 'timedelta64',\r\n 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',\r\n '>f', '<f', '=f', '|f',\r\n ])\r\n def test_dtype_bytes_str_equivalence(self, value):\r\n bytes_value = value.encode('ascii')\r\n from_bytes = 
np.dtype(bytes_value)\r\n from_str = np.dtype(value)\r\n assert_dtype_equal(from_bytes, from_str)\r\n\r\n def test_dtype_from_bytes(self):\r\n # Empty bytes object\r\n assert_raises(TypeError, np.dtype, b'')\r\n # Byte order indicator, but no type\r\n assert_raises(TypeError, np.dtype, b'|')\r\n\r\n # Single character with ordinal < NPY_NTYPES returns\r\n # type by index into _builtin_descrs\r\n assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))\r\n assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))\r\n\r\n # Single character where value is a valid type code\r\n assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))\r\n\r\n # Bytes with non-ascii values raise errors\r\n assert_raises(TypeError, np.dtype, b'\\xff')\r\n assert_raises(TypeError, np.dtype, b's\\xff')\r\n\r\n def test_bad_param(self):\r\n # Can't give a size that's too small\r\n assert_raises(ValueError, np.dtype,\r\n {'names':['f0', 'f1'],\r\n 'formats':['i4', 'i1'],\r\n 'offsets':[0, 4],\r\n 'itemsize':4})\r\n # If alignment is enabled, the alignment (4) must divide the itemsize\r\n assert_raises(ValueError, np.dtype,\r\n {'names':['f0', 'f1'],\r\n 'formats':['i4', 'i1'],\r\n 'offsets':[0, 4],\r\n 'itemsize':9}, align=True)\r\n # If alignment is enabled, the individual fields must be aligned\r\n assert_raises(ValueError, np.dtype,\r\n {'names':['f0', 'f1'],\r\n 'formats':['i1', 'f4'],\r\n 'offsets':[0, 2]}, align=True)\r\n\r\n def test_field_order_equality(self):\r\n x = np.dtype({'names': ['A', 'B'],\r\n 'formats': ['i4', 'f4'],\r\n 'offsets': [0, 4]})\r\n y = np.dtype({'names': ['B', 'A'],\r\n 'formats': ['f4', 'i4'],\r\n 'offsets': [4, 0]})\r\n assert_equal(x == y, False)\r\n # But it is currently an equivalent cast:\r\n assert np.can_cast(x, y, casting=\"equiv\")\r\n\r\n\r\nclass TestRecord:\r\n def test_equivalent_record(self):\r\n \"\"\"Test whether equivalent record dtypes hash the same.\"\"\"\r\n a = np.dtype([('yo', int)])\r\n b = np.dtype([('yo', int)])\r\n assert_dtype_equal(a, b)\r\n\r\n def test_different_names(self):\r\n # In theory, they may hash the same (collision) ?\r\n a = np.dtype([('yo', int)])\r\n b = np.dtype([('ye', int)])\r\n assert_dtype_not_equal(a, b)\r\n\r\n def test_different_titles(self):\r\n # In theory, they may hash the same (collision) ?\r\n a = np.dtype({'names': ['r', 'b'],\r\n 'formats': ['u1', 'u1'],\r\n 'titles': ['Red pixel', 'Blue pixel']})\r\n b = np.dtype({'names': ['r', 'b'],\r\n 'formats': ['u1', 'u1'],\r\n 'titles': ['RRed pixel', 'Blue pixel']})\r\n assert_dtype_not_equal(a, b)\r\n\r\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\r\n def test_refcount_dictionary_setting(self):\r\n names = [\"name1\"]\r\n formats = [\"f8\"]\r\n titles = [\"t1\"]\r\n offsets = [0]\r\n d = dict(names=names, formats=formats, titles=titles, offsets=offsets)\r\n refcounts = {k: sys.getrefcount(i) for k, i in d.items()}\r\n np.dtype(d)\r\n refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}\r\n assert refcounts == refcounts_new\r\n\r\n def test_mutate(self):\r\n # Mutating a dtype should reset the cached hash value\r\n a = np.dtype([('yo', int)])\r\n b = np.dtype([('yo', int)])\r\n c = np.dtype([('ye', int)])\r\n assert_dtype_equal(a, b)\r\n assert_dtype_not_equal(a, c)\r\n a.names = ['ye']\r\n assert_dtype_equal(a, c)\r\n assert_dtype_not_equal(a, b)\r\n state = b.__reduce__()[2]\r\n a.__setstate__(state)\r\n assert_dtype_equal(a, b)\r\n assert_dtype_not_equal(a, c)\r\n\r\n def test_not_lists(self):\r\n \"\"\"Test if an appropriate exception is 
raised when passing bad values to\r\n the dtype constructor.\r\n \"\"\"\r\n assert_raises(TypeError, np.dtype,\r\n dict(names={'A', 'B'}, formats=['f8', 'i4']))\r\n assert_raises(TypeError, np.dtype,\r\n dict(names=['A', 'B'], formats={'f8', 'i4'}))\r\n\r\n def test_aligned_size(self):\r\n # Check that structured dtypes get padded to an aligned size\r\n dt = np.dtype('i4, i1', align=True)\r\n assert_equal(dt.itemsize, 8)\r\n dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)\r\n assert_equal(dt.itemsize, 8)\r\n dt = np.dtype({'names':['f0', 'f1'],\r\n 'formats':['i4', 'u1'],\r\n 'offsets':[0, 4]}, align=True)\r\n assert_equal(dt.itemsize, 8)\r\n dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)\r\n assert_equal(dt.itemsize, 8)\r\n # Nesting should preserve that alignment\r\n dt1 = np.dtype([('f0', 'i4'),\r\n ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),\r\n ('f2', 'i1')], align=True)\r\n assert_equal(dt1.itemsize, 20)\r\n dt2 = np.dtype({'names':['f0', 'f1', 'f2'],\r\n 'formats':['i4',\r\n [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],\r\n 'i1'],\r\n 'offsets':[0, 4, 16]}, align=True)\r\n assert_equal(dt2.itemsize, 20)\r\n dt3 = np.dtype({'f0': ('i4', 0),\r\n 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),\r\n 'f2': ('i1', 16)}, align=True)\r\n assert_equal(dt3.itemsize, 20)\r\n assert_equal(dt1, dt2)\r\n assert_equal(dt2, dt3)\r\n # Nesting should preserve packing\r\n dt1 = np.dtype([('f0', 'i4'),\r\n ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),\r\n ('f2', 'i1')], align=False)\r\n assert_equal(dt1.itemsize, 11)\r\n dt2 = np.dtype({'names':['f0', 'f1', 'f2'],\r\n 'formats':['i4',\r\n [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],\r\n 'i1'],\r\n 'offsets':[0, 4, 10]}, align=False)\r\n assert_equal(dt2.itemsize, 11)\r\n dt3 = np.dtype({'f0': ('i4', 0),\r\n 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),\r\n 'f2': ('i1', 10)}, align=False)\r\n assert_equal(dt3.itemsize, 11)\r\n assert_equal(dt1, dt2)\r\n assert_equal(dt2, dt3)\r\n # Array of subtype should preserve alignment\r\n dt1 = np.dtype([('a', '|i1'),\r\n ('b', [('f0', '<i2'),\r\n ('f1', '<f4')], 2)], align=True)\r\n assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),\r\n ('b', [('f0', '<i2'), ('', '|V2'),\r\n ('f1', '<f4')], (2,))])\r\n\r\n def test_union_struct(self):\r\n # Should be able to create union dtypes\r\n dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],\r\n 'offsets':[0, 0, 2]}, align=True)\r\n assert_equal(dt.itemsize, 4)\r\n a = np.array([3], dtype='<u4').view(dt)\r\n a['f1'] = 10\r\n a['f2'] = 36\r\n assert_equal(a['f0'], 10 + 36*256*256)\r\n # Should be able to specify fields out of order\r\n dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],\r\n 'offsets':[4, 0, 2]}, align=True)\r\n assert_equal(dt.itemsize, 8)\r\n # field name should not matter: assignment is by position\r\n dt2 = np.dtype({'names':['f2', 'f0', 'f1'],\r\n 'formats':['<u4', '<u2', '<u2'],\r\n 'offsets':[4, 0, 2]}, align=True)\r\n vals = [(0, 1, 2), (3, -1, 4)]\r\n vals2 = [(0, 1, 2), (3, -1, 4)]\r\n a = np.array(vals, dt)\r\n b = np.array(vals2, dt2)\r\n assert_equal(a.astype(dt2), b)\r\n assert_equal(b.astype(dt), a)\r\n assert_equal(a.view(dt2), b)\r\n assert_equal(b.view(dt), a)\r\n # Should not be able to overlap objects with other types\r\n assert_raises(TypeError, np.dtype,\r\n {'names':['f0', 'f1'],\r\n 'formats':['O', 'i1'],\r\n 'offsets':[0, 2]})\r\n assert_raises(TypeError, np.dtype,\r\n {'names':['f0', 'f1'],\r\n 'formats':['i4', 'O'],\r\n 'offsets':[0, 
3]})\r\n assert_raises(TypeError, np.dtype,\r\n {'names':['f0', 'f1'],\r\n 'formats':[[('a', 'O')], 'i1'],\r\n 'offsets':[0, 2]})\r\n assert_raises(TypeError, np.dtype,\r\n {'names':['f0', 'f1'],\r\n 'formats':['i4', [('a', 'O')]],\r\n 'offsets':[0, 3]})\r\n # Out of order should still be ok, however\r\n dt = np.dtype({'names':['f0', 'f1'],\r\n 'formats':['i1', 'O'],\r\n 'offsets':[np.dtype('intp').itemsize, 0]})\r\n\r\n @pytest.mark.parametrize([\"obj\", \"dtype\", \"expected\"],\r\n [([], (\"(2)f4,\"), np.empty((0, 2), dtype=\"f4\")),\r\n (3, \"(3)f4,\", [3, 3, 3]),\r\n (np.float64(2), \"(2)f4,\", [2, 2]),\r\n ([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),\r\n ([\"1\", \"2\"], \"(2)i,\", None)])\r\n def test_subarray_list(self, obj, dtype, expected):\r\n dtype = np.dtype(dtype)\r\n res = np.array(obj, dtype=dtype)\r\n\r\n if expected is None:\r\n # iterate the 1-d list to fill the array\r\n expected = np.empty(len(obj), dtype=dtype)\r\n for i in range(len(expected)):\r\n expected[i] = obj[i]\r\n\r\n assert_array_equal(res, expected)\r\n\r\n def test_comma_datetime(self):\r\n dt = np.dtype('M8[D],datetime64[Y],i8')\r\n assert_equal(dt, np.dtype([('f0', 'M8[D]'),\r\n ('f1', 'datetime64[Y]'),\r\n ('f2', 'i8')]))\r\n\r\n def test_from_dictproxy(self):\r\n # Tests for PR #5920\r\n dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})\r\n assert_dtype_equal(dt, np.dtype(dt.fields))\r\n dt2 = np.dtype((np.void, dt.fields))\r\n assert_equal(dt2.fields, dt.fields)\r\n\r\n def test_from_dict_with_zero_width_field(self):\r\n # Regression test for #6430 / #2196\r\n dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])\r\n dt2 = np.dtype({'names': ['val1', 'val2'],\r\n 'formats': [(np.float32, (0,)), int]})\r\n\r\n assert_dtype_equal(dt, dt2)\r\n assert_equal(dt.fields['val1'][0].itemsize, 0)\r\n assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)\r\n\r\n def test_bool_commastring(self):\r\n d = np.dtype('?,?,?') # raises?\r\n assert_equal(len(d.names), 3)\r\n for n in d.names:\r\n assert_equal(d.fields[n][0], np.dtype('?'))\r\n\r\n def test_nonint_offsets(self):\r\n # gh-8059\r\n def make_dtype(off):\r\n return np.dtype({'names': ['A'], 'formats': ['i4'],\r\n 'offsets': [off]})\r\n\r\n assert_raises(TypeError, make_dtype, 'ASD')\r\n assert_raises(OverflowError, make_dtype, 2**70)\r\n assert_raises(TypeError, make_dtype, 2.3)\r\n assert_raises(ValueError, make_dtype, -10)\r\n\r\n # no errors here:\r\n dt = make_dtype(np.uint32(0))\r\n np.zeros(1, dtype=dt)[0].item()\r\n\r\n def test_fields_by_index(self):\r\n dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])\r\n assert_dtype_equal(dt[0], np.dtype(np.int8))\r\n assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))\r\n assert_dtype_equal(dt[-1], dt[1])\r\n assert_dtype_equal(dt[-2], dt[0])\r\n assert_raises(IndexError, lambda: dt[-3])\r\n\r\n assert_raises(TypeError, operator.getitem, dt, 3.0)\r\n\r\n assert_equal(dt[1], dt[np.int8(1)])\r\n\r\n @pytest.mark.parametrize('align_flag',[False, True])\r\n def test_multifield_index(self, align_flag):\r\n # indexing with a list produces subfields\r\n # the align flag should be preserved\r\n dt = np.dtype([\r\n (('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')\r\n ], align=align_flag)\r\n\r\n dt_sub = dt[['B', 'col1']]\r\n assert_equal(\r\n dt_sub,\r\n np.dtype({\r\n 'names': ['B', 'col1'],\r\n 'formats': ['<f8', '<U20'],\r\n 'offsets': [88, 0],\r\n 'titles': [None, 'title'],\r\n 'itemsize': 96\r\n })\r\n )\r\n assert_equal(dt_sub.isalignedstruct, align_flag)\r\n\r\n dt_sub = 
dt[['B']]\r\n assert_equal(\r\n dt_sub,\r\n np.dtype({\r\n 'names': ['B'],\r\n 'formats': ['<f8'],\r\n 'offsets': [88],\r\n 'itemsize': 96\r\n })\r\n )\r\n assert_equal(dt_sub.isalignedstruct, align_flag)\r\n\r\n dt_sub = dt[[]]\r\n assert_equal(\r\n dt_sub,\r\n np.dtype({\r\n 'names': [],\r\n 'formats': [],\r\n 'offsets': [],\r\n 'itemsize': 96\r\n })\r\n )\r\n assert_equal(dt_sub.isalignedstruct, align_flag)\r\n\r\n assert_raises(TypeError, operator.getitem, dt, ())\r\n assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])\r\n assert_raises(TypeError, operator.getitem, dt, ['col1', 2])\r\n assert_raises(KeyError, operator.getitem, dt, ['fake'])\r\n assert_raises(KeyError, operator.getitem, dt, ['title'])\r\n assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])\r\n\r\n def test_partial_dict(self):\r\n # 'names' is missing\r\n assert_raises(ValueError, np.dtype,\r\n {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})\r\n\r\n def test_fieldless_views(self):\r\n a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],\r\n 'itemsize':8})\r\n assert_raises(ValueError, a.view, np.dtype([]))\r\n\r\n d = np.dtype((np.dtype([]), 10))\r\n assert_equal(d.shape, (10,))\r\n assert_equal(d.itemsize, 0)\r\n assert_equal(d.base, np.dtype([]))\r\n\r\n arr = np.fromiter((() for i in range(10)), [])\r\n assert_equal(arr.dtype, np.dtype([]))\r\n assert_raises(ValueError, np.frombuffer, b'', dtype=[])\r\n assert_equal(np.frombuffer(b'', dtype=[], count=2),\r\n np.empty(2, dtype=[]))\r\n\r\n assert_raises(ValueError, np.dtype, ([], 'f8'))\r\n assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])\r\n\r\n assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),\r\n np.ones(2, dtype=bool))\r\n\r\n assert_equal(np.zeros((1, 2), dtype=[]) == a,\r\n np.ones((1, 2), dtype=bool))\r\n\r\n\r\nclass TestSubarray:\r\n def test_single_subarray(self):\r\n a = np.dtype((int, (2)))\r\n b = np.dtype((int, (2,)))\r\n assert_dtype_equal(a, b)\r\n\r\n assert_equal(type(a.subdtype[1]), tuple)\r\n assert_equal(type(b.subdtype[1]), tuple)\r\n\r\n def test_equivalent_record(self):\r\n \"\"\"Test whether equivalent subarray dtypes hash the same.\"\"\"\r\n a = np.dtype((int, (2, 3)))\r\n b = np.dtype((int, (2, 3)))\r\n assert_dtype_equal(a, b)\r\n\r\n def test_nonequivalent_record(self):\r\n \"\"\"Test whether different subarray dtypes hash differently.\"\"\"\r\n a = np.dtype((int, (2, 3)))\r\n b = np.dtype((int, (3, 2)))\r\n assert_dtype_not_equal(a, b)\r\n\r\n a = np.dtype((int, (2, 3)))\r\n b = np.dtype((int, (2, 2)))\r\n assert_dtype_not_equal(a, b)\r\n\r\n a = np.dtype((int, (1, 2, 3)))\r\n b = np.dtype((int, (1, 2)))\r\n assert_dtype_not_equal(a, b)\r\n\r\n def test_shape_equal(self):\r\n \"\"\"Test some data types that are equal\"\"\"\r\n assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))\r\n # FutureWarning during deprecation period; after it is passed this\r\n # should instead check that \"(1)f8\" == \"1f8\" == (\"f8\", 1).\r\n with pytest.warns(FutureWarning):\r\n assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))\r\n assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))\r\n assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))\r\n d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))\r\n assert_dtype_equal(np.dtype(d), np.dtype(d))\r\n\r\n def test_shape_simple(self):\r\n \"\"\"Test some simple cases that shouldn't be equal\"\"\"\r\n assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))\r\n assert_dtype_not_equal(np.dtype(('f8', 
(1,))), np.dtype(('f8', (1, 1))))\r\n assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))\r\n\r\n def test_shape_monster(self):\r\n \"\"\"Test some more complicated cases that shouldn't be equal\"\"\"\r\n assert_dtype_not_equal(\r\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\r\n np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))\r\n assert_dtype_not_equal(\r\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\r\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))\r\n assert_dtype_not_equal(\r\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\r\n np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))\r\n assert_dtype_not_equal(\r\n np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\r\n np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))\r\n\r\n def test_shape_sequence(self):\r\n # Any sequence of integers should work as shape, but the result\r\n # should be a tuple (immutable) of base type integers.\r\n a = np.array([1, 2, 3], dtype=np.int16)\r\n l = [1, 2, 3]\r\n # Array gets converted\r\n dt = np.dtype([('a', 'f4', a)])\r\n assert_(isinstance(dt['a'].shape, tuple))\r\n assert_(isinstance(dt['a'].shape[0], int))\r\n # List gets converted\r\n dt = np.dtype([('a', 'f4', l)])\r\n assert_(isinstance(dt['a'].shape, tuple))\r\n #\r\n\r\n class IntLike:\r\n def __index__(self):\r\n return 3\r\n\r\n def __int__(self):\r\n # (a PyNumber_Check fails without __int__)\r\n return 3\r\n\r\n dt = np.dtype([('a', 'f4', IntLike())])\r\n assert_(isinstance(dt['a'].shape, tuple))\r\n assert_(isinstance(dt['a'].shape[0], int))\r\n dt = np.dtype([('a', 'f4', (IntLike(),))])\r\n assert_(isinstance(dt['a'].shape, tuple))\r\n assert_(isinstance(dt['a'].shape[0], int))\r\n\r\n def test_shape_matches_ndim(self):\r\n dt = np.dtype([('a', 'f4', ())])\r\n assert_equal(dt['a'].shape, ())\r\n assert_equal(dt['a'].ndim, 0)\r\n\r\n dt = np.dtype([('a', 'f4')])\r\n assert_equal(dt['a'].shape, ())\r\n assert_equal(dt['a'].ndim, 0)\r\n\r\n dt = np.dtype([('a', 'f4', 4)])\r\n assert_equal(dt['a'].shape, (4,))\r\n assert_equal(dt['a'].ndim, 1)\r\n\r\n dt = np.dtype([('a', 'f4', (1, 2, 3))])\r\n assert_equal(dt['a'].shape, (1, 2, 3))\r\n assert_equal(dt['a'].ndim, 3)\r\n\r\n def test_shape_invalid(self):\r\n # Check that the shape is valid.\r\n max_int = np.iinfo(np.intc).max\r\n max_intp = np.iinfo(np.intp).max\r\n # Too large values (the datatype is part of this)\r\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])\r\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])\r\n assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])\r\n # Takes a different code path (fails earlier:\r\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])\r\n # Negative values\r\n assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])\r\n assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])\r\n\r\n def test_alignment(self):\r\n #Check that subarrays are aligned\r\n t1 = np.dtype('(1,)i4', align=True)\r\n t2 = np.dtype('2i4', align=True)\r\n assert_equal(t1.alignment, t2.alignment)\r\n\r\n\r\ndef iter_struct_object_dtypes():\r\n \"\"\"\r\n Iterates over a few complex dtypes and object pattern which\r\n fill the array with a given object (defaults to a singleton).\r\n\r\n Yields\r\n ------\r\n dtype : dtype\r\n pattern : tuple\r\n Structured tuple for use with `np.array`.\r\n count : int\r\n Number of objects stored in the dtype.\r\n 
singleton : object\r\n A singleton object. The returned pattern is constructed so that\r\n all objects inside the datatype are set to the singleton.\r\n \"\"\"\r\n obj = object()\r\n\r\n dt = np.dtype([('b', 'O', (2, 3))])\r\n p = ([[obj] * 3] * 2,)\r\n yield pytest.param(dt, p, 6, obj, id=\"<subarray>\")\r\n\r\n dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])\r\n p = (0, [[obj] * 3] * 2)\r\n yield pytest.param(dt, p, 6, obj, id=\"<subarray in field>\")\r\n\r\n dt = np.dtype([('a', 'i4'),\r\n ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])\r\n p = (0, [[(obj, 0)] * 3] * 2)\r\n yield pytest.param(dt, p, 6, obj, id=\"<structured subarray 1>\")\r\n\r\n dt = np.dtype([('a', 'i4'),\r\n ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])\r\n p = (0, [[(obj, obj)] * 3] * 2)\r\n yield pytest.param(dt, p, 12, obj, id=\"<structured subarray 2>\")\r\n\r\n\r\[email protected](not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\r\nclass TestStructuredObjectRefcounting:\r\n \"\"\"These tests cover various uses of complicated structured types which\r\n include objects and thus require reference counting.\r\n \"\"\"\r\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\r\n iter_struct_object_dtypes())\r\n @pytest.mark.parametrize([\"creation_func\", \"creation_obj\"], [\r\n pytest.param(np.empty, None,\r\n # None is probably used for too many things\r\n marks=pytest.mark.skip(\"unreliable due to python's behaviour\")),\r\n (np.ones, 1),\r\n (np.zeros, 0)])\r\n def test_structured_object_create_delete(self, dt, pat, count, singleton,\r\n creation_func, creation_obj):\r\n \"\"\"Structured object reference counting in creation and deletion\"\"\"\r\n # The test assumes that 0, 1, and None are singletons.\r\n gc.collect()\r\n before = sys.getrefcount(creation_obj)\r\n arr = creation_func(3, dt)\r\n\r\n now = sys.getrefcount(creation_obj)\r\n assert now - before == count * 3\r\n del arr\r\n now = sys.getrefcount(creation_obj)\r\n assert now == before\r\n\r\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\r\n iter_struct_object_dtypes())\r\n def test_structured_object_item_setting(self, dt, pat, count, singleton):\r\n \"\"\"Structured object reference counting for simple item setting\"\"\"\r\n one = 1\r\n\r\n gc.collect()\r\n before = sys.getrefcount(singleton)\r\n arr = np.array([pat] * 3, dt)\r\n assert sys.getrefcount(singleton) - before == count * 3\r\n # Fill with `1` and check that it was replaced correctly:\r\n before2 = sys.getrefcount(one)\r\n arr[...] 
= one\r\n after2 = sys.getrefcount(one)\r\n assert after2 - before2 == count * 3\r\n del arr\r\n gc.collect()\r\n assert sys.getrefcount(one) == before2\r\n assert sys.getrefcount(singleton) == before\r\n\r\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\r\n iter_struct_object_dtypes())\r\n @pytest.mark.parametrize(\r\n ['shape', 'index', 'items_changed'],\r\n [((3,), ([0, 2],), 2),\r\n ((3, 2), ([0, 2], slice(None)), 4),\r\n ((3, 2), ([0, 2], [1]), 2),\r\n ((3,), ([True, False, True]), 2)])\r\n def test_structured_object_indexing(self, shape, index, items_changed,\r\n dt, pat, count, singleton):\r\n \"\"\"Structured object reference counting for advanced indexing.\"\"\"\r\n zero = 0\r\n one = 1\r\n\r\n arr = np.zeros(shape, dt)\r\n\r\n gc.collect()\r\n before_zero = sys.getrefcount(zero)\r\n before_one = sys.getrefcount(one)\r\n # Test item getting:\r\n part = arr[index]\r\n after_zero = sys.getrefcount(zero)\r\n assert after_zero - before_zero == count * items_changed\r\n del part\r\n # Test item setting:\r\n arr[index] = one\r\n gc.collect()\r\n after_zero = sys.getrefcount(zero)\r\n after_one = sys.getrefcount(one)\r\n assert before_zero - after_zero == count * items_changed\r\n assert after_one - before_one == count * items_changed\r\n\r\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\r\n iter_struct_object_dtypes())\r\n def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):\r\n \"\"\"Structured object reference counting for specialized functions.\r\n The older functions such as take and repeat use different code paths\r\n then item setting (when writing this).\r\n \"\"\"\r\n indices = [0, 1]\r\n\r\n arr = np.array([pat] * 3, dt)\r\n gc.collect()\r\n before = sys.getrefcount(singleton)\r\n res = arr.take(indices)\r\n after = sys.getrefcount(singleton)\r\n assert after - before == count * 2\r\n new = res.repeat(10)\r\n gc.collect()\r\n after_repeat = sys.getrefcount(singleton)\r\n assert after_repeat - after == count * 2 * 10\r\n\r\n\r\nclass TestStructuredDtypeSparseFields:\r\n \"\"\"Tests subarray fields which contain sparse dtypes so that\r\n not all memory is used by the dtype work. Such dtype's should\r\n leave the underlying memory unchanged.\r\n \"\"\"\r\n dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],\r\n 'offsets':[0, 4]}, (2, 3))])\r\n sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],\r\n 'offsets':[4]}, (2, 3))])\r\n\r\n @pytest.mark.xfail(reason=\"inaccessible data is changed see gh-12686.\")\r\n @pytest.mark.valgrind_error(reason=\"reads from uninitialized buffers.\")\r\n def test_sparse_field_assignment(self):\r\n arr = np.zeros(3, self.dtype)\r\n sparse_arr = arr.view(self.sparse_dtype)\r\n\r\n sparse_arr[...] 
= np.finfo(np.float32).max\r\n # dtype is reduced when accessing the field, so shape is (3, 2, 3):\r\n assert_array_equal(arr[\"a\"][\"aa\"], np.zeros((3, 2, 3)))\r\n\r\n def test_sparse_field_assignment_fancy(self):\r\n # Fancy assignment goes to the copyswap function for complex types:\r\n arr = np.zeros(3, self.dtype)\r\n sparse_arr = arr.view(self.sparse_dtype)\r\n\r\n sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max\r\n # dtype is reduced when accessing the field, so shape is (3, 2, 3):\r\n assert_array_equal(arr[\"a\"][\"aa\"], np.zeros((3, 2, 3)))\r\n\r\n\r\nclass TestMonsterType:\r\n \"\"\"Test deeply nested subtypes.\"\"\"\r\n\r\n def test1(self):\r\n simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\r\n 'titles': ['Red pixel', 'Blue pixel']})\r\n a = np.dtype([('yo', int), ('ye', simple1),\r\n ('yi', np.dtype((int, (3, 2))))])\r\n b = np.dtype([('yo', int), ('ye', simple1),\r\n ('yi', np.dtype((int, (3, 2))))])\r\n assert_dtype_equal(a, b)\r\n\r\n c = np.dtype([('yo', int), ('ye', simple1),\r\n ('yi', np.dtype((a, (3, 2))))])\r\n d = np.dtype([('yo', int), ('ye', simple1),\r\n ('yi', np.dtype((a, (3, 2))))])\r\n assert_dtype_equal(c, d)\r\n\r\n def test_list_recursion(self):\r\n l = list()\r\n l.append(('f', l))\r\n with pytest.raises(RecursionError):\r\n np.dtype(l)\r\n\r\n def test_tuple_recursion(self):\r\n d = np.int32\r\n for i in range(100000):\r\n d = (d, (1,))\r\n with pytest.raises(RecursionError):\r\n np.dtype(d)\r\n\r\n def test_dict_recursion(self):\r\n d = dict(names=['self'], formats=[None], offsets=[0])\r\n d['formats'][0] = d\r\n with pytest.raises(RecursionError):\r\n np.dtype(d)\r\n\r\n\r\nclass TestMetadata:\r\n def test_no_metadata(self):\r\n d = np.dtype(int)\r\n assert_(d.metadata is None)\r\n\r\n def test_metadata_takes_dict(self):\r\n d = np.dtype(int, metadata={'datum': 1})\r\n assert_(d.metadata == {'datum': 1})\r\n\r\n def test_metadata_rejects_nondict(self):\r\n assert_raises(TypeError, np.dtype, int, metadata='datum')\r\n assert_raises(TypeError, np.dtype, int, metadata=1)\r\n assert_raises(TypeError, np.dtype, int, metadata=None)\r\n\r\n def test_nested_metadata(self):\r\n d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])\r\n assert_(d['a'].metadata == {'datum': 1})\r\n\r\n def test_base_metadata_copied(self):\r\n d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))\r\n assert_(d.metadata == {'datum': 1})\r\n\r\nclass TestString:\r\n def test_complex_dtype_str(self):\r\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\r\n ('rtile', '>f4', (64, 36))], (3,)),\r\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\r\n ('bright', '>f4', (8, 36))])])\r\n assert_equal(str(dt),\r\n \"[('top', [('tiles', ('>f4', (64, 64)), (1,)), \"\r\n \"('rtile', '>f4', (64, 36))], (3,)), \"\r\n \"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), \"\r\n \"('bright', '>f4', (8, 36))])]\")\r\n\r\n # If the sticky aligned flag is set to True, it makes the\r\n # str() function use a dict representation with an 'aligned' flag\r\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\r\n ('rtile', '>f4', (64, 36))],\r\n (3,)),\r\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\r\n ('bright', '>f4', (8, 36))])],\r\n align=True)\r\n assert_equal(str(dt),\r\n \"{'names':['top','bottom'], \"\r\n \"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), \"\r\n \"('rtile', '>f4', (64, 36))], (3,)),\"\r\n \"[('bleft', ('>f4', (8, 64)), (1,)), \"\r\n \"('bright', '>f4', (8, 36))]], \"\r\n \"'offsets':[0,76800], \"\r\n \"'itemsize':80000, \"\r\n 
\"'aligned':True}\")\r\n assert_equal(np.dtype(eval(str(dt))), dt)\r\n\r\n dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],\r\n 'offsets': [0, 1, 2],\r\n 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})\r\n assert_equal(str(dt),\r\n \"[(('Red pixel', 'r'), 'u1'), \"\r\n \"(('Green pixel', 'g'), 'u1'), \"\r\n \"(('Blue pixel', 'b'), 'u1')]\")\r\n\r\n dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],\r\n 'formats': ['<u4', 'u1', 'u1', 'u1'],\r\n 'offsets': [0, 0, 1, 2],\r\n 'titles': ['Color', 'Red pixel',\r\n 'Green pixel', 'Blue pixel']})\r\n assert_equal(str(dt),\r\n \"{'names':['rgba','r','g','b'],\"\r\n \" 'formats':['<u4','u1','u1','u1'],\"\r\n \" 'offsets':[0,0,1,2],\"\r\n \" 'titles':['Color','Red pixel',\"\r\n \"'Green pixel','Blue pixel'],\"\r\n \" 'itemsize':4}\")\r\n\r\n dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\r\n 'offsets': [0, 2],\r\n 'titles': ['Red pixel', 'Blue pixel']})\r\n assert_equal(str(dt),\r\n \"{'names':['r','b'],\"\r\n \" 'formats':['u1','u1'],\"\r\n \" 'offsets':[0,2],\"\r\n \" 'titles':['Red pixel','Blue pixel'],\"\r\n \" 'itemsize':3}\")\r\n\r\n dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])\r\n assert_equal(str(dt),\r\n \"[('a', '<m8[D]'), ('b', '<M8[us]')]\")\r\n\r\n def test_repr_structured(self):\r\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\r\n ('rtile', '>f4', (64, 36))], (3,)),\r\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\r\n ('bright', '>f4', (8, 36))])])\r\n assert_equal(repr(dt),\r\n \"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), \"\r\n \"('rtile', '>f4', (64, 36))], (3,)), \"\r\n \"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), \"\r\n \"('bright', '>f4', (8, 36))])])\")\r\n\r\n dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],\r\n 'offsets': [0, 1, 2],\r\n 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},\r\n align=True)\r\n assert_equal(repr(dt),\r\n \"dtype([(('Red pixel', 'r'), 'u1'), \"\r\n \"(('Green pixel', 'g'), 'u1'), \"\r\n \"(('Blue pixel', 'b'), 'u1')], align=True)\")\r\n\r\n def test_repr_structured_not_packed(self):\r\n dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],\r\n 'formats': ['<u4', 'u1', 'u1', 'u1'],\r\n 'offsets': [0, 0, 1, 2],\r\n 'titles': ['Color', 'Red pixel',\r\n 'Green pixel', 'Blue pixel']}, align=True)\r\n assert_equal(repr(dt),\r\n \"dtype({'names':['rgba','r','g','b'],\"\r\n \" 'formats':['<u4','u1','u1','u1'],\"\r\n \" 'offsets':[0,0,1,2],\"\r\n \" 'titles':['Color','Red pixel',\"\r\n \"'Green pixel','Blue pixel'],\"\r\n \" 'itemsize':4}, align=True)\")\r\n\r\n dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\r\n 'offsets': [0, 2],\r\n 'titles': ['Red pixel', 'Blue pixel'],\r\n 'itemsize': 4})\r\n assert_equal(repr(dt),\r\n \"dtype({'names':['r','b'], \"\r\n \"'formats':['u1','u1'], \"\r\n \"'offsets':[0,2], \"\r\n \"'titles':['Red pixel','Blue pixel'], \"\r\n \"'itemsize':4})\")\r\n\r\n def test_repr_structured_datetime(self):\r\n dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])\r\n assert_equal(repr(dt),\r\n \"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])\")\r\n\r\n def test_repr_str_subarray(self):\r\n dt = np.dtype(('<i2', (1,)))\r\n assert_equal(repr(dt), \"dtype(('<i2', (1,)))\")\r\n assert_equal(str(dt), \"('<i2', (1,))\")\r\n\r\n def test_base_dtype_with_object_type(self):\r\n # Issue gh-2798, should not error.\r\n np.array(['a'], dtype=\"O\").astype((\"O\", [(\"name\", \"O\")]))\r\n\r\n def test_empty_string_to_object(self):\r\n # Pull request #4722\r\n np.array([\"\", 
\"\"]).astype(object)\r\n\r\n def test_void_subclass_unsized(self):\r\n dt = np.dtype(np.record)\r\n assert_equal(repr(dt), \"dtype('V')\")\r\n assert_equal(str(dt), '|V0')\r\n assert_equal(dt.name, 'record')\r\n\r\n def test_void_subclass_sized(self):\r\n dt = np.dtype((np.record, 2))\r\n assert_equal(repr(dt), \"dtype('V2')\")\r\n assert_equal(str(dt), '|V2')\r\n assert_equal(dt.name, 'record16')\r\n\r\n def test_void_subclass_fields(self):\r\n dt = np.dtype((np.record, [('a', '<u2')]))\r\n assert_equal(repr(dt), \"dtype((numpy.record, [('a', '<u2')]))\")\r\n assert_equal(str(dt), \"(numpy.record, [('a', '<u2')])\")\r\n assert_equal(dt.name, 'record16')\r\n\r\n\r\nclass TestDtypeAttributeDeletion:\r\n\r\n def test_dtype_non_writable_attributes_deletion(self):\r\n dt = np.dtype(np.double)\r\n attr = [\"subdtype\", \"descr\", \"str\", \"name\", \"base\", \"shape\",\r\n \"isbuiltin\", \"isnative\", \"isalignedstruct\", \"fields\",\r\n \"metadata\", \"hasobject\"]\r\n\r\n for s in attr:\r\n assert_raises(AttributeError, delattr, dt, s)\r\n\r\n def test_dtype_writable_attributes_deletion(self):\r\n dt = np.dtype(np.double)\r\n attr = [\"names\"]\r\n for s in attr:\r\n assert_raises(AttributeError, delattr, dt, s)\r\n\r\n\r\nclass TestDtypeAttributes:\r\n def test_descr_has_trailing_void(self):\r\n # see gh-6359\r\n dtype = np.dtype({\r\n 'names': ['A', 'B'],\r\n 'formats': ['f4', 'f4'],\r\n 'offsets': [0, 8],\r\n 'itemsize': 16})\r\n new_dtype = np.dtype(dtype.descr)\r\n assert_equal(new_dtype.itemsize, 16)\r\n\r\n def test_name_dtype_subclass(self):\r\n # Ticket #4357\r\n class user_def_subcls(np.void):\r\n pass\r\n assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')\r\n\r\n\r\nclass TestPickling:\r\n\r\n def check_pickling(self, dtype):\r\n for proto in range(pickle.HIGHEST_PROTOCOL + 1):\r\n pickled = pickle.loads(pickle.dumps(dtype, proto))\r\n assert_equal(pickled, dtype)\r\n assert_equal(pickled.descr, dtype.descr)\r\n if dtype.metadata is not None:\r\n assert_equal(pickled.metadata, dtype.metadata)\r\n # Check the reconstructed dtype is functional\r\n x = np.zeros(3, dtype=dtype)\r\n y = np.zeros(3, dtype=pickled)\r\n assert_equal(x, y)\r\n assert_equal(x[0], y[0])\r\n\r\n @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,\r\n np.compat.unicode, bool])\r\n def test_builtin(self, t):\r\n self.check_pickling(np.dtype(t))\r\n\r\n def test_structured(self):\r\n dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))\r\n self.check_pickling(dt)\r\n\r\n def test_structured_aligned(self):\r\n dt = np.dtype('i4, i1', align=True)\r\n self.check_pickling(dt)\r\n\r\n def test_structured_unaligned(self):\r\n dt = np.dtype('i4, i1', align=False)\r\n self.check_pickling(dt)\r\n\r\n def test_structured_padded(self):\r\n dt = np.dtype({\r\n 'names': ['A', 'B'],\r\n 'formats': ['f4', 'f4'],\r\n 'offsets': [0, 8],\r\n 'itemsize': 16})\r\n self.check_pickling(dt)\r\n\r\n def test_structured_titles(self):\r\n dt = np.dtype({'names': ['r', 'b'],\r\n 'formats': ['u1', 'u1'],\r\n 'titles': ['Red pixel', 'Blue pixel']})\r\n self.check_pickling(dt)\r\n\r\n @pytest.mark.parametrize('base', ['m8', 'M8'])\r\n @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',\r\n 'ms', 'us', 'ns', 'ps', 'fs', 'as'])\r\n def test_datetime(self, base, unit):\r\n dt = np.dtype('%s[%s]' % (base, unit) if unit else base)\r\n self.check_pickling(dt)\r\n if unit:\r\n dt = np.dtype('%s[7%s]' % (base, unit))\r\n self.check_pickling(dt)\r\n\r\n def 
test_metadata(self):\r\n dt = np.dtype(int, metadata={'datum': 1})\r\n self.check_pickling(dt)\r\n\r\n\r\ndef test_rational_dtype():\r\n # test for bug gh-5719\r\n a = np.array([1111], dtype=rational).astype\r\n assert_raises(OverflowError, a, 'int8')\r\n\r\n # test that dtype detection finds user-defined types\r\n x = rational(1)\r\n assert_equal(np.array([x,x]).dtype, np.dtype(rational))\r\n\r\n\r\ndef test_dtypes_are_true():\r\n # test for gh-6294\r\n assert bool(np.dtype('f8'))\r\n assert bool(np.dtype('i8'))\r\n assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))\r\n\r\n\r\ndef test_invalid_dtype_string():\r\n # test for gh-10440\r\n assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')\r\n assert_raises(TypeError, np.dtype, u'Fl\\xfcgel')\r\n\r\n\r\ndef test_keyword_argument():\r\n # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971\r\n assert np.dtype(dtype=np.float64) == np.dtype(np.float64)\r\n\r\n\r\nclass TestFromDTypeAttribute:\r\n def test_simple(self):\r\n class dt:\r\n dtype = \"f8\"\r\n\r\n assert np.dtype(dt) == np.float64\r\n assert np.dtype(dt()) == np.float64\r\n\r\n def test_recursion(self):\r\n class dt:\r\n pass\r\n\r\n dt.dtype = dt\r\n with pytest.raises(RecursionError):\r\n np.dtype(dt)\r\n\r\n dt_instance = dt()\r\n dt_instance.dtype = dt\r\n with pytest.raises(RecursionError):\r\n np.dtype(dt_instance)\r\n\r\n def test_void_subtype(self):\r\n class dt(np.void):\r\n # This code path is fully untested before, so it is unclear\r\n # what this should be useful for. Note that if np.void is used\r\n # numpy will think we are deallocating a base type [1.17, 2019-02].\r\n dtype = np.dtype(\"f,f\")\r\n pass\r\n\r\n np.dtype(dt)\r\n np.dtype(dt(1))\r\n\r\n def test_void_subtype_recursion(self):\r\n class dt(np.void):\r\n pass\r\n\r\n dt.dtype = dt\r\n\r\n with pytest.raises(RecursionError):\r\n np.dtype(dt)\r\n\r\n with pytest.raises(RecursionError):\r\n np.dtype(dt(1))\r\n\r\n\r\nclass TestDTypeClasses:\r\n @pytest.mark.parametrize(\"dtype\", list(np.typecodes['All']) + [rational])\r\n def test_basic_dtypes_subclass_properties(self, dtype):\r\n # Note: Except for the isinstance and type checks, these attributes\r\n # are considered currently private and may change.\r\n dtype = np.dtype(dtype)\r\n assert isinstance(dtype, np.dtype)\r\n assert type(dtype) is not np.dtype\r\n assert type(dtype).__name__ == f\"dtype[{dtype.type.__name__}]\"\r\n assert type(dtype).__module__ == \"numpy\"\r\n assert not type(dtype)._abstract\r\n\r\n # the flexible dtypes and datetime/timedelta have additional parameters\r\n # which are more than just storage information, these would need to be\r\n # given when creating a dtype:\r\n parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)\r\n if dtype.type not in parametric:\r\n assert not type(dtype)._parametric\r\n assert type(dtype)() is dtype\r\n else:\r\n assert type(dtype)._parametric\r\n with assert_raises(TypeError):\r\n type(dtype)()\r\n\r\n def test_dtype_superclass(self):\r\n assert type(np.dtype) is not type\r\n assert isinstance(np.dtype, type)\r\n\r\n assert type(np.dtype).__name__ == \"_DTypeMeta\"\r\n assert type(np.dtype).__module__ == \"numpy\"\r\n assert np.dtype._abstract\r\n\r\n\r\nclass TestFromCTypes:\r\n\r\n @staticmethod\r\n def check(ctype, dtype):\r\n dtype = np.dtype(dtype)\r\n assert_equal(np.dtype(ctype), dtype)\r\n assert_equal(np.dtype(ctype()), dtype)\r\n\r\n def test_array(self):\r\n c8 = ctypes.c_uint8\r\n self.check( 3 * c8, (np.uint8, (3,)))\r\n self.check( 1 * c8, 
(np.uint8, (1,)))\r\n self.check( 0 * c8, (np.uint8, (0,)))\r\n self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))\r\n self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))\r\n\r\n def test_padded_structure(self):\r\n class PaddedStruct(ctypes.Structure):\r\n _fields_ = [\r\n ('a', ctypes.c_uint8),\r\n ('b', ctypes.c_uint16)\r\n ]\r\n expected = np.dtype([\r\n ('a', np.uint8),\r\n ('b', np.uint16)\r\n ], align=True)\r\n self.check(PaddedStruct, expected)\r\n\r\n def test_bit_fields(self):\r\n class BitfieldStruct(ctypes.Structure):\r\n _fields_ = [\r\n ('a', ctypes.c_uint8, 7),\r\n ('b', ctypes.c_uint8, 1)\r\n ]\r\n assert_raises(TypeError, np.dtype, BitfieldStruct)\r\n assert_raises(TypeError, np.dtype, BitfieldStruct())\r\n\r\n def test_pointer(self):\r\n p_uint8 = ctypes.POINTER(ctypes.c_uint8)\r\n assert_raises(TypeError, np.dtype, p_uint8)\r\n\r\n def test_void_pointer(self):\r\n self.check(ctypes.c_void_p, np.uintp)\r\n\r\n def test_union(self):\r\n class Union(ctypes.Union):\r\n _fields_ = [\r\n ('a', ctypes.c_uint8),\r\n ('b', ctypes.c_uint16),\r\n ]\r\n expected = np.dtype(dict(\r\n names=['a', 'b'],\r\n formats=[np.uint8, np.uint16],\r\n offsets=[0, 0],\r\n itemsize=2\r\n ))\r\n self.check(Union, expected)\r\n\r\n def test_union_with_struct_packed(self):\r\n class Struct(ctypes.Structure):\r\n _pack_ = 1\r\n _fields_ = [\r\n ('one', ctypes.c_uint8),\r\n ('two', ctypes.c_uint32)\r\n ]\r\n\r\n class Union(ctypes.Union):\r\n _fields_ = [\r\n ('a', ctypes.c_uint8),\r\n ('b', ctypes.c_uint16),\r\n ('c', ctypes.c_uint32),\r\n ('d', Struct),\r\n ]\r\n expected = np.dtype(dict(\r\n names=['a', 'b', 'c', 'd'],\r\n formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],\r\n offsets=[0, 0, 0, 0],\r\n itemsize=ctypes.sizeof(Union)\r\n ))\r\n self.check(Union, expected)\r\n\r\n def test_union_packed(self):\r\n class Struct(ctypes.Structure):\r\n _fields_ = [\r\n ('one', ctypes.c_uint8),\r\n ('two', ctypes.c_uint32)\r\n ]\r\n _pack_ = 1\r\n class Union(ctypes.Union):\r\n _pack_ = 1\r\n _fields_ = [\r\n ('a', ctypes.c_uint8),\r\n ('b', ctypes.c_uint16),\r\n ('c', ctypes.c_uint32),\r\n ('d', Struct),\r\n ]\r\n expected = np.dtype(dict(\r\n names=['a', 'b', 'c', 'd'],\r\n formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],\r\n offsets=[0, 0, 0, 0],\r\n itemsize=ctypes.sizeof(Union)\r\n ))\r\n self.check(Union, expected)\r\n\r\n def test_packed_structure(self):\r\n class PackedStructure(ctypes.Structure):\r\n _pack_ = 1\r\n _fields_ = [\r\n ('a', ctypes.c_uint8),\r\n ('b', ctypes.c_uint16)\r\n ]\r\n expected = np.dtype([\r\n ('a', np.uint8),\r\n ('b', np.uint16)\r\n ])\r\n self.check(PackedStructure, expected)\r\n\r\n def test_large_packed_structure(self):\r\n class PackedStructure(ctypes.Structure):\r\n _pack_ = 2\r\n _fields_ = [\r\n ('a', ctypes.c_uint8),\r\n ('b', ctypes.c_uint16),\r\n ('c', ctypes.c_uint8),\r\n ('d', ctypes.c_uint16),\r\n ('e', ctypes.c_uint32),\r\n ('f', ctypes.c_uint32),\r\n ('g', ctypes.c_uint8)\r\n ]\r\n expected = np.dtype(dict(\r\n formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],\r\n offsets=[0, 2, 4, 6, 8, 12, 16],\r\n names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],\r\n itemsize=18))\r\n self.check(PackedStructure, expected)\r\n\r\n def test_big_endian_structure_packed(self):\r\n class BigEndStruct(ctypes.BigEndianStructure):\r\n _fields_ = [\r\n ('one', ctypes.c_uint8),\r\n ('two', ctypes.c_uint32)\r\n ]\r\n _pack_ = 1\r\n expected = np.dtype([('one', 'u1'), ('two', '>u4')])\r\n 
self.check(BigEndStruct, expected)\r\n\r\n def test_little_endian_structure_packed(self):\r\n class LittleEndStruct(ctypes.LittleEndianStructure):\r\n _fields_ = [\r\n ('one', ctypes.c_uint8),\r\n ('two', ctypes.c_uint32)\r\n ]\r\n _pack_ = 1\r\n expected = np.dtype([('one', 'u1'), ('two', '<u4')])\r\n self.check(LittleEndStruct, expected)\r\n\r\n def test_little_endian_structure(self):\r\n class PaddedStruct(ctypes.LittleEndianStructure):\r\n _fields_ = [\r\n ('a', ctypes.c_uint8),\r\n ('b', ctypes.c_uint16)\r\n ]\r\n expected = np.dtype([\r\n ('a', '<B'),\r\n ('b', '<H')\r\n ], align=True)\r\n self.check(PaddedStruct, expected)\r\n\r\n def test_big_endian_structure(self):\r\n class PaddedStruct(ctypes.BigEndianStructure):\r\n _fields_ = [\r\n ('a', ctypes.c_uint8),\r\n ('b', ctypes.c_uint16)\r\n ]\r\n expected = np.dtype([\r\n ('a', '>B'),\r\n ('b', '>H')\r\n ], align=True)\r\n self.check(PaddedStruct, expected)\r\n\r\n def test_simple_endian_types(self):\r\n self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))\r\n self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))\r\n self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))\r\n self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))\r\n\r\n all_types = set(np.typecodes['All'])\r\n all_pairs = permutations(all_types, 2)\r\n\r\n @pytest.mark.parametrize(\"pair\", all_pairs)\r\n def test_pairs(self, pair):\r\n \"\"\"\r\n Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]\r\n Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])\r\n \"\"\"\r\n # gh-5645: check that np.dtype('i,L') can be used\r\n pair_type = np.dtype('{},{}'.format(*pair))\r\n expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])\r\n assert_equal(pair_type, expected)\r\n\r\n\r\nclass TestUserDType:\r\n @pytest.mark.leaks_references(reason=\"dynamically creates custom dtype.\")\r\n def test_custom_structured_dtype(self):\r\n class mytype:\r\n pass\r\n\r\n blueprint = np.dtype([(\"field\", object)])\r\n dt = create_custom_field_dtype(blueprint, mytype, 0)\r\n assert dt.type == mytype\r\n # We cannot (currently) *create* this dtype with `np.dtype` because\r\n # mytype does not inherit from `np.generic`. This seems like an\r\n # unnecessary restriction, but one that has been around forever:\r\n assert np.dtype(mytype) == np.dtype(\"O\")\r\n\r\n def test_custom_structured_dtype_errors(self):\r\n class mytype:\r\n pass\r\n\r\n blueprint = np.dtype([(\"field\", object)])\r\n\r\n with pytest.raises(ValueError):\r\n # Tests what happens if fields are unset during creation\r\n # which is currently rejected due to the containing object\r\n # (see PyArray_RegisterDataType).\r\n create_custom_field_dtype(blueprint, mytype, 1)\r\n\r\n with pytest.raises(RuntimeError):\r\n # Tests that a dtype must have its type field set up to np.dtype\r\n # or in this case a builtin instance.\r\n create_custom_field_dtype(blueprint, mytype, 2)\r\n"
] | [
[
"numpy.can_cast",
"numpy.dtype",
"numpy.iinfo",
"numpy.testing.assert_equal",
"numpy.core._multiarray_tests.create_custom_field_dtype",
"numpy.uint32",
"numpy.compat.pickle.dumps",
"numpy.core._rational_tests.rational",
"numpy.int8",
"numpy.finfo",
"numpy.frombuffer",
"numpy.zeros",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"numpy.array",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.float64",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kudo1026/packnet-sfm | [
"b7c6230e1a093b1f096a9577616d40ada5e376e6",
"b7c6230e1a093b1f096a9577616d40ada5e376e6"
] | [
"packnet_sfm/networks/layers/resnet/ds_decoder.py",
"packnet_sfm/models/UCMSfmModel.py"
] | [
"# Copyright 2020 Toyota Research Institute. All rights reserved.\n\n# Adapted from monodepth2\n# https://github.com/nianticlabs/monodepth2/blob/master/networks/depth_decoder.py\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom collections import OrderedDict\nfrom .layers import ConvBlock, Conv3x3, upsample\n\n\nclass DSDecoder(nn.Module):\n def __init__(self, num_ch_enc, scales=[0], num_output_channels=3, use_skips=True):\n super(DSDecoder, self).__init__()\n\n self.num_output_channels = num_output_channels\n self.use_skips = use_skips\n self.upsample_mode = 'nearest'\n self.scales = scales\n\n self.num_ch_enc = num_ch_enc\n self.num_ch_dec = np.array([16, 32, 64, 128, 256])\n\n # camera intrinsic parameter as a vector\n # i = torch.tensor([183.85 / 1000, 191.47 / 1000, 186.73 / 1000, 132.81 / 1000, (-0.221 + 1) / 2, 0.576])\n # i = torch.tensor([208.10/1000, 216.78/1000, 186.24/1000, 132.82/1000, (-0.172 + 1)/2, 0.592])\n # i = torch.tensor([181.4/1000, 188.9/1000, 186.4/1000, 132.6/1000, (-0.230+1)/2, 0.571]) # euroc gt\n # i = i * 0.9\n # i = i * 1.10\n # sigmoid_inv_i = torch.log(i / (1 - i))\n # self.intrinsic_vector = nn.Parameter(sigmoid_inv_i)\n # self.intrinsic_vector = nn.Parameter(torch.zeros(6))\n self.intrinsic_vector = nn.Parameter(-torch.ones(6))\n\n self.tanh = nn.Tanh()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input_features):\n self.output = {}\n\n # get forcal length and offsets\n x = input_features[-1]\n B = x.shape[0]\n \n fx, fy, cx, cy = self.sigmoid(self.intrinsic_vector[0:4]) * 1000\n xi = self.sigmoid(self.intrinsic_vector[4]) * 2 - 1\n alpha = self.sigmoid(self.intrinsic_vector[5]) * 1\n\n I = torch.zeros(6)\n I[0] = fx\n I[1] = fy\n I[2] = cx\n I[3] = cy\n I[4] = xi\n I[5] = alpha\n\n self.output = I.unsqueeze(0).repeat(B,1)\n\n return self.output\n",
"# Copyright 2020 Toyota Research Institute. All rights reserved.\n\nimport random\nimport torch.nn as nn\nfrom packnet_sfm.geometry.pose import Pose\nfrom packnet_sfm.utils.misc import make_list\nfrom packnet_sfm.models.SfmModel import SfmModel\nimport torch.nn.functional as F\n\nclass UCMSfmModel(SfmModel):\n \"\"\"\n Model class encapsulating a pose and depth networks.\n\n Parameters\n ----------\n depth_net : nn.Module\n Depth network to be used\n pose_net : nn.Module\n Pose network to be used\n rotation_mode : str\n Rotation mode for the pose network\n flip_lr_prob : float\n Probability of flipping when using the depth network\n upsample_depth_maps : bool\n True if depth map scales are upsampled to highest resolution\n kwargs : dict\n Extra parameters\n \"\"\"\n\n def flip_model(self, model, image, flip):\n \"\"\"\n Flip input image and flip output inverse depth map\n\n Parameters\n ----------\n model : nn.Module\n Module to be used\n image : torch.Tensor [B,3,H,W]\n Input image\n flip : bool\n True if the flip is happening\n\n Returns\n -------\n inv_depths : list of torch.Tensor [B,1,H,W]\n List of predicted inverse depth maps\n \"\"\"\n if flip:\n return [flip_lr(inv_depth) for inv_depth in model(flip_lr(image))]\n else:\n return model(image)\n\n def interpolate_scales(self, images, shape=None, mode='bilinear', align_corners=False):\n \"\"\"\n Interpolate list of images to the same shape\n\n Parameters\n ----------\n images : list of torch.Tensor [B,?,?,?]\n Images to be interpolated, with different resolutions\n shape : tuple (H, W)\n Output shape\n mode : str\n Interpolation mode\n align_corners : bool\n True if corners will be aligned after interpolation\n\n Returns\n -------\n images : list of torch.Tensor [B,?,H,W]\n Interpolated images, with the same resolution\n \"\"\"\n # If no shape is provided, interpolate to highest resolution\n if shape is None:\n shape = images[0].shape\n # Take last two dimensions as shape\n if len(shape) > 2:\n shape = shape[-2:]\n # Interpolate all images\n return [F.interpolate(image, shape, mode=mode,\n align_corners=align_corners) for image in images]\n\n def compute_depth_net(self, image):\n \"\"\"Computes inverse depth maps from single images\"\"\"\n # Randomly flip and estimate inverse depth maps\n inv_depths, k = self.flip_model(self.depth_net, image, False)\n inv_depths = make_list(inv_depths)\n # If upsampling depth maps\n if self.upsample_depth_maps:\n inv_depths = self.interpolate_scales(\n inv_depths, mode='nearest', align_corners=None)\n # Return inverse depth maps\n return inv_depths, k \n\n def forward(self, batch, return_logs=False):\n \"\"\"\n Processes a batch.\n\n Parameters\n ----------\n batch : dict\n Input batch\n return_logs : bool\n True if logs are stored\n\n Returns\n -------\n output : dict\n Dictionary containing predicted inverse depth maps and poses\n \"\"\"\n #print(logs)\n # Generate inverse depth predictions\n inv_depths, k = self.compute_depth_net(batch['rgb'])\n # Generate pose predictions if available\n pose = None\n if 'rgb_context' in batch and self.pose_net is not None:\n pose = self.compute_poses(batch['rgb'],\n batch['rgb_context'])\n # Return output dictionary\n return {\n 'inv_depths': inv_depths,\n 'poses': pose,\n 'intrinsics': k\n }\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"numpy.array"
],
[
"torch.nn.functional.interpolate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lcl1026504480/mfpython | [
"c1b1689a42488129299e31152764c535eb8e66e0",
"c1b1689a42488129299e31152764c535eb8e66e0",
"c1b1689a42488129299e31152764c535eb8e66e0"
] | [
"evolution/6.py",
"matplotlib/11.py",
"matplotlib/8.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 9 21:25:15 2020\n\n@author: lenovouser\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nN_MOVES = 150\nDNA_SIZE = N_MOVES*2 # 40 x moves, 40 y moves\nDIRECTION_BOUND = [0, 1]\nCROSS_RATE = 0.8\nMUTATE_RATE = 0.0001\nPOP_SIZE = 100\nN_GENERATIONS = 200\nGOAL_POINT = [10, 5]\nSTART_POINT = [0, 5]\nOBSTACLE_LINE = np.array([[5, 2], [5, 8]])\n\n\nclass GA(object):\n def __init__(self, DNA_size, DNA_bound, cross_rate, mutation_rate, pop_size, ):\n self.DNA_size = DNA_size\n DNA_bound[1] += 1\n self.DNA_bound = DNA_bound\n self.cross_rate = cross_rate\n self.mutate_rate = mutation_rate\n self.pop_size = pop_size\n\n self.pop = np.random.randint(*DNA_bound, size=(pop_size, DNA_size))\n\n def DNA2product(self, DNA, n_moves, start_point): # convert to readable string\n pop = (DNA - 0.5) \n pop[:, 0], pop[:, n_moves] = start_point[0], start_point[1]\n lines_x = np.cumsum(pop[:, :n_moves], axis=1)\n lines_y = np.cumsum(pop[:, n_moves:], axis=1)\n return lines_x, lines_y\n\n def get_fitness(self, lines_x, lines_y, goal_point, obstacle_line):\n dist2goal = np.sqrt((goal_point[0] - lines_x[:, -1]) ** 2 + (goal_point[1] - lines_y[:, -1]) ** 2)\n fitness=np.exp(-10*dist2goal)\n points = (lines_x > obstacle_line[0, 0] - 0.5) & (lines_x < obstacle_line[1, 0] + 0.5)\n y_values = np.where(points, lines_y, np.zeros_like(lines_y) - 100)\n bad_lines = ((y_values > obstacle_line[0, 1]) & (y_values < obstacle_line[1, 1])).max(axis=1)\n fitness[bad_lines] = 1e-6\n return fitness\n\n def select(self, fitness):\n idx = np.random.choice(np.arange(self.pop_size), size=self.pop_size, replace=True, p=fitness/fitness.sum())\n return self.pop[idx]\n\n def crossover(self, parent, pop):\n if np.random.rand() < self.cross_rate:\n i_ = np.random.randint(0, self.pop_size, size=1) # select another individual from pop\n cross_points = np.random.randint(0, 2, self.DNA_size).astype(np.bool) # choose crossover points\n parent[cross_points] = pop[i_, cross_points] # mating and produce one child\n return parent\n\n def mutate(self, child):\n for point in range(self.DNA_size):\n if np.random.rand() < self.mutate_rate:\n child[point] = np.random.randint(*self.DNA_bound)\n return child\n\n def evolve(self, fitness):\n pop = self.select(fitness)\n pop_copy = pop.copy()\n for parent in pop: # for every parent\n child = self.crossover(parent, pop_copy)\n child = self.mutate(child)\n parent[:] = child\n self.pop = pop\n\n\nclass Line(object):\n def __init__(self, n_moves, goal_point, start_point, obstacle_line):\n self.n_moves = n_moves\n self.goal_point = goal_point\n self.start_point = start_point\n self.obstacle_line = obstacle_line\n\n plt.ion()\n\n def plotting(self, lines_x, lines_y):\n plt.cla()\n plt.scatter(*self.goal_point, s=200, c='r')\n plt.scatter(*self.start_point, s=100, c='b')\n plt.plot(self.obstacle_line[:, 0], self.obstacle_line[:, 1], lw=3, c='k')\n plt.plot(lines_x.T, lines_y.T, c='k')\n plt.scatter(lines_x[:,-1],lines_y[:,-1],100,\"y\")\n plt.xlim((-5, 15))\n plt.ylim((-5, 15))\n plt.pause(0.01)\n\n\nga = GA(DNA_size=DNA_SIZE, DNA_bound=DIRECTION_BOUND,\n cross_rate=CROSS_RATE, mutation_rate=MUTATE_RATE, pop_size=POP_SIZE)\n\nenv = Line(N_MOVES, GOAL_POINT, START_POINT, OBSTACLE_LINE)\n\nfor generation in range(N_GENERATIONS):\n lx, ly = ga.DNA2product(ga.pop, N_MOVES, START_POINT)\n fitness = ga.get_fitness(lx, ly, GOAL_POINT, OBSTACLE_LINE)\n ga.evolve(fitness)\n print('Gen:', generation, '| best fit:', fitness.max())\n env.plotting(lx, 
ly)\n\nplt.ioff()\nplt.show()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 9 15:02:09 2020\n\n@author: lenovouser\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(0, 10, 0.1)\ny1 = 0.05 * x**2\ny2 = -1 *y1\n\nfig, ax1 = plt.subplots()\n\nax2 = ax1.twinx() # mirror the ax1\nax1.plot(x, y1, 'g-')\nax2.plot(x, y2, 'b-')\n\nax1.set_xlabel('X data')\nax1.set_ylabel('Y1 data', color='g')\nax2.set_ylabel('Y2 data', color='b')\n\nplt.show()",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 9 14:23:47 2020\n\n@author: lenovouser\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfig = plt.figure()\nax = Axes3D(fig)\n# X, Y value\nX = np.arange(-4, 4, 0.25)\nY = np.arange(-4, 4, 0.25)\nX, Y = np.meshgrid(X, Y)\nR = np.sqrt(X ** 2 + Y ** 2)\n# height value\nZ = np.sin(R)\n\nax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='rainbow')\n\"\"\"\n============= ================================================\n Argument Description\n ============= ================================================\n *X*, *Y*, *Z* Data values as 2D arrays\n *rstride* Array row stride (step size), defaults to 10\n *cstride* Array column stride (step size), defaults to 10\n *color* Color of the surface patches\n *cmap* A colormap for the surface patches.\n *facecolors* Face colors for the individual patches\n *norm* An instance of Normalize to map values to colors\n *vmin* Minimum value to map\n *vmax* Maximum value to map\n *shade* Whether to shade the facecolors\n ============= ================================================\n\"\"\"\n\n# I think this is different from plt12_contours\nax.contourf(X, Y, Z, zdir='z', offset=-2, cmap=plt.get_cmap('rainbow'))\n\"\"\"\n========== ================================================\n Argument Description\n ========== ================================================\n *X*, *Y*, Data values as numpy.arrays\n *Z*\n *zdir* The direction to use: x, y or z (default)\n *offset* If specified plot a projection of the filled contour\n on this position in plane normal to zdir\n ========== ================================================\n\"\"\"\n\nax.set_zlim(-2, 2)\n\nplt.show()"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.cla",
"numpy.arange",
"numpy.cumsum",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.xlim",
"numpy.zeros_like",
"numpy.random.rand",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"numpy.random.randint"
],
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
],
[
"numpy.sqrt",
"numpy.arange",
"matplotlib.pyplot.get_cmap",
"numpy.sin",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BroadDong/Caffe_2 | [
"c1a636983dc84a960d6abe150c996234d6f6278c"
] | [
"caffe2/python/rnn_cell.py"
] | [
"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n## @package rnn_cell\n# Module caffe2.python.rnn_cell\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport functools\nimport itertools\nimport logging\nimport numpy as np\nimport random\nimport six\nfrom future.utils import viewkeys\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python.attention import (\n AttentionType,\n apply_regular_attention,\n apply_recurrent_attention,\n apply_dot_attention,\n apply_soft_coverage_attention,\n)\nfrom caffe2.python import core, recurrent, workspace, brew, scope\nfrom caffe2.python.modeling.parameter_sharing import ParameterSharing\nfrom caffe2.python.modeling.parameter_info import ParameterTags\nfrom caffe2.python.modeling.initializers import Initializer\nfrom caffe2.python.model_helper import ModelHelper\n\n\nclass RNNCell(object):\n '''\n Base class for writing recurrent / stateful operations.\n\n One needs to implement 3 methods: _apply, prepare_input and get_state_names.\n As a result base class will provice apply_over_sequence method, which\n allows you to apply recurrent operations over a sequence of any length.\n '''\n def __init__(self, name, forward_only=False, initializer=None):\n self.name = name\n self.recompute_blobs = []\n self.forward_only = forward_only\n self._initializer = initializer\n\n @property\n def initializer(self):\n return self._initializer\n\n @initializer.setter\n def initializer(self, value):\n self._initializer = value\n\n def scope(self, name):\n return self.name + '/' + name if self.name is not None else name\n\n def apply_over_sequence(\n self,\n model,\n inputs,\n seq_lengths,\n initial_states=None,\n outputs_with_grads=None,\n ):\n if initial_states is None:\n with scope.NameScope(self.name):\n if self.initializer is None:\n raise Exception(\"Either initial states\"\n \"or initializer have to be set\")\n initial_states = self.initializer.create_states(model)\n\n preprocessed_inputs = self.prepare_input(model, inputs)\n step_model = ModelHelper(name=self.name, param_model=model)\n input_t, timestep = step_model.net.AddScopedExternalInputs(\n 'input_t',\n 'timestep',\n )\n states_prev = step_model.net.AddScopedExternalInputs(*[\n s + '_prev' for s in self.get_state_names()\n ])\n states = self._apply(\n model=step_model,\n input_t=input_t,\n seq_lengths=seq_lengths,\n states=states_prev,\n timestep=timestep,\n )\n\n if outputs_with_grads is None:\n outputs_with_grads = [self.get_output_state_index() * 2]\n\n # states_for_all_steps consists of combination of\n # states gather for all steps and final states. 
It looks like this:\n # (state_1_all, state_1_final, state_2_all, state_2_final, ...)\n states_for_all_steps = recurrent.recurrent_net(\n net=model.net,\n cell_net=step_model.net,\n inputs=[(input_t, preprocessed_inputs)],\n initial_cell_inputs=list(zip(states_prev, initial_states)),\n links=dict(zip(states_prev, states)),\n timestep=timestep,\n scope=self.name,\n forward_only=self.forward_only,\n outputs_with_grads=outputs_with_grads,\n recompute_blobs_on_backward=self.recompute_blobs,\n )\n\n output = self._prepare_output_sequence(\n model,\n states_for_all_steps,\n )\n return output, states_for_all_steps\n\n def apply(self, model, input_t, seq_lengths, states, timestep):\n input_t = self.prepare_input(model, input_t)\n states = self._apply(\n model, input_t, seq_lengths, states, timestep)\n output = self._prepare_output(model, states)\n return output, states\n\n def _apply(\n self,\n model,\n input_t,\n seq_lengths,\n states,\n timestep,\n extra_inputs,\n ):\n '''\n A single step of a recurrent network.\n\n model: ModelHelper object new operators would be added to\n\n input_t: single input with shape (1, batch_size, input_dim)\n\n seq_lengths: blob containing sequence lengths which would be passed to\n LSTMUnit operator\n\n states: previous recurrent states\n\n timestep: current recurrent iteration. Could be used together with\n seq_lengths in order to determine, if some shorter sequences\n in the batch have already ended.\n\n extra_inputs: list of tuples (input, dim). specifies additional input\n which is not subject to prepare_input(). (useful when a cell is a\n component of a larger recurrent structure, e.g., attention)\n '''\n raise NotImplementedError('Abstract method')\n\n def prepare_input(self, model, input_blob):\n '''\n If some operations in _apply method depend only on the input,\n not on recurrent states, they could be computed in advance.\n\n model: ModelHelper object new operators would be added to\n\n input_blob: either the whole input sequence with shape\n (sequence_length, batch_size, input_dim) or a single input with shape\n (1, batch_size, input_dim).\n '''\n return input_blob\n\n def get_output_state_index(self):\n '''\n Return index into state list of the \"primary\" step-wise output.\n '''\n return 0\n\n def get_state_names(self):\n '''\n Return the names of the recurrent states.\n It's required by apply_over_sequence method in order to allocate\n recurrent states for all steps with meaningful names.\n '''\n raise NotImplementedError('Abstract method')\n\n def get_output_dim(self):\n '''\n Specifies the dimension (number of units) of stepwise output.\n '''\n raise NotImplementedError('Abstract method')\n\n def _prepare_output(self, model, states):\n '''\n Allows arbitrary post-processing of primary output.\n '''\n return states[self.get_output_state_index()]\n\n def _prepare_output_sequence(self, model, state_outputs):\n '''\n Allows arbitrary post-processing of primary sequence output.\n\n (Note that state_outputs alternates between full-sequence and final\n output for each state, thus the index multiplier 2.)\n '''\n output_sequence_index = 2 * self.get_output_state_index()\n return state_outputs[output_sequence_index]\n\n\nclass LSTMInitializer(object):\n def __init__(self, hidden_size):\n self.hidden_size = hidden_size\n\n def create_states(self, model):\n return [\n model.create_param(\n param_name='initial_hidden_state',\n initializer=Initializer(operator_name='ConstantFill',\n value=0.0),\n shape=[self.hidden_size],\n ),\n model.create_param(\n 
param_name='initial_cell_state',\n initializer=Initializer(operator_name='ConstantFill',\n value=0.0),\n shape=[self.hidden_size],\n )\n ]\n\n\nclass LSTMCell(RNNCell):\n\n def __init__(\n self,\n input_size,\n hidden_size,\n forget_bias,\n memory_optimization,\n drop_states=False,\n initializer=None,\n **kwargs\n ):\n super(LSTMCell, self).__init__(initializer=initializer, **kwargs)\n self.initializer = initializer or LSTMInitializer(\n hidden_size=hidden_size)\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.forget_bias = float(forget_bias)\n self.memory_optimization = memory_optimization\n self.drop_states = drop_states\n\n def _apply(\n self,\n model,\n input_t,\n seq_lengths,\n states,\n timestep,\n extra_inputs=None,\n ):\n hidden_t_prev, cell_t_prev = states\n\n fc_input = hidden_t_prev\n fc_input_dim = self.hidden_size\n\n if extra_inputs is not None:\n extra_input_blobs, extra_input_sizes = zip(*extra_inputs)\n fc_input = brew.concat(\n model,\n [hidden_t_prev] + list(extra_input_blobs),\n self.scope('gates_concatenated_input_t'),\n axis=2,\n )\n fc_input_dim += sum(extra_input_sizes)\n\n gates_t = brew.fc(\n model,\n fc_input,\n self.scope('gates_t'),\n dim_in=fc_input_dim,\n dim_out=4 * self.hidden_size,\n axis=2,\n )\n brew.sum(model, [gates_t, input_t], gates_t)\n\n hidden_t, cell_t = model.net.LSTMUnit(\n [\n hidden_t_prev,\n cell_t_prev,\n gates_t,\n seq_lengths,\n timestep,\n ],\n list(self.get_state_names()),\n forget_bias=self.forget_bias,\n drop_states=self.drop_states,\n )\n model.net.AddExternalOutputs(hidden_t, cell_t)\n if self.memory_optimization:\n self.recompute_blobs = [gates_t]\n return hidden_t, cell_t\n\n def get_input_params(self):\n return {\n 'weights': self.scope('i2h') + '_w',\n 'biases': self.scope('i2h') + '_b',\n }\n\n def get_recurrent_params(self):\n return {\n 'weights': self.scope('gates_t') + '_w',\n 'biases': self.scope('gates_t') + '_b',\n }\n\n def prepare_input(self, model, input_blob):\n return brew.fc(\n model,\n input_blob,\n self.scope('i2h'),\n dim_in=self.input_size,\n dim_out=4 * self.hidden_size,\n axis=2,\n )\n\n def get_state_names(self):\n return (self.scope('hidden_t'), self.scope('cell_t'))\n\n def get_output_dim(self):\n return self.hidden_size\n\n\nclass MILSTMCell(LSTMCell):\n\n def _apply(\n self,\n model,\n input_t,\n seq_lengths,\n states,\n timestep,\n extra_inputs=None,\n ):\n hidden_t_prev, cell_t_prev = states\n\n fc_input = hidden_t_prev\n fc_input_dim = self.hidden_size\n\n if extra_inputs is not None:\n extra_input_blobs, extra_input_sizes = zip(*extra_inputs)\n fc_input = brew.concat(\n model,\n [hidden_t_prev] + list(extra_input_blobs),\n self.scope('gates_concatenated_input_t'),\n axis=2,\n )\n fc_input_dim += sum(extra_input_sizes)\n\n prev_t = brew.fc(\n model,\n fc_input,\n self.scope('prev_t'),\n dim_in=fc_input_dim,\n dim_out=4 * self.hidden_size,\n axis=2,\n )\n\n # defining initializers for MI parameters\n alpha = model.create_param(\n self.scope('alpha'),\n shape=[4 * self.hidden_size],\n initializer=Initializer('ConstantFill', value=1.0),\n )\n beta_h = model.create_param(\n self.scope('beta1'),\n shape=[4 * self.hidden_size],\n initializer=Initializer('ConstantFill', value=1.0),\n )\n beta_i = model.create_param(\n self.scope('beta2'),\n shape=[4 * self.hidden_size],\n initializer=Initializer('ConstantFill', value=1.0),\n )\n b = model.create_param(\n self.scope('b'),\n shape=[4 * self.hidden_size],\n initializer=Initializer('ConstantFill', value=0.0),\n )\n\n # alpha * input_t + 
beta_h\n # Shape: [1, batch_size, 4 * hidden_size]\n alpha_by_input_t_plus_beta_h = model.net.ElementwiseLinear(\n [input_t, alpha, beta_h],\n self.scope('alpha_by_input_t_plus_beta_h'),\n axis=2,\n )\n # (alpha * input_t + beta_h) * prev_t =\n # alpha * input_t * prev_t + beta_h * prev_t\n # Shape: [1, batch_size, 4 * hidden_size]\n alpha_by_input_t_plus_beta_h_by_prev_t = model.net.Mul(\n [alpha_by_input_t_plus_beta_h, prev_t],\n self.scope('alpha_by_input_t_plus_beta_h_by_prev_t')\n )\n # beta_i * input_t + b\n # Shape: [1, batch_size, 4 * hidden_size]\n beta_i_by_input_t_plus_b = model.net.ElementwiseLinear(\n [input_t, beta_i, b],\n self.scope('beta_i_by_input_t_plus_b'),\n axis=2,\n )\n # alpha * input_t * prev_t + beta_h * prev_t + beta_i * input_t + b\n # Shape: [1, batch_size, 4 * hidden_size]\n gates_t = brew.sum(\n model,\n [alpha_by_input_t_plus_beta_h_by_prev_t, beta_i_by_input_t_plus_b],\n self.scope('gates_t')\n )\n hidden_t, cell_t = model.net.LSTMUnit(\n [hidden_t_prev, cell_t_prev, gates_t, seq_lengths, timestep],\n [self.scope('hidden_t_intermediate'), self.scope('cell_t')],\n forget_bias=self.forget_bias,\n drop_states=self.drop_states,\n )\n model.net.AddExternalOutputs(\n cell_t,\n hidden_t,\n )\n if self.memory_optimization:\n self.recompute_blobs = [gates_t]\n return hidden_t, cell_t\n\n\nclass DropoutCell(RNNCell):\n '''\n Wraps arbitrary RNNCell, applying dropout to its output (but not to the\n recurrent connection for the corresponding state).\n '''\n\n def __init__(self, internal_cell, dropout_ratio=None, **kwargs):\n self.internal_cell = internal_cell\n self.dropout_ratio = dropout_ratio\n assert 'is_test' in kwargs, \"Argument 'is_test' is required\"\n self.is_test = kwargs.pop('is_test')\n super(DropoutCell, self).__init__(**kwargs)\n\n self.prepare_input = internal_cell.prepare_input\n self.get_output_state_index = internal_cell.get_output_state_index\n self.get_state_names = internal_cell.get_state_names\n self.get_output_dim = internal_cell.get_output_dim\n\n self.mask = 0\n\n def _apply(\n self,\n model,\n input_t,\n seq_lengths,\n states,\n timestep,\n extra_inputs=None,\n ):\n return self.internal_cell._apply(\n model,\n input_t,\n seq_lengths,\n states,\n timestep,\n extra_inputs,\n )\n\n def _prepare_output(self, model, states):\n output = self.internal_cell._prepare_output(\n model,\n states,\n )\n if self.dropout_ratio is not None:\n output = self._apply_dropout(model, output)\n return output\n\n def _prepare_output_sequence(self, model, state_outputs):\n output = self.internal_cell._prepare_output_sequence(\n model,\n state_outputs,\n )\n if self.dropout_ratio is not None:\n output = self._apply_dropout(model, output)\n return output\n\n def _apply_dropout(self, model, output):\n if self.dropout_ratio and not self.forward_only:\n with core.NameScope(self.name or ''):\n output = brew.dropout(\n model,\n output,\n str(output) + '_with_dropout_mask{}'.format(self.mask),\n ratio=float(self.dropout_ratio),\n is_test=self.is_test,\n )\n self.mask += 1\n return output\n\n\nclass MultiRNNCellInitializer(object):\n def __init__(self, cells):\n self.cells = cells\n\n def create_states(self, model):\n states = []\n for cell in self.cells:\n with core.NameScope(cell.name):\n states.extend(cell.initializer.create_states(model))\n return states\n\nclass MultiRNNCell(RNNCell):\n '''\n Multilayer RNN via the composition of RNNCell instance.\n\n It is the resposibility of calling code to ensure the compatibility\n of the successive layers in terms of input/output 
dimensiality, etc.,\n and to ensure that their blobs do not have name conflicts, typically by\n creating the cells with names that specify layer number.\n\n Assumes first state (recurrent output) for each layer should be the input\n to the next layer.\n '''\n\n def __init__(self, cells, residual_output_layers=None, **kwargs):\n '''\n cells: list of RNNCell instances, from input to output side.\n\n name: string designating network component (for scoping)\n\n residual_output_layers: list of indices of layers whose input will\n be added elementwise to their output elementwise. (It is the\n responsibility of the client code to ensure shape compatibility.)\n Note that layer 0 (zero) cannot have residual output because of the\n timing of prepare_input().\n\n forward_only: used to construct inference-only network.\n '''\n super(MultiRNNCell, self).__init__(**kwargs)\n self.cells = cells\n\n if residual_output_layers is None:\n self.residual_output_layers = []\n else:\n self.residual_output_layers = residual_output_layers\n\n output_index_per_layer = []\n base_index = 0\n for cell in self.cells:\n output_index_per_layer.append(\n base_index + cell.get_output_state_index(),\n )\n base_index += len(cell.get_state_names())\n\n self.output_connected_layers = []\n self.output_indices = []\n for i in range(len(self.cells) - 1):\n if (i + 1) in self.residual_output_layers:\n self.output_connected_layers.append(i)\n self.output_indices.append(output_index_per_layer[i])\n else:\n self.output_connected_layers = []\n self.output_indices = []\n self.output_connected_layers.append(len(self.cells) - 1)\n self.output_indices.append(output_index_per_layer[-1])\n\n self.state_names = []\n for cell in self.cells:\n self.state_names.extend(cell.get_state_names())\n\n if len(self.state_names) != len(set(self.state_names)):\n duplicates = {\n state_name for state_name in self.state_names\n if self.state_names.count(state_name) > 1\n }\n raise RuntimeError(\n 'Duplicate state names in MultiRNNCell: {}'.format(\n list(duplicates),\n ),\n )\n\n self.initializer = MultiRNNCellInitializer(cells)\n\n def prepare_input(self, model, input_blob):\n return self.cells[0].prepare_input(model, input_blob)\n\n def _apply(\n self,\n model,\n input_t,\n seq_lengths,\n states,\n timestep,\n extra_inputs=None,\n ):\n\n states_per_layer = [len(cell.get_state_names()) for cell in self.cells]\n assert len(states) == sum(states_per_layer)\n\n next_states = []\n states_index = 0\n\n layer_input = input_t\n for i, layer_cell in enumerate(self.cells):\n num_states = states_per_layer[i]\n layer_states = states[states_index:(states_index + num_states)]\n states_index += num_states\n\n if i > 0:\n prepared_input = layer_cell.prepare_input(model, layer_input)\n else:\n prepared_input = layer_input\n\n layer_next_states = layer_cell._apply(\n model,\n prepared_input,\n seq_lengths,\n layer_states,\n timestep,\n extra_inputs=(None if i > 0 else extra_inputs),\n )\n # Since we're using here non-public method _apply, instead of apply,\n # we have to manually extract output from states\n if i != len(self.cells) - 1:\n layer_output = layer_cell._prepare_output(\n model,\n layer_next_states,\n )\n if i > 0 and i in self.residual_output_layers:\n layer_input = brew.sum(\n model,\n [layer_output, layer_input],\n self.scope('residual_output_{}'.format(i)),\n )\n else:\n layer_input = layer_output\n\n next_states.extend(layer_next_states)\n return next_states\n\n def get_state_names(self):\n return self.state_names\n\n def get_output_state_index(self):\n 
index = 0\n for cell in self.cells[:-1]:\n index += len(cell.get_state_names())\n index += self.cells[-1].get_output_state_index()\n return index\n\n def _prepare_output(self, model, states):\n connected_outputs = []\n state_index = 0\n for i, cell in enumerate(self.cells):\n num_states = len(cell.get_state_names())\n if i in self.output_connected_layers:\n layer_states = states[state_index:state_index + num_states]\n layer_output = cell._prepare_output(\n model,\n layer_states\n )\n connected_outputs.append(layer_output)\n state_index += num_states\n if len(connected_outputs) > 1:\n output = brew.sum(\n model,\n connected_outputs,\n self.scope('residual_output'),\n )\n else:\n output = connected_outputs[0]\n return output\n\n def _prepare_output_sequence(self, model, states):\n connected_outputs = []\n state_index = 0\n for i, cell in enumerate(self.cells):\n num_states = 2 * len(cell.get_state_names())\n if i in self.output_connected_layers:\n layer_states = states[state_index:state_index + num_states]\n layer_output = cell._prepare_output_sequence(\n model,\n layer_states\n )\n connected_outputs.append(layer_output)\n state_index += num_states\n if len(connected_outputs) > 1:\n output = brew.sum(\n model,\n connected_outputs,\n self.scope('residual_output_sequence'),\n )\n else:\n output = connected_outputs[0]\n return output\n\n\nclass AttentionCell(RNNCell):\n\n def __init__(\n self,\n encoder_output_dim,\n encoder_outputs,\n encoder_lengths,\n decoder_cell,\n decoder_state_dim,\n attention_type,\n weighted_encoder_outputs,\n attention_memory_optimization,\n **kwargs\n ):\n super(AttentionCell, self).__init__(**kwargs)\n self.encoder_output_dim = encoder_output_dim\n self.encoder_outputs = encoder_outputs\n self.encoder_lengths = encoder_lengths\n self.decoder_cell = decoder_cell\n self.decoder_state_dim = decoder_state_dim\n self.weighted_encoder_outputs = weighted_encoder_outputs\n self.encoder_outputs_transposed = None\n assert attention_type in [\n AttentionType.Regular,\n AttentionType.Recurrent,\n AttentionType.Dot,\n AttentionType.SoftCoverage,\n ]\n self.attention_type = attention_type\n self.attention_memory_optimization = attention_memory_optimization\n\n def _apply(\n self,\n model,\n input_t,\n seq_lengths,\n states,\n timestep,\n extra_inputs=None,\n ):\n if self.attention_type == AttentionType.SoftCoverage:\n decoder_prev_states = states[:-2]\n attention_weighted_encoder_context_t_prev = states[-2]\n coverage_t_prev = states[-1]\n else:\n decoder_prev_states = states[:-1]\n attention_weighted_encoder_context_t_prev = states[-1]\n\n assert extra_inputs is None\n\n decoder_states = self.decoder_cell._apply(\n model,\n input_t,\n seq_lengths,\n decoder_prev_states,\n timestep,\n extra_inputs=[(\n attention_weighted_encoder_context_t_prev,\n self.encoder_output_dim,\n )],\n )\n\n self.hidden_t_intermediate = self.decoder_cell._prepare_output(\n model,\n decoder_states,\n )\n\n if self.attention_type == AttentionType.Recurrent:\n (\n attention_weighted_encoder_context_t,\n self.attention_weights_3d,\n attention_blobs,\n ) = apply_recurrent_attention(\n model=model,\n encoder_output_dim=self.encoder_output_dim,\n encoder_outputs_transposed=self.encoder_outputs_transposed,\n weighted_encoder_outputs=self.weighted_encoder_outputs,\n decoder_hidden_state_t=self.hidden_t_intermediate,\n decoder_hidden_state_dim=self.decoder_state_dim,\n scope=self.name,\n attention_weighted_encoder_context_t_prev=(\n attention_weighted_encoder_context_t_prev\n ),\n 
encoder_lengths=self.encoder_lengths,\n )\n elif self.attention_type == AttentionType.Regular:\n (\n attention_weighted_encoder_context_t,\n self.attention_weights_3d,\n attention_blobs,\n ) = apply_regular_attention(\n model=model,\n encoder_output_dim=self.encoder_output_dim,\n encoder_outputs_transposed=self.encoder_outputs_transposed,\n weighted_encoder_outputs=self.weighted_encoder_outputs,\n decoder_hidden_state_t=self.hidden_t_intermediate,\n decoder_hidden_state_dim=self.decoder_state_dim,\n scope=self.name,\n encoder_lengths=self.encoder_lengths,\n )\n elif self.attention_type == AttentionType.Dot:\n (\n attention_weighted_encoder_context_t,\n self.attention_weights_3d,\n attention_blobs,\n ) = apply_dot_attention(\n model=model,\n encoder_output_dim=self.encoder_output_dim,\n encoder_outputs_transposed=self.encoder_outputs_transposed,\n decoder_hidden_state_t=self.hidden_t_intermediate,\n decoder_hidden_state_dim=self.decoder_state_dim,\n scope=self.name,\n encoder_lengths=self.encoder_lengths,\n )\n elif self.attention_type == AttentionType.SoftCoverage:\n (\n attention_weighted_encoder_context_t,\n self.attention_weights_3d,\n attention_blobs,\n coverage_t,\n ) = apply_soft_coverage_attention(\n model=model,\n encoder_output_dim=self.encoder_output_dim,\n encoder_outputs_transposed=self.encoder_outputs_transposed,\n weighted_encoder_outputs=self.weighted_encoder_outputs,\n decoder_hidden_state_t=self.hidden_t_intermediate,\n decoder_hidden_state_dim=self.decoder_state_dim,\n scope=self.name,\n encoder_lengths=self.encoder_lengths,\n coverage_t_prev=coverage_t_prev,\n coverage_weights=self.coverage_weights,\n )\n else:\n raise Exception('Attention type {} not implemented'.format(\n self.attention_type\n ))\n\n if self.attention_memory_optimization:\n self.recompute_blobs.extend(attention_blobs)\n\n output = list(decoder_states) + [attention_weighted_encoder_context_t]\n if self.attention_type == AttentionType.SoftCoverage:\n output.append(coverage_t)\n\n output[self.decoder_cell.get_output_state_index()] = model.Copy(\n output[self.decoder_cell.get_output_state_index()],\n self.scope('hidden_t_external'),\n )\n model.net.AddExternalOutputs(*output)\n\n return output\n\n def get_attention_weights(self):\n # [batch_size, encoder_length, 1]\n return self.attention_weights_3d\n\n def prepare_input(self, model, input_blob):\n if self.encoder_outputs_transposed is None:\n self.encoder_outputs_transposed = brew.transpose(\n model,\n self.encoder_outputs,\n self.scope('encoder_outputs_transposed'),\n axes=[1, 2, 0],\n )\n if (\n self.weighted_encoder_outputs is None and\n self.attention_type != AttentionType.Dot\n ):\n self.weighted_encoder_outputs = brew.fc(\n model,\n self.encoder_outputs,\n self.scope('weighted_encoder_outputs'),\n dim_in=self.encoder_output_dim,\n dim_out=self.encoder_output_dim,\n axis=2,\n )\n\n return self.decoder_cell.prepare_input(model, input_blob)\n\n def build_initial_coverage(self, model):\n \"\"\"\n initial_coverage is always zeros of shape [encoder_length],\n which shape must be determined programmatically dureing network\n computation.\n\n This method also sets self.coverage_weights, a separate transform\n of encoder_outputs which is used to determine coverage contribution\n tp attention.\n \"\"\"\n assert self.attention_type == AttentionType.SoftCoverage\n\n # [encoder_length, batch_size, encoder_output_dim]\n self.coverage_weights = brew.fc(\n model,\n self.encoder_outputs,\n self.scope('coverage_weights'),\n dim_in=self.encoder_output_dim,\n 
dim_out=self.encoder_output_dim,\n axis=2,\n )\n\n encoder_length = model.net.Slice(\n model.net.Shape(self.encoder_outputs),\n starts=[0],\n ends=[1],\n )\n if (\n scope.CurrentDeviceScope() is not None and\n scope.CurrentDeviceScope().device_type == caffe2_pb2.CUDA\n ):\n encoder_length = model.net.CopyGPUToCPU(\n encoder_length,\n 'encoder_length_cpu',\n )\n # total attention weight applied across decoding steps_per_checkpoint\n # shape: [encoder_length]\n initial_coverage = model.net.ConstantFill(\n encoder_length,\n self.scope('initial_coverage'),\n value=0.0,\n input_as_shape=1,\n )\n return initial_coverage\n\n def get_state_names(self):\n state_names = list(self.decoder_cell.get_state_names())\n state_names[self.get_output_state_index()] = self.scope(\n 'hidden_t_external',\n )\n state_names.append(self.scope('attention_weighted_encoder_context_t'))\n if self.attention_type == AttentionType.SoftCoverage:\n state_names.append(self.scope('coverage_t'))\n return state_names\n\n def get_output_dim(self):\n return self.decoder_state_dim + self.encoder_output_dim\n\n def get_output_state_index(self):\n return self.decoder_cell.get_output_state_index()\n\n def _prepare_output(self, model, states):\n if self.attention_type == AttentionType.SoftCoverage:\n attention_context = states[-2]\n else:\n attention_context = states[-1]\n\n with core.NameScope(self.name or ''):\n output = brew.concat(\n model,\n [self.hidden_t_intermediate, attention_context],\n 'states_and_context_combination',\n axis=2,\n )\n\n return output\n\n def _prepare_output_sequence(self, model, state_outputs):\n if self.attention_type == AttentionType.SoftCoverage:\n decoder_state_outputs = state_outputs[:-4]\n else:\n decoder_state_outputs = state_outputs[:-2]\n\n decoder_output = self.decoder_cell._prepare_output_sequence(\n model,\n decoder_state_outputs,\n )\n\n if self.attention_type == AttentionType.SoftCoverage:\n attention_context_index = 2 * (len(self.get_state_names()) - 2)\n else:\n attention_context_index = 2 * (len(self.get_state_names()) - 1)\n\n with core.NameScope(self.name or ''):\n output = brew.concat(\n model,\n [\n decoder_output,\n state_outputs[attention_context_index],\n ],\n 'states_and_context_combination',\n axis=2,\n )\n return output\n\n\nclass LSTMWithAttentionCell(AttentionCell):\n\n def __init__(\n self,\n encoder_output_dim,\n encoder_outputs,\n encoder_lengths,\n decoder_input_dim,\n decoder_state_dim,\n name,\n attention_type,\n weighted_encoder_outputs,\n forget_bias,\n lstm_memory_optimization,\n attention_memory_optimization,\n forward_only=False,\n ):\n decoder_cell = LSTMCell(\n input_size=decoder_input_dim,\n hidden_size=decoder_state_dim,\n forget_bias=forget_bias,\n memory_optimization=lstm_memory_optimization,\n name='{}/decoder'.format(name),\n forward_only=False,\n drop_states=False,\n )\n super(LSTMWithAttentionCell, self).__init__(\n encoder_output_dim=encoder_output_dim,\n encoder_outputs=encoder_outputs,\n encoder_lengths=encoder_lengths,\n decoder_cell=decoder_cell,\n decoder_state_dim=decoder_state_dim,\n name=name,\n attention_type=attention_type,\n weighted_encoder_outputs=weighted_encoder_outputs,\n attention_memory_optimization=attention_memory_optimization,\n forward_only=forward_only,\n )\n\n\nclass MILSTMWithAttentionCell(AttentionCell):\n\n def __init__(\n self,\n encoder_output_dim,\n encoder_outputs,\n decoder_input_dim,\n decoder_state_dim,\n name,\n attention_type,\n weighted_encoder_outputs,\n forget_bias,\n lstm_memory_optimization,\n 
attention_memory_optimization,\n forward_only=False,\n ):\n decoder_cell = MILSTMCell(\n input_size=decoder_input_dim,\n hidden_size=decoder_state_dim,\n forget_bias=forget_bias,\n memory_optimization=lstm_memory_optimization,\n name='{}/decoder'.format(name),\n forward_only=False,\n drop_states=False,\n )\n super(MILSTMWithAttentionCell, self).__init__(\n encoder_output_dim=encoder_output_dim,\n encoder_outputs=encoder_outputs,\n decoder_cell=decoder_cell,\n decoder_state_dim=decoder_state_dim,\n name=name,\n attention_type=attention_type,\n weighted_encoder_outputs=weighted_encoder_outputs,\n attention_memory_optimization=attention_memory_optimization,\n forward_only=forward_only,\n )\n\n\ndef _LSTM(\n cell_class,\n model,\n input_blob,\n seq_lengths,\n initial_states,\n dim_in,\n dim_out,\n scope,\n outputs_with_grads=(0,),\n return_params=False,\n memory_optimization=False,\n forget_bias=0.0,\n forward_only=False,\n drop_states=False,\n return_last_layer_only=True,\n static_rnn_unroll_size=None,\n):\n '''\n Adds a standard LSTM recurrent network operator to a model.\n\n cell_class: LSTMCell or compatible subclass\n\n model: ModelHelper object new operators would be added to\n\n input_blob: the input sequence in a format T x N x D\n where T is sequence size, N - batch size and D - input dimension\n\n seq_lengths: blob containing sequence lengths which would be passed to\n LSTMUnit operator\n\n initial_states: a list of (2 * num_layers) blobs representing the initial\n hidden and cell states of each layer. If this argument is None,\n these states will be added to the model as network parameters.\n\n dim_in: input dimension\n\n dim_out: number of units per LSTM layer\n (use int for single-layer LSTM, list of ints for multi-layer)\n\n outputs_with_grads : position indices of output blobs for LAST LAYER which\n will receive external error gradient during backpropagation.\n These outputs are: (h_all, h_last, c_all, c_last)\n\n return_params: if True, will return a dictionary of parameters of the LSTM\n\n memory_optimization: if enabled, the LSTM step is recomputed on backward\n step so that we don't need to store forward activations for each\n timestep. Saves memory with cost of computation.\n\n forget_bias: forget gate bias (default 0.0)\n\n forward_only: whether to create a backward pass\n\n drop_states: drop invalid states, passed through to LSTMUnit operator\n\n return_last_layer_only: only return outputs from final layer\n (so that length of results does depend on number of layers)\n\n static_rnn_unroll_size: if not None, we will use static RNN which is\n unrolled into Caffe2 graph. 
The size of the unroll is the value of\n this parameter.\n '''\n if type(dim_out) is not list and type(dim_out) is not tuple:\n dim_out = [dim_out]\n num_layers = len(dim_out)\n\n cells = []\n for i in range(num_layers):\n name = scope + \"/layer_{}\".format(i) if num_layers > 1 else scope\n cell = cell_class(\n input_size=(dim_in if i == 0 else dim_out[i - 1]),\n hidden_size=dim_out[i],\n forget_bias=forget_bias,\n memory_optimization=memory_optimization,\n name=name,\n forward_only=forward_only,\n drop_states=drop_states,\n )\n cells.append(cell)\n\n cell = MultiRNNCell(\n cells,\n name=scope,\n forward_only=forward_only,\n ) if num_layers > 1 else cells[0]\n\n cell = (\n cell if static_rnn_unroll_size is None\n else UnrolledCell(cell, static_rnn_unroll_size))\n\n # outputs_with_grads argument indexes into final layer\n outputs_with_grads = [4 * (num_layers - 1) + i for i in outputs_with_grads]\n _, result = cell.apply_over_sequence(\n model=model,\n inputs=input_blob,\n seq_lengths=seq_lengths,\n initial_states=initial_states,\n outputs_with_grads=outputs_with_grads,\n )\n\n if return_last_layer_only:\n result = result[4 * (num_layers - 1):]\n if return_params:\n result = list(result) + [{\n 'input': cell.get_input_params(),\n 'recurrent': cell.get_recurrent_params(),\n }]\n return tuple(result)\n\n\nLSTM = functools.partial(_LSTM, LSTMCell)\nMILSTM = functools.partial(_LSTM, MILSTMCell)\n\n\nclass UnrolledCell(RNNCell):\n def __init__(self, cell, T):\n self.T = T\n self.cell = cell\n\n def apply_over_sequence(\n self,\n model,\n inputs,\n seq_lengths,\n initial_states,\n outputs_with_grads=None,\n ):\n inputs = self.cell.prepare_input(model, inputs)\n\n # Now they are blob references - outputs of splitting the input sequence\n split_inputs = model.net.Split(\n inputs,\n [str(inputs) + \"_timestep_{}\".format(i)\n for i in range(self.T)],\n axis=0)\n if self.T == 1:\n split_inputs = [split_inputs]\n\n states = initial_states\n all_states = []\n for t in range(0, self.T):\n scope_name = \"timestep_{}\".format(t)\n # Parameters of all timesteps are shared\n with ParameterSharing({scope_name: ''}),\\\n scope.NameScope(scope_name):\n timestep = model.param_init_net.ConstantFill(\n [], \"timestep\", value=t, shape=[1],\n dtype=core.DataType.INT32,\n device_option=core.DeviceOption(caffe2_pb2.CPU))\n states = self.cell._apply(\n model=model,\n input_t=split_inputs[t],\n seq_lengths=seq_lengths,\n states=states,\n timestep=timestep,\n )\n all_states.append(states)\n\n all_states = zip(*all_states)\n all_states = [\n model.net.Concat(\n list(full_output),\n [\n str(full_output[0])[len(\"timestep_0/\"):] + \"_concat\",\n str(full_output[0])[len(\"timestep_0/\"):] + \"_concat_info\"\n\n ],\n axis=0)[0]\n for full_output in all_states\n ]\n outputs = tuple(\n six.next(it) for it in\n itertools.cycle([iter(all_states), iter(states)])\n )\n outputs_without_grad = set(range(len(outputs))) - set(\n outputs_with_grads)\n for i in outputs_without_grad:\n model.net.ZeroGradient(outputs[i], [])\n logging.debug(\"Added 0 gradients for blobs:\",\n [outputs[i] for i in outputs_without_grad])\n\n final_output = self.cell._prepare_output_sequence(model, outputs)\n\n return final_output, outputs\n\n\ndef GetLSTMParamNames():\n weight_params = [\"input_gate_w\", \"forget_gate_w\", \"output_gate_w\", \"cell_w\"]\n bias_params = [\"input_gate_b\", \"forget_gate_b\", \"output_gate_b\", \"cell_b\"]\n return {'weights': weight_params, 'biases': bias_params}\n\n\ndef InitFromLSTMParams(lstm_pblobs, param_values):\n 
'''\n Set the parameters of LSTM based on predefined values\n '''\n weight_params = GetLSTMParamNames()['weights']\n bias_params = GetLSTMParamNames()['biases']\n for input_type in viewkeys(param_values):\n weight_values = [\n param_values[input_type][w].flatten()\n for w in weight_params\n ]\n wmat = np.array([])\n for w in weight_values:\n wmat = np.append(wmat, w)\n bias_values = [\n param_values[input_type][b].flatten()\n for b in bias_params\n ]\n bm = np.array([])\n for b in bias_values:\n bm = np.append(bm, b)\n\n weights_blob = lstm_pblobs[input_type]['weights']\n bias_blob = lstm_pblobs[input_type]['biases']\n cur_weight = workspace.FetchBlob(weights_blob)\n cur_biases = workspace.FetchBlob(bias_blob)\n\n workspace.FeedBlob(\n weights_blob,\n wmat.reshape(cur_weight.shape).astype(np.float32))\n workspace.FeedBlob(\n bias_blob,\n bm.reshape(cur_biases.shape).astype(np.float32))\n\n\ndef cudnn_LSTM(model, input_blob, initial_states, dim_in, dim_out,\n scope, recurrent_params=None, input_params=None,\n num_layers=1, return_params=False):\n '''\n CuDNN version of LSTM for GPUs.\n input_blob Blob containing the input. Will need to be available\n when param_init_net is run, because the sequence lengths\n and batch sizes will be inferred from the size of this\n blob.\n initial_states tuple of (hidden_init, cell_init) blobs\n dim_in input dimensions\n dim_out output/hidden dimension\n scope namescope to apply\n recurrent_params dict of blobs containing values for recurrent\n gate weights, biases (if None, use random init values)\n See GetLSTMParamNames() for format.\n input_params dict of blobs containing values for input\n gate weights, biases (if None, use random init values)\n See GetLSTMParamNames() for format.\n num_layers number of LSTM layers\n return_params if True, returns (param_extract_net, param_mapping)\n where param_extract_net is a net that when run, will\n populate the blobs specified in param_mapping with the\n current gate weights and biases (input/recurrent).\n Useful for assigning the values back to non-cuDNN\n LSTM.\n '''\n with core.NameScope(scope):\n weight_params = GetLSTMParamNames()['weights']\n bias_params = GetLSTMParamNames()['biases']\n\n input_weight_size = dim_out * dim_in\n upper_layer_input_weight_size = dim_out * dim_out\n recurrent_weight_size = dim_out * dim_out\n input_bias_size = dim_out\n recurrent_bias_size = dim_out\n\n def init(layer, pname, input_type):\n input_weight_size_for_layer = input_weight_size if layer == 0 else \\\n upper_layer_input_weight_size\n if pname in weight_params:\n sz = input_weight_size_for_layer if input_type == 'input' \\\n else recurrent_weight_size\n elif pname in bias_params:\n sz = input_bias_size if input_type == 'input' \\\n else recurrent_bias_size\n else:\n assert False, \"unknown parameter type {}\".format(pname)\n return model.param_init_net.UniformFill(\n [],\n \"lstm_init_{}_{}_{}\".format(input_type, pname, layer),\n shape=[sz])\n\n # Multiply by 4 since we have 4 gates per LSTM unit\n first_layer_sz = input_weight_size + recurrent_weight_size + \\\n input_bias_size + recurrent_bias_size\n upper_layer_sz = upper_layer_input_weight_size + \\\n recurrent_weight_size + input_bias_size + \\\n recurrent_bias_size\n total_sz = 4 * (first_layer_sz + (num_layers - 1) * upper_layer_sz)\n\n weights = model.create_param(\n 'lstm_weight',\n shape=[total_sz],\n initializer=Initializer('UniformFill'),\n tags=ParameterTags.WEIGHT,\n )\n\n lstm_args = {\n 'hidden_size': dim_out,\n 'rnn_mode': 'lstm',\n 'bidirectional': 0, # 
TODO\n 'dropout': 1.0, # TODO\n 'input_mode': 'linear', # TODO\n 'num_layers': num_layers,\n 'engine': 'CUDNN'\n }\n\n param_extract_net = core.Net(\"lstm_param_extractor\")\n param_extract_net.AddExternalInputs([input_blob, weights])\n param_extract_mapping = {}\n\n # Populate the weights-blob from blobs containing parameters for\n # the individual components of the LSTM, such as forget/input gate\n # weights and bises. Also, create a special param_extract_net that\n # can be used to grab those individual params from the black-box\n # weights blob. These results can be then fed to InitFromLSTMParams()\n for input_type in ['input', 'recurrent']:\n param_extract_mapping[input_type] = {}\n p = recurrent_params if input_type == 'recurrent' else input_params\n if p is None:\n p = {}\n for pname in weight_params + bias_params:\n for j in range(0, num_layers):\n values = p[pname] if pname in p else init(j, pname, input_type)\n model.param_init_net.RecurrentParamSet(\n [input_blob, weights, values],\n weights,\n layer=j,\n input_type=input_type,\n param_type=pname,\n **lstm_args\n )\n if pname not in param_extract_mapping[input_type]:\n param_extract_mapping[input_type][pname] = {}\n b = param_extract_net.RecurrentParamGet(\n [input_blob, weights],\n [\"lstm_{}_{}_{}\".format(input_type, pname, j)],\n layer=j,\n input_type=input_type,\n param_type=pname,\n **lstm_args\n )\n param_extract_mapping[input_type][pname][j] = b\n\n (hidden_input_blob, cell_input_blob) = initial_states\n output, hidden_output, cell_output, rnn_scratch, dropout_states = \\\n model.net.Recurrent(\n [input_blob, hidden_input_blob, cell_input_blob, weights],\n [\"lstm_output\", \"lstm_hidden_output\", \"lstm_cell_output\",\n \"lstm_rnn_scratch\", \"lstm_dropout_states\"],\n seed=random.randint(0, 100000), # TODO: dropout seed\n **lstm_args\n )\n model.net.AddExternalOutputs(\n hidden_output, cell_output, rnn_scratch, dropout_states)\n\n if return_params:\n param_extract = param_extract_net, param_extract_mapping\n return output, hidden_output, cell_output, param_extract\n else:\n return output, hidden_output, cell_output\n\n\ndef LSTMWithAttention(\n model,\n decoder_inputs,\n decoder_input_lengths,\n initial_decoder_hidden_state,\n initial_decoder_cell_state,\n initial_attention_weighted_encoder_context,\n encoder_output_dim,\n encoder_outputs,\n encoder_lengths,\n decoder_input_dim,\n decoder_state_dim,\n scope,\n attention_type=AttentionType.Regular,\n outputs_with_grads=(0, 4),\n weighted_encoder_outputs=None,\n lstm_memory_optimization=False,\n attention_memory_optimization=False,\n forget_bias=0.0,\n forward_only=False,\n):\n '''\n Adds a LSTM with attention mechanism to a model.\n\n The implementation is based on https://arxiv.org/abs/1409.0473, with\n a small difference in the order\n how we compute new attention context and new hidden state, similarly to\n https://arxiv.org/abs/1508.04025.\n\n The model uses encoder-decoder naming conventions,\n where the decoder is the sequence the op is iterating over,\n while computing the attention context over the encoder.\n\n model: ModelHelper object new operators would be added to\n\n decoder_inputs: the input sequence in a format T x N x D\n where T is sequence size, N - batch size and D - input dimension\n\n decoder_input_lengths: blob containing sequence lengths\n which would be passed to LSTMUnit operator\n\n initial_decoder_hidden_state: initial hidden state of LSTM\n\n initial_decoder_cell_state: initial cell state of LSTM\n\n 
initial_attention_weighted_encoder_context: initial attention context\n\n encoder_output_dim: dimension of encoder outputs\n\n encoder_outputs: the sequence, on which we compute the attention context\n at every iteration\n\n encoder_lengths: a tensor with lengths of each encoder sequence in batch\n (may be None, meaning all encoder sequences are of same length)\n\n decoder_input_dim: input dimension (last dimension on decoder_inputs)\n\n decoder_state_dim: size of hidden states of LSTM\n\n attention_type: One of: AttentionType.Regular, AttentionType.Recurrent.\n Determines which type of attention mechanism to use.\n\n outputs_with_grads : position indices of output blobs which will receive\n external error gradient during backpropagation\n\n weighted_encoder_outputs: encoder outputs to be used to compute attention\n weights. In the basic case it's just linear transformation of\n encoder outputs (that the default, when weighted_encoder_outputs is None).\n However, it can be something more complicated - like a separate\n encoder network (for example, in case of convolutional encoder)\n\n lstm_memory_optimization: recompute LSTM activations on backward pass, so\n we don't need to store their values in forward passes\n\n attention_memory_optimization: recompute attention for backward pass\n\n forward_only: whether to create only forward pass\n '''\n cell = LSTMWithAttentionCell(\n encoder_output_dim=encoder_output_dim,\n encoder_outputs=encoder_outputs,\n encoder_lengths=encoder_lengths,\n decoder_input_dim=decoder_input_dim,\n decoder_state_dim=decoder_state_dim,\n name=scope,\n attention_type=attention_type,\n weighted_encoder_outputs=weighted_encoder_outputs,\n forget_bias=forget_bias,\n lstm_memory_optimization=lstm_memory_optimization,\n attention_memory_optimization=attention_memory_optimization,\n forward_only=forward_only,\n )\n initial_states = [\n initial_decoder_hidden_state,\n initial_decoder_cell_state,\n initial_attention_weighted_encoder_context,\n ]\n if attention_type == AttentionType.SoftCoverage:\n initial_states.append(cell.build_initial_coverage(model))\n _, result = cell.apply_over_sequence(\n model=model,\n inputs=decoder_inputs,\n seq_lengths=decoder_input_lengths,\n initial_states=initial_states,\n outputs_with_grads=outputs_with_grads,\n )\n return result\n\n\ndef _layered_LSTM(\n model, input_blob, seq_lengths, initial_states,\n dim_in, dim_out, scope, outputs_with_grads=(0,), return_params=False,\n memory_optimization=False, forget_bias=0.0, forward_only=False,\n drop_states=False, create_lstm=None):\n params = locals() # leave it as a first line to grab all params\n params.pop('create_lstm')\n if not isinstance(dim_out, list):\n return create_lstm(**params)\n elif len(dim_out) == 1:\n params['dim_out'] = dim_out[0]\n return create_lstm(**params)\n\n assert len(dim_out) != 0, \"dim_out list can't be empty\"\n assert return_params is False, \"return_params not supported for layering\"\n for i, output_dim in enumerate(dim_out):\n params.update({\n 'dim_out': output_dim\n })\n output, last_output, all_states, last_state = create_lstm(**params)\n params.update({\n 'input_blob': output,\n 'dim_in': output_dim,\n 'initial_states': (last_output, last_state),\n 'scope': scope + '_layer_{}'.format(i + 1)\n })\n return output, last_output, all_states, last_state\n\n\nlayered_LSTM = functools.partial(_layered_LSTM, create_lstm=LSTM)\n"
] | [
[
"numpy.append",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Holmes-Alan/See360 | [
"24853246c126f883544bd618d622033f7a82c214",
"24853246c126f883544bd618d622033f7a82c214"
] | [
"test_demo.py",
"main_multiview_real.py"
] | [
"from __future__ import print_function\r\nimport argparse\r\nfrom math import log10\r\n\r\nimport os\r\nimport torch\r\nimport torch.nn as nn\r\nfrom PIL import Image, ImageEnhance\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader\r\nfrom modules import *\r\nimport torchvision.transforms as transforms\r\nimport socket\r\nimport numpy as np\r\nfrom datasets import get_theta\r\nfrom util import PSNR, SSIM, rgb2ycbcr\r\nfrom os.path import join\r\nimport time\r\nimport json\r\nimport lpips\r\n\r\n\r\n# Training settings\r\nparser = argparse.ArgumentParser(description='PyTorch See360 model')\r\nparser.add_argument('--batchSize', type=int, default=64, help='training batch size')\r\nparser.add_argument('--gpu_mode', type=bool, default=True)\r\nparser.add_argument('--threads', type=int, default=6, help='number of threads for data loader to use')\r\nparser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')\r\nparser.add_argument('--gpus', default=2, type=int, help='number of gpu')\r\nparser.add_argument('--data_dir', type=str, default='./data/real_world')\r\nparser.add_argument('--test_set', type=str, default='British') # British, Louvre, Manhattan, Parliament\r\nparser.add_argument('--save_dir', default='result/', help='Location to save checkpoint models')\r\nparser.add_argument('--log_folder', default='record/', help='Location to save checkpoint models')\r\n\r\n\r\nopt = parser.parse_args()\r\ngpus_list = range(opt.gpus)\r\nhostname = str(socket.gethostname())\r\ncudnn.benchmark = True\r\nprint(opt)\r\n\r\n\r\ntransform = transforms.Compose([\r\n transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]\r\n # transforms.Lambda(lambda x: x.mul(255))\r\n # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\r\n]\r\n)\r\n\r\n\r\ncuda = opt.gpu_mode\r\nif cuda and not torch.cuda.is_available():\r\n raise Exception(\"No GPU found, please run without --cuda\")\r\n\r\ntorch.manual_seed(opt.seed)\r\nif cuda:\r\n torch.cuda.manual_seed(opt.seed)\r\n\r\nprint('===> Loading datasets')\r\n\r\n\r\nfile_path = join(opt.data_dir, opt.test_set)\r\nsave_path = join(opt.save_dir, opt.test_set)\r\n\r\n\r\nprint('===> Building model ')\r\n\r\nmodel = generator_final_v2(input_dim=3, dim=64)\r\n\r\n\r\nmodel = torch.nn.DataParallel(model)\r\n\r\nloss_fn_alex = lpips.LPIPS(net='alex') # best forward scores\r\n\r\nif cuda:\r\n model = model.cuda(gpus_list[0])\r\n loss_fn_alex = loss_fn_alex.cuda(gpus_list[0])\r\n\r\nmodel.eval()\r\nloss_fn_alex.eval()\r\n\r\ndef eval(angle_left, angle_right, angle_target):\r\n\r\n model_name = 'models/' + opt.test_set + '/GAN_final.pth'\r\n\r\n if os.path.exists(model_name):\r\n model.load_state_dict(torch.load(model_name, map_location=lambda storage, loc: storage))\r\n # print('===> read model as: ', model_name)\r\n\r\n theta = int(12 / np.abs(angle_right - angle_left) * (angle_target - angle_left))\r\n code = F.one_hot(torch.tensor(theta), num_classes=12).float()\r\n\r\n if angle_right == 360:\r\n angle_right = 0\r\n img1 = Image.open(file_path + '/' + 'img1_crop.png').convert('RGB')\r\n img2 = Image.open(file_path + '/' + 'img2_crop.png').convert('RGB')\r\n\r\n img1 = transform(img1).unsqueeze(0)\r\n img2 = transform(img2).unsqueeze(0)\r\n\r\n img1 = img1.cuda(gpus_list[0])\r\n img2 = img2.cuda(gpus_list[0])\r\n\r\n with torch.no_grad():\r\n img1 = 2.0 * (img1 - 0.5)\r\n img2 = 2.0 * (img2 - 0.5)\r\n\r\n code = code.view(img1.shape[0], 12).cuda()\r\n\r\n output = 
model(img1, img2, code)\r\n\r\n output = output * 0.5 + 0.5\r\n\r\n output = output.data[0].cpu().permute(1, 2, 0)\r\n\r\n output = output * 255\r\n output = output.clamp(0, 255)\r\n\r\n out_name = save_path + '/' + str(angle_target) + '.png'\r\n Image.fromarray(np.uint8(output)).save(out_name)\r\n\r\n\r\n\r\n##########\r\n## Done ##\r\n##########\r\n\r\n##Eval Start!!!!\r\neval(angle_left=0, angle_right=60, angle_target=30)\r\n\r\n\r\n\r\n\r\n",
"from __future__ import print_function\r\nimport argparse\r\nfrom math import log10\r\n\r\nimport os\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.backends.cudnn as cudnn\r\nfrom laploss import LapLoss\r\n# from tensorboardX import SummaryWriter\r\nfrom torch.utils.data import DataLoader\r\nfrom modules import generator_final_v2, discriminator_v2, PDLoss\r\nfrom transformer_net import VGG19\r\nimport numpy as np\r\nfrom data import get_training_set_real\r\nimport torch.nn.functional as F\r\nimport socket\r\nfrom pytorch_ssim import SSIM as pytorch_ssim\r\n\r\n\r\n# Training settings\r\nparser = argparse.ArgumentParser(description='PyTorch See360 real scene training code')\r\nparser.add_argument('--upscale_factor', type=int, default=1, help=\"super resolution upscale factor\")\r\nparser.add_argument('--batchSize', type=int, default=6, help='training batch size')\r\nparser.add_argument('--pretrained_iter', type=int, default=1000, help='number of epochs to train for')\r\nparser.add_argument('--nEpochs', type=int, default=2000, help='number of epochs to train for')\r\nparser.add_argument('--snapshots', type=int, default=5, help='Snapshots')\r\nparser.add_argument('--start_iter', type=int, default=1, help='Starting Epoch')\r\nparser.add_argument('--lr', type=float, default=1e-4, help='Learning Rate. Default=0.0001')\r\nparser.add_argument('--gpu_mode', type=bool, default=True)\r\nparser.add_argument('--threads', type=int, default=6, help='number of threads for data loader to use')\r\nparser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')\r\nparser.add_argument('--gpus', default=2, type=int, help='number of gpu')\r\nparser.add_argument('--data_dir', type=str, default='./data/360HungHom')\r\nparser.add_argument('--data_augmentation', type=bool, default=True)\r\nparser.add_argument('--patch_size', type=int, default=128, help='Size of cropped LR image')\r\nparser.add_argument('--pretrained_G', default='3D/GAN_generator_1345.pth', help='sr pretrained base model')\r\nparser.add_argument('--pretrained_D', default='GAN_discriminator_0.pth', help='sr pretrained base model')\r\nparser.add_argument('--model_type', default='GAN', help='model name')\r\nparser.add_argument('--pretrained', type=bool, default=True)\r\nparser.add_argument('--save_folder', default='models/', help='Location to save checkpoint models')\r\nparser.add_argument('--log_folder', default='logs/', help='Location to save checkpoint models')\r\n\r\nopt = parser.parse_args()\r\ngpus_list = range(opt.gpus)\r\nhostname = str(socket.gethostname())\r\ncudnn.benchmark = True\r\nprint(opt)\r\n\r\nclass TVLoss(nn.Module):\r\n def forward(self, x):\r\n batch_size = x.size()[0]\r\n h_x = x.size()[2]\r\n w_x = x.size()[3]\r\n count_h = self._tensor_size(x[:, :, 1:, :])\r\n count_w = self._tensor_size(x[:, :, :, 1:])\r\n h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()\r\n w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()\r\n return 2 * (h_tv / count_h + w_tv / count_w) / batch_size\r\n\r\n def _tensor_size(self, t):\r\n return t.size()[1] * t.size()[2] * t.size()[3]\r\n\r\n\r\n\r\ndef train(epoch):\r\n G_epoch_loss = 0\r\n D_epoch_loss = 0\r\n adv_epoch_loss = 0\r\n recon_epoch_loss = 0\r\n G.train()\r\n D.train()\r\n\r\n vgg_weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]\r\n\r\n for iteration, batch in enumerate(training_data_loader, 1):\r\n left_img, right_img, target_img, mask_img = batch[0], batch[1], batch[2], batch[3]\r\n x, 
y, code = batch[4], batch[5], batch[6]\r\n minibatch = left_img.size()[0]\r\n real_label = torch.ones((minibatch, 1680))\r\n fake_label = torch.zeros((minibatch, 1680))\r\n # code = F.one_hot(theta, num_classes=12)\r\n if cuda:\r\n left_img = left_img.cuda(gpus_list[0])\r\n right_img = right_img.cuda(gpus_list[0])\r\n target_img = target_img.cuda(gpus_list[0])\r\n mask_img = mask_img.cuda(gpus_list[0])\r\n code = code.cuda(gpus_list[0])\r\n real_label = real_label.cuda(gpus_list[0])\r\n fake_label = fake_label.cuda(gpus_list[0])\r\n\r\n target_img = 2.0 * (target_img - 0.5)\r\n left_img = 2.0 * (left_img - 0.5)\r\n right_img = 2.0 * (right_img - 0.5)\r\n mask_img = 2.0 * (mask_img - 0.5)\r\n\r\n # Reset gradient\r\n for p in D.parameters():\r\n p.requires_grad = False\r\n\r\n G_optimizer.zero_grad()\r\n\r\n predict = G(left_img, right_img, code)\r\n\r\n PD = torch.mean(PD_loss(predict * 0.5 + 0.5, target_img * 0.5 + 0.5))\r\n\r\n D_fake_feat, D_fake_decision = D(predict, mask_img, left_img, right_img)\r\n D_real_feat, D_real_decision = D(target_img, mask_img, left_img, right_img)\r\n\r\n GAN_loss = L1_loss(D_fake_decision, real_label)\r\n\r\n recon_loss = lap_loss(predict, target_img) \r\n\r\n ssim_loss = 1 - ssim(predict, target_img) \r\n\r\n GAN_feat_loss = L1_loss(D_real_feat.detach(), D_fake_feat)\r\n\r\n\r\n G_loss = 1*recon_loss + 1*ssim_loss + 1*PD + 1*GAN_loss + 1*GAN_feat_loss\r\n\r\n G_loss.backward()\r\n G_optimizer.step()\r\n\r\n # Reset gradient\r\n for p in D.parameters():\r\n p.requires_grad = True\r\n\r\n D_optimizer.zero_grad()\r\n\r\n _, D_fake_decision = D(predict.detach(), mask_img.detach(), left_img.detach(), right_img.detach())\r\n _, D_real_decision = D(target_img, mask_img, left_img, right_img)\r\n\r\n real = real_label * np.random.uniform(0.7, 1.2)\r\n fake = fake_label + np.random.uniform(0.0, 0.3)\r\n\r\n\r\n Dis_loss = (L1_loss(D_real_decision, real)\r\n + L1_loss(D_fake_decision, fake)) / 2.0\r\n\r\n # Back propagation\r\n D_loss = Dis_loss\r\n D_loss.backward()\r\n D_optimizer.step()\r\n\r\n # log\r\n G_epoch_loss += G_loss.data\r\n D_epoch_loss += D_loss.data\r\n adv_epoch_loss += (GAN_loss.data)\r\n recon_epoch_loss += (recon_loss.data)\r\n\r\n\r\n print(\r\n \"===> Epoch[{}]({}/{}): G_loss: {:.4f} || D_loss: {:.4f} \"\r\n \"|| Adv: {:.4f} || Recon_Loss: {:.4f} || ssim_loss: {:.4f}\"\r\n \"|| GAN_feat_loss: {:.4f} || PD_loss: {:.4f}\".format(\r\n epoch, iteration,\r\n len(training_data_loader), G_loss.data, D_loss.data,\r\n GAN_loss.data, recon_loss.data, ssim_loss.data,\r\n GAN_feat_loss.data, PD.data))\r\n print(\r\n \"===> Epoch {} Complete: Avg. 
G_loss: {:.4f} D_loss: {:.4f} Recon_loss: {:.4f} Adv: {:.4f}\".format(\r\n epoch, G_epoch_loss / len(training_data_loader), D_epoch_loss / len(training_data_loader),\r\n recon_epoch_loss / len(training_data_loader),\r\n adv_epoch_loss / len(training_data_loader)))\r\n\r\n\r\ndef print_network(net):\r\n num_params = 0\r\n for param in net.parameters():\r\n num_params += param.numel()\r\n # print(net)\r\n print('Total number of parameters: %d' % num_params)\r\n\r\n\r\ndef checkpoint(epoch):\r\n model_out_G = opt.save_folder + opt.model_type + \"_generator_{}.pth\".format(epoch)\r\n model_out_D = opt.save_folder + opt.model_type + \"_discriminator_{}.pth\".format(epoch)\r\n torch.save(G.state_dict(), model_out_G)\r\n torch.save(D.state_dict(), model_out_D)\r\n print(\"Checkpoint saved to {} and {}\".format(model_out_G, model_out_D))\r\n\r\n\r\ncuda = opt.gpu_mode\r\nif cuda and not torch.cuda.is_available():\r\n raise Exception(\"No GPU found, please run without --cuda\")\r\n\r\ntorch.manual_seed(opt.seed)\r\nif cuda:\r\n torch.cuda.manual_seed(opt.seed)\r\n\r\nprint('===> Loading datasets')\r\ntrain_set = get_training_set_real(opt.data_dir, opt.data_augmentation)\r\ntraining_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)\r\n\r\nprint('===> Building model')\r\n\r\n\r\nG = generator_final_v2(input_dim=3, dim=64)\r\nD = discriminator_v2(num_channels=3, base_filter=64)\r\nVGG = VGG19()\r\nPD_loss = PDLoss(l1_lambda=1.5, w_lambda=0.01)\r\n\r\n\r\nG = torch.nn.DataParallel(G, device_ids=gpus_list)\r\nD = torch.nn.DataParallel(D, device_ids=gpus_list)\r\nVGG = torch.nn.DataParallel(VGG, device_ids=gpus_list)\r\nPD_loss = torch.nn.DataParallel(PD_loss, device_ids=gpus_list)\r\n\r\n\r\nL1_loss = nn.L1Loss()\r\nlap_loss = LapLoss(max_levels=5, k_size=5, sigma=2.0)\r\nBCE_loss = nn.BCEWithLogitsLoss()\r\nL2_loss = nn.MSELoss()\r\nssim = pytorch_ssim()\r\n\r\n\r\nprint('---------- Generator architecture -------------')\r\nprint_network(G)\r\nprint('---------- Discriminator architecture -------------')\r\nprint_network(D)\r\nprint('----------------------------------------------')\r\n\r\n\r\nif opt.pretrained:\r\n model_G = os.path.join(opt.save_folder + opt.pretrained_G)\r\n model_D = os.path.join(opt.save_folder + opt.pretrained_D)\r\n if os.path.exists(model_G):\r\n # G.load_state_dict(torch.load(model_G, map_location=lambda storage, loc: storage))\r\n pretrained_dict = torch.load(model_G, map_location=lambda storage, loc: storage)\r\n model_dict = G.state_dict()\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n model_dict.update(pretrained_dict)\r\n G.load_state_dict(model_dict)\r\n print('Pre-trained Generator model is loaded.')\r\n if os.path.exists(model_D):\r\n D.load_state_dict(torch.load(model_D, map_location=lambda storage, loc: storage))\r\n print('Pre-trained Discriminator model is loaded.')\r\n\r\n\r\n\r\nif cuda:\r\n G = G.cuda(gpus_list[0])\r\n D = D.cuda(gpus_list[0])\r\n PD_loss = PD_loss.cuda(gpus_list[0])\r\n VGG = VGG.cuda(gpus_list[0])\r\n L1_loss = L1_loss.cuda(gpus_list[0])\r\n BCE_loss = BCE_loss.cuda(gpus_list[0])\r\n L2_loss = L2_loss.cuda(gpus_list[0])\r\n lap_loss = lap_loss.cuda(gpus_list[0])\r\n ssim = ssim.cuda(gpus_list[0])\r\n\r\n\r\nG_optimizer = optim.Adam(G.parameters(), lr=opt.lr, betas=(0.5, 0.999), eps=1e-8)\r\nD_optimizer = optim.Adam(D.parameters(), lr=opt.lr, betas=(0.5, 0.999), eps=1e-8)\r\n\r\n\r\n# writer = SummaryWriter(opt.log_folder)\r\nfor epoch in 
range(opt.start_iter, opt.nEpochs + 1):\r\n train(epoch)\r\n\r\n if epoch % (opt.snapshots) == 0:\r\n checkpoint(epoch)"
] | [
[
"numpy.abs",
"torch.cuda.manual_seed",
"torch.load",
"torch.manual_seed",
"numpy.uint8",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.DataParallel"
],
[
"torch.nn.MSELoss",
"torch.ones",
"torch.cuda.manual_seed",
"torch.zeros",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"torch.pow",
"torch.cuda.is_available",
"numpy.random.uniform",
"torch.nn.DataParallel",
"torch.nn.L1Loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jminsk-cc/xarray | [
"48c680f8e631b0786989356e8360968bef551195"
] | [
"xarray/tests/test_combine.py"
] | [
"from collections import OrderedDict\nfrom datetime import datetime\nfrom itertools import product\n\nimport numpy as np\nimport pytest\n\nfrom xarray import (\n DataArray,\n Dataset,\n auto_combine,\n combine_by_coords,\n combine_nested,\n concat,\n)\nfrom xarray.core import dtypes\nfrom xarray.core.combine import (\n _check_shape_tile_ids,\n _combine_all_along_first_dim,\n _combine_nd,\n _infer_concat_order_from_coords,\n _infer_concat_order_from_positions,\n _new_tile_id,\n)\n\nfrom . import assert_equal, assert_identical, raises_regex\nfrom .test_dataset import create_test_data\n\n\ndef assert_combined_tile_ids_equal(dict1, dict2):\n assert len(dict1) == len(dict2)\n for k, v in dict1.items():\n assert k in dict2.keys()\n assert_equal(dict1[k], dict2[k])\n\n\nclass TestTileIDsFromNestedList:\n def test_1d(self):\n ds = create_test_data\n input = [ds(0), ds(1)]\n\n expected = {(0,): ds(0), (1,): ds(1)}\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n def test_2d(self):\n ds = create_test_data\n input = [[ds(0), ds(1)], [ds(2), ds(3)], [ds(4), ds(5)]]\n\n expected = {\n (0, 0): ds(0),\n (0, 1): ds(1),\n (1, 0): ds(2),\n (1, 1): ds(3),\n (2, 0): ds(4),\n (2, 1): ds(5),\n }\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n def test_3d(self):\n ds = create_test_data\n input = [\n [[ds(0), ds(1)], [ds(2), ds(3)], [ds(4), ds(5)]],\n [[ds(6), ds(7)], [ds(8), ds(9)], [ds(10), ds(11)]],\n ]\n\n expected = {\n (0, 0, 0): ds(0),\n (0, 0, 1): ds(1),\n (0, 1, 0): ds(2),\n (0, 1, 1): ds(3),\n (0, 2, 0): ds(4),\n (0, 2, 1): ds(5),\n (1, 0, 0): ds(6),\n (1, 0, 1): ds(7),\n (1, 1, 0): ds(8),\n (1, 1, 1): ds(9),\n (1, 2, 0): ds(10),\n (1, 2, 1): ds(11),\n }\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n def test_single_dataset(self):\n ds = create_test_data(0)\n input = [ds]\n\n expected = {(0,): ds}\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n def test_redundant_nesting(self):\n ds = create_test_data\n input = [[ds(0)], [ds(1)]]\n\n expected = {(0, 0): ds(0), (1, 0): ds(1)}\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n def test_ignore_empty_list(self):\n ds = create_test_data(0)\n input = [ds, []]\n expected = {(0,): ds}\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n def test_uneven_depth_input(self):\n # Auto_combine won't work on ragged input\n # but this is just to increase test coverage\n ds = create_test_data\n input = [ds(0), [ds(1), ds(2)]]\n\n expected = {(0,): ds(0), (1, 0): ds(1), (1, 1): ds(2)}\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n def test_uneven_length_input(self):\n # Auto_combine won't work on ragged input\n # but this is just to increase test coverage\n ds = create_test_data\n input = [[ds(0)], [ds(1), ds(2)]]\n\n expected = {(0, 0): ds(0), (1, 0): ds(1), (1, 1): ds(2)}\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n def test_infer_from_datasets(self):\n ds = create_test_data\n input = [ds(0), ds(1)]\n\n expected = {(0,): ds(0), (1,): ds(1)}\n actual = _infer_concat_order_from_positions(input)\n assert_combined_tile_ids_equal(expected, actual)\n\n\nclass TestTileIDsFromCoords:\n def test_1d(self):\n ds0 
= Dataset({\"x\": [0, 1]})\n ds1 = Dataset({\"x\": [2, 3]})\n\n expected = {(0,): ds0, (1,): ds1}\n actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0])\n assert_combined_tile_ids_equal(expected, actual)\n assert concat_dims == [\"x\"]\n\n def test_2d(self):\n ds0 = Dataset({\"x\": [0, 1], \"y\": [10, 20, 30]})\n ds1 = Dataset({\"x\": [2, 3], \"y\": [10, 20, 30]})\n ds2 = Dataset({\"x\": [0, 1], \"y\": [40, 50, 60]})\n ds3 = Dataset({\"x\": [2, 3], \"y\": [40, 50, 60]})\n ds4 = Dataset({\"x\": [0, 1], \"y\": [70, 80, 90]})\n ds5 = Dataset({\"x\": [2, 3], \"y\": [70, 80, 90]})\n\n expected = {\n (0, 0): ds0,\n (1, 0): ds1,\n (0, 1): ds2,\n (1, 1): ds3,\n (0, 2): ds4,\n (1, 2): ds5,\n }\n actual, concat_dims = _infer_concat_order_from_coords(\n [ds1, ds0, ds3, ds5, ds2, ds4]\n )\n assert_combined_tile_ids_equal(expected, actual)\n assert concat_dims == [\"x\", \"y\"]\n\n def test_no_dimension_coords(self):\n ds0 = Dataset({\"foo\": (\"x\", [0, 1])})\n ds1 = Dataset({\"foo\": (\"x\", [2, 3])})\n with raises_regex(ValueError, \"Could not find any dimension\"):\n _infer_concat_order_from_coords([ds1, ds0])\n\n def test_coord_not_monotonic(self):\n ds0 = Dataset({\"x\": [0, 1]})\n ds1 = Dataset({\"x\": [3, 2]})\n with raises_regex(\n ValueError,\n \"Coordinate variable x is neither \" \"monotonically increasing nor\",\n ):\n _infer_concat_order_from_coords([ds1, ds0])\n\n def test_coord_monotonically_decreasing(self):\n ds0 = Dataset({\"x\": [3, 2]})\n ds1 = Dataset({\"x\": [1, 0]})\n\n expected = {(0,): ds0, (1,): ds1}\n actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0])\n assert_combined_tile_ids_equal(expected, actual)\n assert concat_dims == [\"x\"]\n\n def test_no_concatenation_needed(self):\n ds = Dataset({\"foo\": (\"x\", [0, 1])})\n expected = {(): ds}\n actual, concat_dims = _infer_concat_order_from_coords([ds])\n assert_combined_tile_ids_equal(expected, actual)\n assert concat_dims == []\n\n def test_2d_plus_bystander_dim(self):\n ds0 = Dataset({\"x\": [0, 1], \"y\": [10, 20, 30], \"t\": [0.1, 0.2]})\n ds1 = Dataset({\"x\": [2, 3], \"y\": [10, 20, 30], \"t\": [0.1, 0.2]})\n ds2 = Dataset({\"x\": [0, 1], \"y\": [40, 50, 60], \"t\": [0.1, 0.2]})\n ds3 = Dataset({\"x\": [2, 3], \"y\": [40, 50, 60], \"t\": [0.1, 0.2]})\n\n expected = {(0, 0): ds0, (1, 0): ds1, (0, 1): ds2, (1, 1): ds3}\n actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0, ds3, ds2])\n assert_combined_tile_ids_equal(expected, actual)\n assert concat_dims == [\"x\", \"y\"]\n\n def test_string_coords(self):\n ds0 = Dataset({\"person\": [\"Alice\", \"Bob\"]})\n ds1 = Dataset({\"person\": [\"Caroline\", \"Daniel\"]})\n\n expected = {(0,): ds0, (1,): ds1}\n actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0])\n assert_combined_tile_ids_equal(expected, actual)\n assert concat_dims == [\"person\"]\n\n # Decided against natural sorting of string coords GH #2616\n def test_lexicographic_sort_string_coords(self):\n ds0 = Dataset({\"simulation\": [\"run8\", \"run9\"]})\n ds1 = Dataset({\"simulation\": [\"run10\", \"run11\"]})\n\n expected = {(0,): ds1, (1,): ds0}\n actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0])\n assert_combined_tile_ids_equal(expected, actual)\n assert concat_dims == [\"simulation\"]\n\n def test_datetime_coords(self):\n ds0 = Dataset({\"time\": [datetime(2000, 3, 6), datetime(2001, 3, 7)]})\n ds1 = Dataset({\"time\": [datetime(1999, 1, 1), datetime(1999, 2, 4)]})\n\n expected = {(0,): ds1, (1,): ds0}\n actual, concat_dims = 
_infer_concat_order_from_coords([ds0, ds1])\n assert_combined_tile_ids_equal(expected, actual)\n assert concat_dims == [\"time\"]\n\n\[email protected](scope=\"module\")\ndef create_combined_ids():\n return _create_combined_ids\n\n\ndef _create_combined_ids(shape):\n tile_ids = _create_tile_ids(shape)\n nums = range(len(tile_ids))\n return {tile_id: create_test_data(num) for tile_id, num in zip(tile_ids, nums)}\n\n\ndef _create_tile_ids(shape):\n tile_ids = product(*(range(i) for i in shape))\n return list(tile_ids)\n\n\nclass TestNewTileIDs:\n @pytest.mark.parametrize(\n \"old_id, new_id\",\n [((3, 0, 1), (0, 1)), ((0, 0), (0,)), ((1,), ()), ((0,), ()), ((1, 0), (0,))],\n )\n def test_new_tile_id(self, old_id, new_id):\n ds = create_test_data\n assert _new_tile_id((old_id, ds)) == new_id\n\n def test_get_new_tile_ids(self, create_combined_ids):\n shape = (1, 2, 3)\n combined_ids = create_combined_ids(shape)\n\n expected_tile_ids = sorted(combined_ids.keys())\n actual_tile_ids = _create_tile_ids(shape)\n assert expected_tile_ids == actual_tile_ids\n\n\nclass TestCombineND:\n @pytest.mark.parametrize(\"concat_dim\", [\"dim1\", \"new_dim\"])\n def test_concat_once(self, create_combined_ids, concat_dim):\n shape = (2,)\n combined_ids = create_combined_ids(shape)\n ds = create_test_data\n result = _combine_all_along_first_dim(\n combined_ids,\n dim=concat_dim,\n data_vars=\"all\",\n coords=\"different\",\n compat=\"no_conflicts\",\n )\n\n expected_ds = concat([ds(0), ds(1)], dim=concat_dim)\n assert_combined_tile_ids_equal(result, {(): expected_ds})\n\n def test_concat_only_first_dim(self, create_combined_ids):\n shape = (2, 3)\n combined_ids = create_combined_ids(shape)\n result = _combine_all_along_first_dim(\n combined_ids,\n dim=\"dim1\",\n data_vars=\"all\",\n coords=\"different\",\n compat=\"no_conflicts\",\n )\n\n ds = create_test_data\n partway1 = concat([ds(0), ds(3)], dim=\"dim1\")\n partway2 = concat([ds(1), ds(4)], dim=\"dim1\")\n partway3 = concat([ds(2), ds(5)], dim=\"dim1\")\n expected_datasets = [partway1, partway2, partway3]\n expected = {(i,): ds for i, ds in enumerate(expected_datasets)}\n\n assert_combined_tile_ids_equal(result, expected)\n\n @pytest.mark.parametrize(\"concat_dim\", [\"dim1\", \"new_dim\"])\n def test_concat_twice(self, create_combined_ids, concat_dim):\n shape = (2, 3)\n combined_ids = create_combined_ids(shape)\n result = _combine_nd(combined_ids, concat_dims=[\"dim1\", concat_dim])\n\n ds = create_test_data\n partway1 = concat([ds(0), ds(3)], dim=\"dim1\")\n partway2 = concat([ds(1), ds(4)], dim=\"dim1\")\n partway3 = concat([ds(2), ds(5)], dim=\"dim1\")\n expected = concat([partway1, partway2, partway3], dim=concat_dim)\n\n assert_equal(result, expected)\n\n\nclass TestCheckShapeTileIDs:\n def test_check_depths(self):\n ds = create_test_data(0)\n combined_tile_ids = {(0,): ds, (0, 1): ds}\n with raises_regex(ValueError, \"sub-lists do not have \" \"consistent depths\"):\n _check_shape_tile_ids(combined_tile_ids)\n\n def test_check_lengths(self):\n ds = create_test_data(0)\n combined_tile_ids = {(0, 0): ds, (0, 1): ds, (0, 2): ds, (1, 0): ds, (1, 1): ds}\n with raises_regex(ValueError, \"sub-lists do not have \" \"consistent lengths\"):\n _check_shape_tile_ids(combined_tile_ids)\n\n\nclass TestNestedCombine:\n def test_nested_concat(self):\n objs = [Dataset({\"x\": [0]}), Dataset({\"x\": [1]})]\n expected = Dataset({\"x\": [0, 1]})\n actual = combine_nested(objs, concat_dim=\"x\")\n assert_identical(expected, actual)\n actual = combine_nested(objs, 
concat_dim=[\"x\"])\n assert_identical(expected, actual)\n\n actual = combine_nested([actual], concat_dim=None)\n assert_identical(expected, actual)\n\n actual = combine_nested([actual], concat_dim=\"x\")\n assert_identical(expected, actual)\n\n objs = [Dataset({\"x\": [0, 1]}), Dataset({\"x\": [2]})]\n actual = combine_nested(objs, concat_dim=\"x\")\n expected = Dataset({\"x\": [0, 1, 2]})\n assert_identical(expected, actual)\n\n # ensure combine_nested handles non-sorted variables\n objs = [\n Dataset(OrderedDict([(\"x\", (\"a\", [0])), (\"y\", (\"a\", [0]))])),\n Dataset(OrderedDict([(\"y\", (\"a\", [1])), (\"x\", (\"a\", [1]))])),\n ]\n actual = combine_nested(objs, concat_dim=\"a\")\n expected = Dataset({\"x\": (\"a\", [0, 1]), \"y\": (\"a\", [0, 1])})\n assert_identical(expected, actual)\n\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"x\": [0]})]\n with pytest.raises(KeyError):\n combine_nested(objs, concat_dim=\"x\")\n\n @pytest.mark.parametrize(\n \"join, expected\",\n [\n (\"outer\", Dataset({\"x\": [0, 1], \"y\": [0, 1]})),\n (\"inner\", Dataset({\"x\": [0, 1], \"y\": []})),\n (\"left\", Dataset({\"x\": [0, 1], \"y\": [0]})),\n (\"right\", Dataset({\"x\": [0, 1], \"y\": [1]})),\n ],\n )\n def test_combine_nested_join(self, join, expected):\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"x\": [1], \"y\": [1]})]\n actual = combine_nested(objs, concat_dim=\"x\", join=join)\n assert_identical(expected, actual)\n\n def test_combine_nested_join_exact(self):\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"x\": [1], \"y\": [1]})]\n with raises_regex(ValueError, \"indexes along dimension\"):\n combine_nested(objs, concat_dim=\"x\", join=\"exact\")\n\n def test_empty_input(self):\n assert_identical(Dataset(), combine_nested([], concat_dim=\"x\"))\n\n # Fails because of concat's weird treatment of dimension coords, see #2975\n @pytest.mark.xfail\n def test_nested_concat_too_many_dims_at_once(self):\n objs = [Dataset({\"x\": [0], \"y\": [1]}), Dataset({\"y\": [0], \"x\": [1]})]\n with pytest.raises(ValueError, match=\"not equal across datasets\"):\n combine_nested(objs, concat_dim=\"x\", coords=\"minimal\")\n\n def test_nested_concat_along_new_dim(self):\n objs = [\n Dataset({\"a\": (\"x\", [10]), \"x\": [0]}),\n Dataset({\"a\": (\"x\", [20]), \"x\": [0]}),\n ]\n expected = Dataset({\"a\": ((\"t\", \"x\"), [[10], [20]]), \"x\": [0]})\n actual = combine_nested(objs, concat_dim=\"t\")\n assert_identical(expected, actual)\n\n # Same but with a DataArray as new dim, see GH #1988 and #2647\n dim = DataArray([100, 150], name=\"baz\", dims=\"baz\")\n expected = Dataset(\n {\"a\": ((\"baz\", \"x\"), [[10], [20]]), \"x\": [0], \"baz\": [100, 150]}\n )\n actual = combine_nested(objs, concat_dim=dim)\n assert_identical(expected, actual)\n\n def test_nested_merge(self):\n data = Dataset({\"x\": 0})\n actual = combine_nested([data, data, data], concat_dim=None)\n assert_identical(data, actual)\n\n ds1 = Dataset({\"a\": (\"x\", [1, 2]), \"x\": [0, 1]})\n ds2 = Dataset({\"a\": (\"x\", [2, 3]), \"x\": [1, 2]})\n expected = Dataset({\"a\": (\"x\", [1, 2, 3]), \"x\": [0, 1, 2]})\n actual = combine_nested([ds1, ds2], concat_dim=None)\n assert_identical(expected, actual)\n actual = combine_nested([ds1, ds2], concat_dim=[None])\n assert_identical(expected, actual)\n\n tmp1 = Dataset({\"x\": 0})\n tmp2 = Dataset({\"x\": np.nan})\n actual = combine_nested([tmp1, tmp2], concat_dim=None)\n assert_identical(tmp1, actual)\n actual = combine_nested([tmp1, tmp2], concat_dim=[None])\n 
assert_identical(tmp1, actual)\n\n # Single object, with a concat_dim explicitly provided\n # Test the issue reported in GH #1988\n objs = [Dataset({\"x\": 0, \"y\": 1})]\n dim = DataArray([100], name=\"baz\", dims=\"baz\")\n actual = combine_nested(objs, concat_dim=[dim])\n expected = Dataset({\"x\": (\"baz\", [0]), \"y\": (\"baz\", [1])}, {\"baz\": [100]})\n assert_identical(expected, actual)\n\n # Just making sure that auto_combine is doing what is\n # expected for non-scalar values, too.\n objs = [Dataset({\"x\": (\"z\", [0, 1]), \"y\": (\"z\", [1, 2])})]\n dim = DataArray([100], name=\"baz\", dims=\"baz\")\n actual = combine_nested(objs, concat_dim=[dim])\n expected = Dataset(\n {\"x\": ((\"baz\", \"z\"), [[0, 1]]), \"y\": ((\"baz\", \"z\"), [[1, 2]])},\n {\"baz\": [100]},\n )\n assert_identical(expected, actual)\n\n def test_concat_multiple_dims(self):\n objs = [\n [Dataset({\"a\": ((\"x\", \"y\"), [[0]])}), Dataset({\"a\": ((\"x\", \"y\"), [[1]])})],\n [Dataset({\"a\": ((\"x\", \"y\"), [[2]])}), Dataset({\"a\": ((\"x\", \"y\"), [[3]])})],\n ]\n actual = combine_nested(objs, concat_dim=[\"x\", \"y\"])\n expected = Dataset({\"a\": ((\"x\", \"y\"), [[0, 1], [2, 3]])})\n assert_identical(expected, actual)\n\n def test_concat_name_symmetry(self):\n \"\"\"Inspired by the discussion on GH issue #2777\"\"\"\n\n da1 = DataArray(name=\"a\", data=[[0]], dims=[\"x\", \"y\"])\n da2 = DataArray(name=\"b\", data=[[1]], dims=[\"x\", \"y\"])\n da3 = DataArray(name=\"a\", data=[[2]], dims=[\"x\", \"y\"])\n da4 = DataArray(name=\"b\", data=[[3]], dims=[\"x\", \"y\"])\n\n x_first = combine_nested([[da1, da2], [da3, da4]], concat_dim=[\"x\", \"y\"])\n y_first = combine_nested([[da1, da3], [da2, da4]], concat_dim=[\"y\", \"x\"])\n\n assert_identical(x_first, y_first)\n\n def test_concat_one_dim_merge_another(self):\n data = create_test_data()\n data1 = data.copy(deep=True)\n data2 = data.copy(deep=True)\n\n objs = [\n [data1.var1.isel(dim2=slice(4)), data2.var1.isel(dim2=slice(4, 9))],\n [data1.var2.isel(dim2=slice(4)), data2.var2.isel(dim2=slice(4, 9))],\n ]\n\n expected = data[[\"var1\", \"var2\"]]\n actual = combine_nested(objs, concat_dim=[None, \"dim2\"])\n assert expected.identical(actual)\n\n def test_auto_combine_2d(self):\n ds = create_test_data\n\n partway1 = concat([ds(0), ds(3)], dim=\"dim1\")\n partway2 = concat([ds(1), ds(4)], dim=\"dim1\")\n partway3 = concat([ds(2), ds(5)], dim=\"dim1\")\n expected = concat([partway1, partway2, partway3], dim=\"dim2\")\n\n datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]]\n result = combine_nested(datasets, concat_dim=[\"dim1\", \"dim2\"])\n assert_equal(result, expected)\n\n def test_combine_nested_missing_data_new_dim(self):\n # Your data includes \"time\" and \"station\" dimensions, and each year's\n # data has a different set of stations.\n datasets = [\n Dataset({\"a\": (\"x\", [2, 3]), \"x\": [1, 2]}),\n Dataset({\"a\": (\"x\", [1, 2]), \"x\": [0, 1]}),\n ]\n expected = Dataset(\n {\"a\": ((\"t\", \"x\"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {\"x\": [0, 1, 2]}\n )\n actual = combine_nested(datasets, concat_dim=\"t\")\n assert_identical(expected, actual)\n\n def test_invalid_hypercube_input(self):\n ds = create_test_data\n\n datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4)]]\n with raises_regex(ValueError, \"sub-lists do not have \" \"consistent lengths\"):\n combine_nested(datasets, concat_dim=[\"dim1\", \"dim2\"])\n\n datasets = [[ds(0), ds(1)], [[ds(3), ds(4)]]]\n with raises_regex(ValueError, \"sub-lists do not have \" \"consistent 
depths\"):\n combine_nested(datasets, concat_dim=[\"dim1\", \"dim2\"])\n\n datasets = [[ds(0), ds(1)], [ds(3), ds(4)]]\n with raises_regex(ValueError, \"concat_dims has length\"):\n combine_nested(datasets, concat_dim=[\"dim1\"])\n\n def test_merge_one_dim_concat_another(self):\n objs = [\n [Dataset({\"foo\": (\"x\", [0, 1])}), Dataset({\"bar\": (\"x\", [10, 20])})],\n [Dataset({\"foo\": (\"x\", [2, 3])}), Dataset({\"bar\": (\"x\", [30, 40])})],\n ]\n expected = Dataset({\"foo\": (\"x\", [0, 1, 2, 3]), \"bar\": (\"x\", [10, 20, 30, 40])})\n\n actual = combine_nested(objs, concat_dim=[\"x\", None], compat=\"equals\")\n assert_identical(expected, actual)\n\n # Proving it works symmetrically\n objs = [\n [Dataset({\"foo\": (\"x\", [0, 1])}), Dataset({\"foo\": (\"x\", [2, 3])})],\n [Dataset({\"bar\": (\"x\", [10, 20])}), Dataset({\"bar\": (\"x\", [30, 40])})],\n ]\n actual = combine_nested(objs, concat_dim=[None, \"x\"], compat=\"equals\")\n assert_identical(expected, actual)\n\n def test_combine_concat_over_redundant_nesting(self):\n objs = [[Dataset({\"x\": [0]}), Dataset({\"x\": [1]})]]\n actual = combine_nested(objs, concat_dim=[None, \"x\"])\n expected = Dataset({\"x\": [0, 1]})\n assert_identical(expected, actual)\n\n objs = [[Dataset({\"x\": [0]})], [Dataset({\"x\": [1]})]]\n actual = combine_nested(objs, concat_dim=[\"x\", None])\n expected = Dataset({\"x\": [0, 1]})\n assert_identical(expected, actual)\n\n objs = [[Dataset({\"x\": [0]})]]\n actual = combine_nested(objs, concat_dim=[None, None])\n expected = Dataset({\"x\": [0]})\n assert_identical(expected, actual)\n\n def test_combine_nested_but_need_auto_combine(self):\n objs = [Dataset({\"x\": [0, 1]}), Dataset({\"x\": [2], \"wall\": [0]})]\n with raises_regex(ValueError, \"cannot be combined\"):\n combine_nested(objs, concat_dim=\"x\")\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0])\n def test_combine_nested_fill_value(self, fill_value):\n datasets = [\n Dataset({\"a\": (\"x\", [2, 3]), \"x\": [1, 2]}),\n Dataset({\"a\": (\"x\", [1, 2]), \"x\": [0, 1]}),\n ]\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value = np.nan\n expected = Dataset(\n {\"a\": ((\"t\", \"x\"), [[fill_value, 2, 3], [1, 2, fill_value]])},\n {\"x\": [0, 1, 2]},\n )\n actual = combine_nested(datasets, concat_dim=\"t\", fill_value=fill_value)\n assert_identical(expected, actual)\n\n\nclass TestCombineAuto:\n def test_combine_by_coords(self):\n objs = [Dataset({\"x\": [0]}), Dataset({\"x\": [1]})]\n actual = combine_by_coords(objs)\n expected = Dataset({\"x\": [0, 1]})\n assert_identical(expected, actual)\n\n actual = combine_by_coords([actual])\n assert_identical(expected, actual)\n\n objs = [Dataset({\"x\": [0, 1]}), Dataset({\"x\": [2]})]\n actual = combine_by_coords(objs)\n expected = Dataset({\"x\": [0, 1, 2]})\n assert_identical(expected, actual)\n\n # ensure auto_combine handles non-sorted variables\n objs = [\n Dataset({\"x\": (\"a\", [0]), \"y\": (\"a\", [0]), \"a\": [0]}),\n Dataset({\"x\": (\"a\", [1]), \"y\": (\"a\", [1]), \"a\": [1]}),\n ]\n actual = combine_by_coords(objs)\n expected = Dataset({\"x\": (\"a\", [0, 1]), \"y\": (\"a\", [0, 1]), \"a\": [0, 1]})\n assert_identical(expected, actual)\n\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"y\": [1], \"x\": [1]})]\n actual = combine_by_coords(objs)\n expected = Dataset({\"x\": [0, 1], \"y\": [0, 1]})\n assert_equal(actual, expected)\n\n objs = [Dataset({\"x\": 0}), Dataset({\"x\": 1})]\n with 
raises_regex(ValueError, \"Could not find any dimension \" \"coordinates\"):\n combine_by_coords(objs)\n\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"x\": [0]})]\n with raises_regex(ValueError, \"Every dimension needs a coordinate\"):\n combine_by_coords(objs)\n\n def test_empty_input(self):\n assert_identical(Dataset(), combine_by_coords([]))\n\n @pytest.mark.parametrize(\n \"join, expected\",\n [\n (\"outer\", Dataset({\"x\": [0, 1], \"y\": [0, 1]})),\n (\"inner\", Dataset({\"x\": [0, 1], \"y\": []})),\n (\"left\", Dataset({\"x\": [0, 1], \"y\": [0]})),\n (\"right\", Dataset({\"x\": [0, 1], \"y\": [1]})),\n ],\n )\n def test_combine_coords_join(self, join, expected):\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"x\": [1], \"y\": [1]})]\n actual = combine_nested(objs, concat_dim=\"x\", join=join)\n assert_identical(expected, actual)\n\n def test_combine_coords_join_exact(self):\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"x\": [1], \"y\": [1]})]\n with raises_regex(ValueError, \"indexes along dimension\"):\n combine_nested(objs, concat_dim=\"x\", join=\"exact\")\n\n def test_infer_order_from_coords(self):\n data = create_test_data()\n objs = [data.isel(dim2=slice(4, 9)), data.isel(dim2=slice(4))]\n actual = combine_by_coords(objs)\n expected = data\n assert expected.broadcast_equals(actual)\n\n def test_combine_leaving_bystander_dimensions(self):\n # Check non-monotonic bystander dimension coord doesn't raise\n # ValueError on combine (https://github.com/pydata/xarray/issues/3150)\n ycoord = [\"a\", \"c\", \"b\"]\n\n data = np.random.rand(7, 3)\n\n ds1 = Dataset(\n data_vars=dict(data=([\"x\", \"y\"], data[:3, :])),\n coords=dict(x=[1, 2, 3], y=ycoord),\n )\n\n ds2 = Dataset(\n data_vars=dict(data=([\"x\", \"y\"], data[3:, :])),\n coords=dict(x=[4, 5, 6, 7], y=ycoord),\n )\n\n expected = Dataset(\n data_vars=dict(data=([\"x\", \"y\"], data)),\n coords=dict(x=[1, 2, 3, 4, 5, 6, 7], y=ycoord),\n )\n\n actual = combine_by_coords((ds1, ds2))\n assert_identical(expected, actual)\n\n def test_combine_by_coords_previously_failed(self):\n # In the above scenario, one file is missing, containing the data for\n # one year's data for one variable.\n datasets = [\n Dataset({\"a\": (\"x\", [0]), \"x\": [0]}),\n Dataset({\"b\": (\"x\", [0]), \"x\": [0]}),\n Dataset({\"a\": (\"x\", [1]), \"x\": [1]}),\n ]\n expected = Dataset({\"a\": (\"x\", [0, 1]), \"b\": (\"x\", [0, np.nan])}, {\"x\": [0, 1]})\n actual = combine_by_coords(datasets)\n assert_identical(expected, actual)\n\n def test_combine_by_coords_still_fails(self):\n # concat can't handle new variables (yet):\n # https://github.com/pydata/xarray/issues/508\n datasets = [Dataset({\"x\": 0}, {\"y\": 0}), Dataset({\"x\": 1}, {\"y\": 1, \"z\": 1})]\n with pytest.raises(ValueError):\n combine_by_coords(datasets, \"y\")\n\n def test_combine_by_coords_no_concat(self):\n objs = [Dataset({\"x\": 0}), Dataset({\"y\": 1})]\n actual = combine_by_coords(objs)\n expected = Dataset({\"x\": 0, \"y\": 1})\n assert_identical(expected, actual)\n\n objs = [Dataset({\"x\": 0, \"y\": 1}), Dataset({\"y\": np.nan, \"z\": 2})]\n actual = combine_by_coords(objs)\n expected = Dataset({\"x\": 0, \"y\": 1, \"z\": 2})\n assert_identical(expected, actual)\n\n def test_check_for_impossible_ordering(self):\n ds0 = Dataset({\"x\": [0, 1, 5]})\n ds1 = Dataset({\"x\": [2, 3]})\n with raises_regex(\n ValueError, \"does not have monotonic global indexes\" \" along dimension x\"\n ):\n combine_by_coords([ds1, ds0])\n\n\[email protected](\n \"ignore:In 
xarray version 0.13 `auto_combine` \" \"will be deprecated\"\n)\[email protected](\"ignore:Also `open_mfdataset` will no longer\")\[email protected](\"ignore:The datasets supplied\")\nclass TestAutoCombineOldAPI:\n \"\"\"\n Set of tests which check that old 1-dimensional auto_combine behaviour is\n still satisfied. #2616\n \"\"\"\n\n def test_auto_combine(self):\n objs = [Dataset({\"x\": [0]}), Dataset({\"x\": [1]})]\n actual = auto_combine(objs)\n expected = Dataset({\"x\": [0, 1]})\n assert_identical(expected, actual)\n\n actual = auto_combine([actual])\n assert_identical(expected, actual)\n\n objs = [Dataset({\"x\": [0, 1]}), Dataset({\"x\": [2]})]\n actual = auto_combine(objs)\n expected = Dataset({\"x\": [0, 1, 2]})\n assert_identical(expected, actual)\n\n # ensure auto_combine handles non-sorted variables\n objs = [\n Dataset(OrderedDict([(\"x\", (\"a\", [0])), (\"y\", (\"a\", [0]))])),\n Dataset(OrderedDict([(\"y\", (\"a\", [1])), (\"x\", (\"a\", [1]))])),\n ]\n actual = auto_combine(objs)\n expected = Dataset({\"x\": (\"a\", [0, 1]), \"y\": (\"a\", [0, 1])})\n assert_identical(expected, actual)\n\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"y\": [1], \"x\": [1]})]\n with raises_regex(ValueError, \"too many .* dimensions\"):\n auto_combine(objs)\n\n objs = [Dataset({\"x\": 0}), Dataset({\"x\": 1})]\n with raises_regex(ValueError, \"cannot infer dimension\"):\n auto_combine(objs)\n\n objs = [Dataset({\"x\": [0], \"y\": [0]}), Dataset({\"x\": [0]})]\n with pytest.raises(KeyError):\n auto_combine(objs)\n\n def test_auto_combine_previously_failed(self):\n # In the above scenario, one file is missing, containing the data for\n # one year's data for one variable.\n datasets = [\n Dataset({\"a\": (\"x\", [0]), \"x\": [0]}),\n Dataset({\"b\": (\"x\", [0]), \"x\": [0]}),\n Dataset({\"a\": (\"x\", [1]), \"x\": [1]}),\n ]\n expected = Dataset({\"a\": (\"x\", [0, 1]), \"b\": (\"x\", [0, np.nan])}, {\"x\": [0, 1]})\n actual = auto_combine(datasets)\n assert_identical(expected, actual)\n\n # Your data includes \"time\" and \"station\" dimensions, and each year's\n # data has a different set of stations.\n datasets = [\n Dataset({\"a\": (\"x\", [2, 3]), \"x\": [1, 2]}),\n Dataset({\"a\": (\"x\", [1, 2]), \"x\": [0, 1]}),\n ]\n expected = Dataset(\n {\"a\": ((\"t\", \"x\"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {\"x\": [0, 1, 2]}\n )\n actual = auto_combine(datasets, concat_dim=\"t\")\n assert_identical(expected, actual)\n\n def test_auto_combine_still_fails(self):\n # concat can't handle new variables (yet):\n # https://github.com/pydata/xarray/issues/508\n datasets = [Dataset({\"x\": 0}, {\"y\": 0}), Dataset({\"x\": 1}, {\"y\": 1, \"z\": 1})]\n with pytest.raises(ValueError):\n auto_combine(datasets, \"y\")\n\n def test_auto_combine_no_concat(self):\n objs = [Dataset({\"x\": 0}), Dataset({\"y\": 1})]\n actual = auto_combine(objs)\n expected = Dataset({\"x\": 0, \"y\": 1})\n assert_identical(expected, actual)\n\n objs = [Dataset({\"x\": 0, \"y\": 1}), Dataset({\"y\": np.nan, \"z\": 2})]\n actual = auto_combine(objs)\n expected = Dataset({\"x\": 0, \"y\": 1, \"z\": 2})\n assert_identical(expected, actual)\n\n data = Dataset({\"x\": 0})\n actual = auto_combine([data, data, data], concat_dim=None)\n assert_identical(data, actual)\n\n # Single object, with a concat_dim explicitly provided\n # Test the issue reported in GH #1988\n objs = [Dataset({\"x\": 0, \"y\": 1})]\n dim = DataArray([100], name=\"baz\", dims=\"baz\")\n actual = auto_combine(objs, concat_dim=dim)\n expected = 
Dataset({\"x\": (\"baz\", [0]), \"y\": (\"baz\", [1])}, {\"baz\": [100]})\n assert_identical(expected, actual)\n\n # Just making sure that auto_combine is doing what is\n # expected for non-scalar values, too.\n objs = [Dataset({\"x\": (\"z\", [0, 1]), \"y\": (\"z\", [1, 2])})]\n dim = DataArray([100], name=\"baz\", dims=\"baz\")\n actual = auto_combine(objs, concat_dim=dim)\n expected = Dataset(\n {\"x\": ((\"baz\", \"z\"), [[0, 1]]), \"y\": ((\"baz\", \"z\"), [[1, 2]])},\n {\"baz\": [100]},\n )\n assert_identical(expected, actual)\n\n def test_auto_combine_order_by_appearance_not_coords(self):\n objs = [\n Dataset({\"foo\": (\"x\", [0])}, coords={\"x\": (\"x\", [1])}),\n Dataset({\"foo\": (\"x\", [1])}, coords={\"x\": (\"x\", [0])}),\n ]\n actual = auto_combine(objs)\n expected = Dataset({\"foo\": (\"x\", [0, 1])}, coords={\"x\": (\"x\", [1, 0])})\n assert_identical(expected, actual)\n\n @pytest.mark.parametrize(\"fill_value\", [dtypes.NA, 2, 2.0])\n def test_auto_combine_fill_value(self, fill_value):\n datasets = [\n Dataset({\"a\": (\"x\", [2, 3]), \"x\": [1, 2]}),\n Dataset({\"a\": (\"x\", [1, 2]), \"x\": [0, 1]}),\n ]\n if fill_value == dtypes.NA:\n # if we supply the default, we expect the missing value for a\n # float array\n fill_value = np.nan\n expected = Dataset(\n {\"a\": ((\"t\", \"x\"), [[fill_value, 2, 3], [1, 2, fill_value]])},\n {\"x\": [0, 1, 2]},\n )\n actual = auto_combine(datasets, concat_dim=\"t\", fill_value=fill_value)\n assert_identical(expected, actual)\n\n\nclass TestAutoCombineDeprecation:\n \"\"\"\n Set of tests to check that FutureWarnings are correctly raised until the\n deprecation cycle is complete. #2616\n \"\"\"\n\n def test_auto_combine_with_concat_dim(self):\n objs = [Dataset({\"x\": [0]}), Dataset({\"x\": [1]})]\n with pytest.warns(FutureWarning, match=\"`concat_dim`\"):\n auto_combine(objs, concat_dim=\"x\")\n\n def test_auto_combine_with_merge_and_concat(self):\n objs = [Dataset({\"x\": [0]}), Dataset({\"x\": [1]}), Dataset({\"z\": ((), 99)})]\n with pytest.warns(FutureWarning, match=\"require both concatenation\"):\n auto_combine(objs)\n\n def test_auto_combine_with_coords(self):\n objs = [\n Dataset({\"foo\": (\"x\", [0])}, coords={\"x\": (\"x\", [0])}),\n Dataset({\"foo\": (\"x\", [1])}, coords={\"x\": (\"x\", [1])}),\n ]\n with pytest.warns(FutureWarning, match=\"supplied have global\"):\n auto_combine(objs)\n\n def test_auto_combine_without_coords(self):\n objs = [Dataset({\"foo\": (\"x\", [0])}), Dataset({\"foo\": (\"x\", [1])})]\n with pytest.warns(FutureWarning, match=\"supplied do not have global\"):\n auto_combine(objs)\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
miltondp/clustermatch-gene-expr | [
"664bcf9032f53e22165ce7aa586dbf11365a5827",
"664bcf9032f53e22165ce7aa586dbf11365a5827",
"664bcf9032f53e22165ce7aa586dbf11365a5827"
] | [
"nbs/others/05_clustermatch_profiling/10_cm_optimized/py/02-cdist_parts_v01.py",
"nbs/others/05_clustermatch_profiling/07_cm_optimized/py/07-many_samples.py",
"nbs/others/05_clustermatch_profiling/11_cm_optimized/py/07-many_samples.py"
] | [
"# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: all,-execution,-papermill,-trusted\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.11.5\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] tags=[]\n# # Description\n\n# %% [markdown]\n# UPDATE:\n#\n# list changes here\n\n# %% [markdown]\n# \n\n# %% [markdown] tags=[]\n# # Remove pycache dir\n\n# %%\n# !echo ${CODE_DIR}\n\n# %%\n# !find ${CODE_DIR} -regex '^.*\\(__pycache__\\)$' -print\n\n# %%\n# !find ${CODE_DIR} -regex '^.*\\(__pycache__\\)$' -exec rm -rf {} \\;\n\n# %%\n# !find ${CODE_DIR} -regex '^.*\\(__pycache__\\)$' -print\n\n# %% [markdown] tags=[]\n# # Modules\n\n# %% tags=[]\nimport numpy as np\n\nfrom clustermatch.coef import _cm\n\n# %% [markdown] tags=[]\n# # Settings\n\n# %%\nN_REPS = 10\n\n# %% tags=[]\nnp.random.seed(0)\n\n# %% [markdown] tags=[]\n# # Setup\n\n# %%\n# let numba compile all the code before profiling\n_cm.py_func(np.random.rand(10), np.random.rand(10))\n\n# %% [markdown] tags=[]\n# # Run with `n_samples` small\n\n# %%\nN_SAMPLES = 100\n\n# %%\nx = np.random.rand(N_SAMPLES)\ny = np.random.rand(N_SAMPLES)\n\n\n# %% tags=[]\ndef func():\n for i in range(N_REPS):\n # py_func accesses the original python function, not the numba-optimized one\n # this is needed to be able to profile the function\n _cm.py_func(x, y)\n\n\n# %% tags=[]\n# %%timeit -n1 -r1 func()\nfunc()\n\n# %% tags=[]\n# %%prun -s cumulative -l 20 -T 02-n_samples_small.txt\nfunc()\n\n# %% [markdown] tags=[]\n# **No improvement** for this case.\n\n# %% [markdown] tags=[]\n# # Run with `n_samples` large\n\n# %%\nN_SAMPLES = 100000\n\n# %%\nx = np.random.rand(N_SAMPLES)\ny = np.random.rand(N_SAMPLES)\n\n\n# %% tags=[]\ndef func():\n for i in range(N_REPS):\n # py_func accesses the original python function, not the numba-optimized one\n # this is needed to be able to profile the function\n _cm.py_func(x, y)\n\n\n# %% tags=[]\n# %%timeit -n1 -r1 func()\nfunc()\n\n# %% tags=[]\n# %%prun -s cumulative -l 20 -T 02-n_samples_large.txt\nfunc()\n\n# %% [markdown] tags=[]\n# **Important improvement** for this case. `cdist_parts` takes now 0.370 percall instead of 0.824 (from reference).\n#\n# **However**, compared with `v00` (0.370 per call), this one is slightly worse.\n\n# %%\n",
"# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: all,-execution,-papermill,-trusted\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.11.5\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] tags=[]\n# # Description\n\n# %% [markdown]\n# Clustermatch run using a larger number of samples.\n\n# %% [markdown] tags=[]\n# # Modules\n\n# %% tags=[]\nimport numpy as np\n\nfrom clustermatch.coef import cm\n\n# %% [markdown] tags=[]\n# # Data\n\n# %% tags=[]\nn_genes, n_samples = 10, 30000\n\n# %% tags=[]\nnp.random.seed(0)\n\n# %% tags=[]\ndata = np.random.rand(n_genes, n_samples)\n\n# %% tags=[]\ndata.shape\n\n\n# %% [markdown] tags=[]\n# # With default `internal_n_clusters`\n\n# %% tags=[]\ndef func():\n n_clust = list(range(2, 10 + 1))\n return cm(data, internal_n_clusters=n_clust)\n\n\n# %% tags=[]\n# %%timeit func()\nfunc()\n\n# %% tags=[]\n# %%prun -s cumulative -l 50 -T 07-cm_many_samples-default_internal_n_clusters.txt\nfunc()\n\n\n# %% [markdown] tags=[]\n# # With reduced `internal_n_clusters`\n\n# %% tags=[]\ndef func():\n n_clust = list(range(2, 5 + 1))\n return cm(data, internal_n_clusters=n_clust)\n\n\n# %% tags=[]\n# %%timeit func()\nfunc()\n\n# %% tags=[]\n# %%prun -s cumulative -l 50 -T 07-cm_many_samples-less_internal_n_clusters.txt\nfunc()\n\n# %% tags=[]\n",
"# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: all,-execution,-papermill,-trusted\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.11.5\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] tags=[]\n# # Description\n\n# %% [markdown] tags=[]\n# Clustermatch run using a larger number of samples.\n\n# %% [markdown] tags=[]\n# # Remove pycache dir\n\n# %% tags=[]\n# !echo ${CODE_DIR}\n\n# %% tags=[]\n# !find ${CODE_DIR} -regex '^.*\\(__pycache__\\)$' -print\n\n# %% tags=[]\n# !find ${CODE_DIR} -regex '^.*\\(__pycache__\\)$' -prune -exec rm -rf {} \\;\n\n# %% tags=[]\n# !find ${CODE_DIR} -regex '^.*\\(__pycache__\\)$' -print\n\n# %% [markdown] tags=[]\n# # Modules\n\n# %% tags=[]\nimport numpy as np\n\nfrom clustermatch.coef import cm\n\n# %% tags=[]\n# let numba compile all the code before profiling\ncm(np.random.rand(10), np.random.rand(10))\n\n# %% [markdown] tags=[]\n# # Data\n\n# %% tags=[]\nn_genes, n_samples = 10, 30000\n\n# %% tags=[]\nnp.random.seed(0)\n\n# %% tags=[]\ndata = np.random.rand(n_genes, n_samples)\n\n# %% tags=[]\ndata.shape\n\n\n# %% [markdown] tags=[]\n# # With default `internal_n_clusters`\n\n# %% tags=[]\ndef func():\n n_clust = list(range(2, 10 + 1))\n return cm(data, internal_n_clusters=n_clust)\n\n\n# %% tags=[]\n# %%timeit func()\nfunc()\n\n# %% tags=[]\n# %%prun -s cumulative -l 50 -T 07-cm_many_samples-default_internal_n_clusters.txt\nfunc()\n\n\n# %% [markdown] tags=[]\n# # With reduced `internal_n_clusters`\n\n# %% tags=[]\ndef func():\n n_clust = list(range(2, 5 + 1))\n return cm(data, internal_n_clusters=n_clust)\n\n\n# %% tags=[]\n# %%timeit func()\nfunc()\n\n# %% tags=[]\n# %%prun -s cumulative -l 50 -T 07-cm_many_samples-less_internal_n_clusters.txt\nfunc()\n\n# %% tags=[]\n"
] | [
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.random.rand",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bjwheltor/improver | [
"21b21106f2a7376ee32cd01f47ea81bb770f56a9",
"21b21106f2a7376ee32cd01f47ea81bb770f56a9"
] | [
"improver/ensemble_copula_coupling/ensemble_copula_coupling.py",
"improver_tests/cube_combiner/test_CubeCombiner.py"
] | [
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nThis module defines the plugins required for Ensemble Copula Coupling.\n\n\"\"\"\nimport warnings\nfrom typing import List, Optional, Tuple\n\nimport iris\nimport numpy as np\nfrom iris.cube import Cube\nfrom iris.exceptions import CoordinateNotFoundError, InvalidCubeError\nfrom numpy import ndarray\nfrom scipy import stats\n\nimport improver.ensemble_copula_coupling._scipy_continuous_distns as scipy_cont_distns\nfrom improver import BasePlugin\nfrom improver.calibration.utilities import convert_cube_data_to_2d\nfrom improver.ensemble_copula_coupling.utilities import (\n choose_set_of_percentiles,\n concatenate_2d_array_with_2d_array_endpoints,\n create_cube_with_percentiles,\n get_bounds_of_distribution,\n insert_lower_and_upper_endpoint_to_1d_array,\n interpolate_multiple_rows_same_x,\n interpolate_multiple_rows_same_y,\n restore_non_percentile_dimensions,\n)\nfrom improver.metadata.probabilistic import (\n find_percentile_coordinate,\n find_threshold_coordinate,\n format_cell_methods_for_diagnostic,\n get_diagnostic_cube_name_from_probability_name,\n get_threshold_coord_name_from_probability_name,\n probability_is_above_or_below,\n)\nfrom improver.utilities.cube_checker import (\n check_cube_coordinates,\n check_for_x_and_y_axes,\n)\nfrom improver.utilities.cube_manipulation import (\n MergeCubes,\n enforce_coordinate_ordering,\n get_dim_coord_names,\n)\nfrom improver.utilities.indexing_operations import choose\n\n\nclass RebadgePercentilesAsRealizations(BasePlugin):\n \"\"\"\n Class to rebadge percentiles as ensemble realizations.\n This will allow the quantisation to percentiles to be completed, without\n a subsequent EnsembleReordering step to restore spatial correlations,\n if required.\n \"\"\"\n\n @staticmethod\n def process(\n cube: Cube, ensemble_realization_numbers: Optional[ndarray] = None\n ) -> Cube:\n \"\"\"\n 
Rebadge percentiles as ensemble realizations. The ensemble\n realization numbering will depend upon the number of percentiles in\n the input cube i.e. 0, 1, 2, 3, ..., n-1, if there are n percentiles.\n\n Args:\n cube:\n Cube containing a percentile coordinate, which will be\n rebadged as ensemble realization.\n ensemble_realization_numbers:\n An array containing the ensemble numbers required in the output\n realization coordinate. Default is None, meaning the\n realization coordinate will be numbered 0, 1, 2 ... n-1 for n\n percentiles on the input cube.\n\n Returns:\n Processed cube\n\n Raises:\n InvalidCubeError:\n If the realization coordinate already exists on the cube.\n \"\"\"\n percentile_coord_name = find_percentile_coordinate(cube).name()\n\n if ensemble_realization_numbers is None:\n ensemble_realization_numbers = np.arange(\n len(cube.coord(percentile_coord_name).points), dtype=np.int32\n )\n\n cube.coord(percentile_coord_name).points = ensemble_realization_numbers\n\n # we can't rebadge if the realization coordinate already exists:\n try:\n realization_coord = cube.coord(\"realization\")\n except CoordinateNotFoundError:\n realization_coord = None\n\n if realization_coord:\n raise InvalidCubeError(\n \"Cannot rebadge percentile coordinate to realization \"\n \"coordinate because a realization coordinate already exists.\"\n )\n\n cube.coord(percentile_coord_name).rename(\"realization\")\n cube.coord(\"realization\").units = \"1\"\n cube.coord(\"realization\").points = cube.coord(\"realization\").points.astype(\n np.int32\n )\n\n return cube\n\n\nclass ResamplePercentiles(BasePlugin):\n \"\"\"\n Class for resampling percentiles from an existing set of percentiles.\n In combination with the Ensemble Reordering plugin, this is a variant of\n Ensemble Copula Coupling.\n\n This class includes the ability to linearly interpolate from an\n input set of percentiles to a different output set of percentiles.\n\n \"\"\"\n\n def __init__(self, ecc_bounds_warning: bool = False) -> None:\n \"\"\"\n Initialise the class.\n\n Args:\n ecc_bounds_warning:\n If true and ECC bounds are exceeded by the percentile values,\n a warning will be generated rather than an exception.\n Default value is FALSE.\n \"\"\"\n self.ecc_bounds_warning = ecc_bounds_warning\n\n def _add_bounds_to_percentiles_and_forecast_at_percentiles(\n self,\n percentiles: ndarray,\n forecast_at_percentiles: ndarray,\n bounds_pairing: Tuple[int, int],\n ) -> Tuple[ndarray, ndarray]:\n \"\"\"\n Padding of the lower and upper bounds of the percentiles for a\n given phenomenon, and padding of forecast values using the\n constant lower and upper bounds.\n\n Args:\n percentiles:\n Array of percentiles from a Cumulative Distribution Function.\n forecast_at_percentiles:\n Array containing the underlying forecast values at each\n percentile.\n bounds_pairing:\n Lower and upper bound to be used as the ends of the\n cumulative distribution function.\n\n Returns:\n - Percentiles\n - Forecast at percentiles with endpoints\n\n Raises:\n ValueError: If the percentile points are outside the ECC bounds\n and self.ecc_bounds_warning is False.\n ValueError: If the percentiles are not in ascending order.\n\n Warns:\n Warning: If the percentile points are outside the ECC bounds\n and self.ecc_bounds_warning is True.\n \"\"\"\n lower_bound, upper_bound = bounds_pairing\n percentiles = insert_lower_and_upper_endpoint_to_1d_array(percentiles, 0, 100)\n forecast = concatenate_2d_array_with_2d_array_endpoints(\n forecast_at_percentiles, lower_bound, 
upper_bound\n )\n\n if np.any(np.diff(forecast) < 0):\n out_of_bounds_vals = forecast[np.where(np.diff(forecast) < 0)]\n msg = (\n \"Forecast values exist that fall outside the expected extrema \"\n \"values that are defined as bounds in \"\n \"ensemble_copula_coupling/constants.py. \"\n \"Applying the extrema values as end points to the distribution \"\n \"would result in non-monotonically increasing values. \"\n \"The defined extremes are {}, whilst the following forecast \"\n \"values exist outside this range: {}.\".format(\n bounds_pairing, out_of_bounds_vals\n )\n )\n\n if self.ecc_bounds_warning:\n warn_msg = msg + (\n \" The percentile values that have \"\n \"exceeded the existing bounds will be used \"\n \"as new bounds.\"\n )\n warnings.warn(warn_msg)\n if upper_bound < forecast.max():\n upper_bound = forecast.max()\n if lower_bound > forecast.min():\n lower_bound = forecast.min()\n forecast = concatenate_2d_array_with_2d_array_endpoints(\n forecast_at_percentiles, lower_bound, upper_bound\n )\n else:\n raise ValueError(msg)\n if np.any(np.diff(percentiles) < 0):\n msg = (\n \"The percentiles must be in ascending order.\"\n \"The input percentiles were {}\".format(percentiles)\n )\n raise ValueError(msg)\n return percentiles, forecast\n\n def _interpolate_percentiles(\n self,\n forecast_at_percentiles: Cube,\n desired_percentiles: ndarray,\n bounds_pairing: Tuple[int, int],\n percentile_coord_name: str,\n ) -> Cube:\n \"\"\"\n Interpolation of forecast for a set of percentiles from an initial\n set of percentiles to a new set of percentiles. This is constructed\n by linearly interpolating between the original set of percentiles\n to a new set of percentiles.\n\n Args:\n forecast_at_percentiles:\n Cube containing a percentile coordinate.\n desired_percentiles:\n Array of the desired percentiles.\n bounds_pairing:\n Lower and upper bound to be used as the ends of the\n cumulative distribution function.\n percentile_coord_name:\n Name of required percentile coordinate.\n\n Returns:\n Cube containing values for the required diagnostic e.g.\n air_temperature at the required percentiles.\n \"\"\"\n original_percentiles = forecast_at_percentiles.coord(\n percentile_coord_name\n ).points\n\n original_mask = None\n if np.ma.is_masked(forecast_at_percentiles.data):\n original_mask = forecast_at_percentiles.data.mask[0]\n\n # Ensure that the percentile dimension is first, so that the\n # conversion to a 2d array produces data in the desired order.\n enforce_coordinate_ordering(forecast_at_percentiles, percentile_coord_name)\n forecast_at_reshaped_percentiles = convert_cube_data_to_2d(\n forecast_at_percentiles, coord=percentile_coord_name\n )\n\n (\n original_percentiles,\n forecast_at_reshaped_percentiles,\n ) = self._add_bounds_to_percentiles_and_forecast_at_percentiles(\n original_percentiles, forecast_at_reshaped_percentiles, bounds_pairing\n )\n\n forecast_at_interpolated_percentiles = interpolate_multiple_rows_same_x(\n np.array(desired_percentiles, dtype=np.float64),\n original_percentiles.astype(np.float64),\n forecast_at_reshaped_percentiles.astype(np.float64),\n )\n forecast_at_interpolated_percentiles = np.transpose(\n forecast_at_interpolated_percentiles\n )\n\n # Reshape forecast_at_percentiles, so the percentiles dimension is\n # first, and any other dimension coordinates follow.\n forecast_at_percentiles_data = restore_non_percentile_dimensions(\n forecast_at_interpolated_percentiles,\n next(forecast_at_percentiles.slices_over(percentile_coord_name)),\n 
len(desired_percentiles),\n )\n\n template_cube = next(forecast_at_percentiles.slices_over(percentile_coord_name))\n template_cube.remove_coord(percentile_coord_name)\n percentile_cube = create_cube_with_percentiles(\n desired_percentiles, template_cube, forecast_at_percentiles_data,\n )\n if original_mask is not None:\n original_mask = np.broadcast_to(original_mask, percentile_cube.shape)\n percentile_cube.data = np.ma.MaskedArray(\n percentile_cube.data, mask=original_mask\n )\n return percentile_cube\n\n def process(\n self,\n forecast_at_percentiles: Cube,\n no_of_percentiles: Optional[int] = None,\n sampling: Optional[str] = \"quantile\",\n percentiles: Optional[List] = None,\n ) -> Cube:\n \"\"\"\n 1. Creates a list of percentiles, if not provided.\n 2. Accesses the lower and upper bound pair of the forecast values,\n in order to specify lower and upper bounds for the percentiles.\n 3. Interpolate the percentile coordinate into an alternative\n set of percentiles using linear interpolation.\n\n Args:\n forecast_at_percentiles:\n Cube expected to contain a percentile coordinate.\n no_of_percentiles:\n Number of percentiles\n If None, the number of percentiles within the input\n forecast_at_percentiles cube is used as the\n number of percentiles.\n sampling:\n Type of sampling of the distribution to produce a set of\n percentiles e.g. quantile or random.\n\n Accepted options for sampling are:\n\n * Quantile: A regular set of equally-spaced percentiles aimed\n at dividing a Cumulative Distribution Function into\n blocks of equal probability.\n * Random: A random set of ordered percentiles.\n percentiles:\n List of the desired output percentiles.\n\n Returns:\n Cube with forecast values at the desired set of percentiles.\n The percentile coordinate is always the zeroth dimension.\n\n Raises:\n ValueError: The percentiles supplied must be between 0 and 100.\n \"\"\"\n percentile_coord = find_percentile_coordinate(forecast_at_percentiles)\n\n if percentiles:\n if any(p < 0 or p > 100 for p in percentiles):\n msg = (\n \"The percentiles supplied must be between 0 and 100. 
\"\n f\"Percentiles supplied: {percentiles}\"\n )\n raise ValueError(msg)\n else:\n if no_of_percentiles is None:\n no_of_percentiles = len(\n forecast_at_percentiles.coord(percentile_coord).points\n )\n percentiles = choose_set_of_percentiles(\n no_of_percentiles, sampling=sampling\n )\n\n cube_units = forecast_at_percentiles.units\n bounds_pairing = get_bounds_of_distribution(\n forecast_at_percentiles.name(), cube_units\n )\n\n forecast_at_percentiles = self._interpolate_percentiles(\n forecast_at_percentiles,\n percentiles,\n bounds_pairing,\n percentile_coord.name(),\n )\n return forecast_at_percentiles\n\n\nclass ConvertProbabilitiesToPercentiles(BasePlugin):\n \"\"\"\n Class for generating percentiles from probabilities.\n In combination with the Ensemble Reordering plugin, this is a variant\n Ensemble Copula Coupling.\n\n This class includes the ability to interpolate between probabilities\n specified using multiple thresholds in order to generate the percentiles,\n see Figure 1 from Flowerdew, 2014.\n\n Scientific Reference:\n Flowerdew, J., 2014.\n Calibrated ensemble reliability whilst preserving spatial structure.\n Tellus Series A, Dynamic Meteorology and Oceanography, 66, 22662.\n\n \"\"\"\n\n def __init__(self, ecc_bounds_warning: bool = False) -> None:\n \"\"\"\n Initialise the class.\n\n Args:\n ecc_bounds_warning:\n If true and ECC bounds are exceeded by the percentile values,\n a warning will be generated rather than an exception.\n Default value is FALSE.\n \"\"\"\n self.ecc_bounds_warning = ecc_bounds_warning\n\n def _add_bounds_to_thresholds_and_probabilities(\n self,\n threshold_points: ndarray,\n probabilities_for_cdf: ndarray,\n bounds_pairing: Tuple[int, int],\n ) -> Tuple[ndarray, ndarray]:\n \"\"\"\n Padding of the lower and upper bounds of the distribution for a\n given phenomenon for the threshold_points, and padding of\n probabilities of 0 and 1 to the forecast probabilities.\n\n Args:\n threshold_points:\n Array of threshold values used to calculate the probabilities.\n probabilities_for_cdf:\n Array containing the probabilities used for constructing an\n cumulative distribution function i.e. probabilities\n below threshold.\n bounds_pairing:\n Lower and upper bound to be used as the ends of the\n cumulative distribution function.\n\n Returns:\n - Array of threshold values padded with the lower and upper\n bound of the distribution.\n - Array containing the probabilities padded with 0 and 1 at\n each end.\n\n Raises:\n ValueError: If the thresholds exceed the ECC bounds for\n the diagnostic and self.ecc_bounds_warning is False.\n\n Warns:\n Warning: If the thresholds exceed the ECC bounds for\n the diagnostic and self.ecc_bounds_warning is True.\n \"\"\"\n lower_bound, upper_bound = bounds_pairing\n threshold_points_with_endpoints = insert_lower_and_upper_endpoint_to_1d_array(\n threshold_points, lower_bound, upper_bound\n )\n probabilities_for_cdf = concatenate_2d_array_with_2d_array_endpoints(\n probabilities_for_cdf, 0, 1\n )\n\n if np.any(np.diff(threshold_points_with_endpoints) < 0):\n msg = (\n \"The calculated threshold values {} are not in ascending \"\n \"order as required for the cumulative distribution \"\n \"function (CDF). This is due to the threshold values \"\n \"exceeding the range given by the ECC bounds {}.\".format(\n threshold_points_with_endpoints, bounds_pairing\n )\n )\n # If ecc_bounds_warning has been set, generate a warning message\n # rather than raising an exception so that subsequent processing\n # can continue. 
Then apply the new bounds as necessary to\n # ensure the threshold values and endpoints are in ascending\n # order and avoid problems further along the processing chain.\n if self.ecc_bounds_warning:\n warn_msg = msg + (\n \" The threshold points that have \"\n \"exceeded the existing bounds will be used \"\n \"as new bounds.\"\n )\n warnings.warn(warn_msg)\n if upper_bound < max(threshold_points_with_endpoints):\n upper_bound = max(threshold_points_with_endpoints)\n if lower_bound > min(threshold_points_with_endpoints):\n lower_bound = min(threshold_points_with_endpoints)\n threshold_points_with_endpoints = insert_lower_and_upper_endpoint_to_1d_array(\n threshold_points, lower_bound, upper_bound\n )\n else:\n raise ValueError(msg)\n return threshold_points_with_endpoints, probabilities_for_cdf\n\n def _probabilities_to_percentiles(\n self,\n forecast_probabilities: Cube,\n percentiles: ndarray,\n bounds_pairing: Tuple[int, int],\n ) -> Cube:\n \"\"\"\n Conversion of probabilities to percentiles through the construction\n of an cumulative distribution function. This is effectively\n constructed by linear interpolation from the probabilities associated\n with each threshold to a set of percentiles.\n\n Args:\n forecast_probabilities:\n Cube with a threshold coordinate.\n percentiles:\n Array of percentiles, at which the corresponding values will be\n calculated.\n bounds_pairing:\n Lower and upper bound to be used as the ends of the\n cumulative distribution function.\n\n Returns:\n Cube containing values for the required diagnostic e.g.\n air_temperature at the required percentiles.\n\n Raises:\n NotImplementedError: If the threshold coordinate has an\n spp__relative_to_threshold attribute that is not either\n \"above\" or \"below\".\n\n Warns:\n Warning: If the probability values are not ascending, so the\n resulting cdf is not monotonically increasing.\n \"\"\"\n threshold_coord = find_threshold_coordinate(forecast_probabilities)\n threshold_unit = threshold_coord.units\n threshold_points = threshold_coord.points\n\n original_mask = None\n if np.ma.is_masked(forecast_probabilities.data):\n original_mask = forecast_probabilities.data.mask[0]\n\n # Ensure that the percentile dimension is first, so that the\n # conversion to a 2d array produces data in the desired order.\n enforce_coordinate_ordering(forecast_probabilities, threshold_coord.name())\n prob_slices = convert_cube_data_to_2d(\n forecast_probabilities, coord=threshold_coord.name()\n )\n\n # The requirement below for a monotonically changing probability\n # across thresholds can be thwarted by precision errors of order 1E-10,\n # as such, here we round to a precision of 9 decimal places.\n prob_slices = np.around(prob_slices, 9)\n\n # Invert probabilities for data thresholded above thresholds.\n relation = probability_is_above_or_below(forecast_probabilities)\n if relation == \"above\":\n probabilities_for_cdf = 1 - prob_slices\n elif relation == \"below\":\n probabilities_for_cdf = prob_slices\n else:\n msg = (\n \"Probabilities to percentiles only implemented for \"\n \"thresholds above or below a given value.\"\n \"The relation to threshold is given as {}\".format(relation)\n )\n raise NotImplementedError(msg)\n\n (\n threshold_points,\n probabilities_for_cdf,\n ) = self._add_bounds_to_thresholds_and_probabilities(\n threshold_points, probabilities_for_cdf, bounds_pairing\n )\n\n if np.any(np.diff(probabilities_for_cdf) < 0):\n msg = (\n \"The probability values used to construct the \"\n \"Cumulative Distribution Function 
(CDF) \"\n \"must be ascending i.e. in order to yield \"\n \"a monotonically increasing CDF.\"\n \"The probabilities are {}\".format(probabilities_for_cdf)\n )\n warnings.warn(msg)\n\n # Convert percentiles into fractions.\n percentiles_as_fractions = np.array(\n [x / 100.0 for x in percentiles], dtype=np.float32\n )\n\n forecast_at_percentiles = interpolate_multiple_rows_same_y(\n percentiles_as_fractions.astype(np.float64),\n probabilities_for_cdf.astype(np.float64),\n threshold_points.astype(np.float64),\n )\n forecast_at_percentiles = forecast_at_percentiles.transpose()\n\n # Reshape forecast_at_percentiles, so the percentiles dimension is\n # first, and any other dimension coordinates follow.\n forecast_at_percentiles = restore_non_percentile_dimensions(\n forecast_at_percentiles,\n next(forecast_probabilities.slices_over(threshold_coord)),\n len(percentiles),\n )\n\n template_cube = next(forecast_probabilities.slices_over(threshold_coord.name()))\n template_cube.rename(\n get_diagnostic_cube_name_from_probability_name(template_cube.name())\n )\n template_cube.remove_coord(threshold_coord.name())\n\n percentile_cube = create_cube_with_percentiles(\n percentiles,\n template_cube,\n forecast_at_percentiles,\n cube_unit=threshold_unit,\n )\n\n if original_mask is not None:\n original_mask = np.broadcast_to(original_mask, percentile_cube.shape)\n percentile_cube.data = np.ma.MaskedArray(\n percentile_cube.data, mask=original_mask\n )\n return percentile_cube\n\n def process(\n self,\n forecast_probabilities: Cube,\n no_of_percentiles: Optional[int] = None,\n percentiles: Optional[List[float]] = None,\n sampling: str = \"quantile\",\n ) -> Cube:\n \"\"\"\n 1. Concatenates cubes with a threshold coordinate.\n 2. Creates a list of percentiles.\n 3. Accesses the lower and upper bound pair to find the ends of the\n cumulative distribution function.\n 4. Convert the threshold coordinate into\n values at a set of percentiles using linear interpolation,\n see Figure 1 from Flowerdew, 2014.\n\n Args:\n forecast_probabilities:\n Cube containing a threshold coordinate.\n no_of_percentiles:\n Number of percentiles. If None and percentiles is not set,\n the number of thresholds within the input\n forecast_probabilities cube is used as the number of\n percentiles. This argument is mutually exclusive with\n percentiles.\n percentiles:\n The desired percentile values in the interval [0, 100].\n This argument is mutually exclusive with no_of_percentiles.\n sampling:\n Type of sampling of the distribution to produce a set of\n percentiles e.g. 
quantile or random.\n\n Accepted options for sampling are:\n\n * Quantile: A regular set of equally-spaced percentiles aimed\n at dividing a Cumulative Distribution Function into\n blocks of equal probability.\n * Random: A random set of ordered percentiles.\n\n Returns:\n Cube with forecast values at the desired set of percentiles.\n The threshold coordinate is always the zeroth dimension.\n\n Raises:\n ValueError: If both no_of_percentiles and percentiles are provided\n \"\"\"\n if no_of_percentiles is not None and percentiles is not None:\n raise ValueError(\n \"Cannot specify both no_of_percentiles and percentiles to \"\n \"{}\".format(self.__class__.__name__)\n )\n\n threshold_coord = find_threshold_coordinate(forecast_probabilities)\n phenom_name = get_threshold_coord_name_from_probability_name(\n forecast_probabilities.name()\n )\n\n if no_of_percentiles is None:\n no_of_percentiles = len(\n forecast_probabilities.coord(threshold_coord.name()).points\n )\n\n if percentiles is None:\n percentiles = choose_set_of_percentiles(\n no_of_percentiles, sampling=sampling\n )\n elif not isinstance(percentiles, (tuple, list)):\n percentiles = [percentiles]\n percentiles = np.array(percentiles, dtype=np.float32)\n\n cube_units = forecast_probabilities.coord(threshold_coord.name()).units\n bounds_pairing = get_bounds_of_distribution(phenom_name, cube_units)\n\n # If a cube still has multiple realizations, slice over these to reduce\n # the memory requirements into manageable chunks.\n try:\n slices_over_realization = forecast_probabilities.slices_over(\"realization\")\n except CoordinateNotFoundError:\n slices_over_realization = [forecast_probabilities]\n\n cubelist = iris.cube.CubeList([])\n for cube_realization in slices_over_realization:\n cubelist.append(\n self._probabilities_to_percentiles(\n cube_realization, percentiles, bounds_pairing\n )\n )\n forecast_at_percentiles = cubelist.merge_cube()\n\n # Update cell methods on final cube\n if forecast_at_percentiles.cell_methods:\n format_cell_methods_for_diagnostic(forecast_at_percentiles)\n\n return forecast_at_percentiles\n\n\nclass ConvertLocationAndScaleParameters:\n \"\"\"\n Base Class to support the plugins that compute percentiles and\n probabilities from the location and scale parameters.\n \"\"\"\n\n def __init__(\n self, distribution: str = \"norm\", shape_parameters: Optional[ndarray] = None,\n ) -> None:\n \"\"\"\n Initialise the class.\n\n In order to construct percentiles or probabilities from the location\n or scale parameter, the distribution for the resulting output needs\n to be selected. For use with the outputs from EMOS, where it has been\n assumed that the outputs from minimising the CRPS follow a particular\n distribution, then the same distribution should be selected, as used\n for the CRPS minimisation. The conversion to percentiles and\n probabilities from the location and scale parameter relies upon\n functionality within scipy.stats.\n\n Args:\n distribution:\n Name of a distribution supported by scipy.stats.\n shape_parameters:\n For use with distributions in scipy.stats (e.g. truncnorm) that\n require the specification of shape parameters to be able to\n define the shape of the distribution. 
For the truncated normal\n distribution, the shape parameters should be appropriate for\n the distribution constructed from the location and scale\n parameters provided.\n Please note that for use with\n :meth:`~improver.calibration.\\\nensemble_calibration.ContinuousRankedProbabilityScoreMinimisers.\\\ncalculate_truncated_normal_crps`,\n the shape parameters for a truncated normal distribution with\n a lower bound of zero should be [0, np.inf].\n\n \"\"\"\n if distribution == \"truncnorm\":\n # Use scipy v1.3.3 truncnorm\n self.distribution = scipy_cont_distns.truncnorm\n else:\n try:\n self.distribution = getattr(stats, distribution)\n except AttributeError as err:\n msg = (\n \"The distribution requested {} is not a valid distribution \"\n \"in scipy.stats. {}\".format(distribution, err)\n )\n raise AttributeError(msg)\n\n if shape_parameters is None:\n if self.distribution.name == \"truncnorm\":\n raise ValueError(\n \"For the truncated normal distribution, \"\n \"shape parameters must be specified.\"\n )\n shape_parameters = []\n self.shape_parameters = shape_parameters\n\n def __repr__(self) -> str:\n \"\"\"Represent the configured plugin instance as a string.\"\"\"\n result = (\n \"<ConvertLocationAndScaleParameters: distribution: {}; \"\n \"shape_parameters: {}>\"\n )\n return result.format(self.distribution.name, self.shape_parameters)\n\n def _rescale_shape_parameters(\n self, location_parameter: ndarray, scale_parameter: ndarray\n ) -> None:\n \"\"\"\n Rescale the shape parameters for the desired location and scale\n parameters for the truncated normal distribution. The shape parameters\n for any other distribution will remain unchanged.\n\n For the truncated normal distribution, if the shape parameters are not\n rescaled, then :data:`scipy.stats.truncnorm` will assume that the shape\n parameters are appropriate for a standard normal distribution. As the\n aim is to construct a distribution using specific values for the\n location and scale parameters, the assumption of a standard normal\n distribution is not appropriate. Therefore the shape parameters are\n rescaled using the equations:\n\n .. math::\n a\\\\_rescaled = (a - location\\\\_parameter)/scale\\\\_parameter\n\n b\\\\_rescaled = (b - location\\\\_parameter)/scale\\\\_parameter\n\n Please see :data:`scipy.stats.truncnorm` for some further information.\n\n Args:\n location_parameter:\n Location parameter to be used to scale the shape parameters.\n scale_parameter:\n Scale parameter to be used to scale the shape parameters.\n \"\"\"\n if self.distribution.name == \"truncnorm\":\n rescaled_values = []\n for value in self.shape_parameters:\n rescaled_values.append((value - location_parameter) / scale_parameter)\n self.shape_parameters = rescaled_values\n\n\nclass ConvertLocationAndScaleParametersToPercentiles(\n BasePlugin, ConvertLocationAndScaleParameters\n):\n \"\"\"\n Plugin focusing on generating percentiles from location and scale\n parameters. 
In combination with the EnsembleReordering plugin, this is\n Ensemble Copula Coupling.\n \"\"\"\n\n def __repr__(self) -> str:\n \"\"\"Represent the configured plugin instance as a string.\"\"\"\n result = (\n \"<ConvertLocationAndScaleParametersToPercentiles: \"\n \"distribution: {}; shape_parameters: {}>\"\n )\n return result.format(self.distribution.name, self.shape_parameters)\n\n def _location_and_scale_parameters_to_percentiles(\n self,\n location_parameter: Cube,\n scale_parameter: Cube,\n template_cube: Cube,\n percentiles: List[float],\n ) -> Cube:\n \"\"\"\n Function returning percentiles based on the supplied location and\n scale parameters.\n\n Args:\n location_parameter:\n Location parameter of calibrated distribution.\n scale_parameter:\n Scale parameter of the calibrated distribution.\n template_cube:\n Template cube containing either a percentile or realization\n coordinate. All coordinates apart from the percentile or\n realization coordinate will be copied from the template cube.\n Metadata will also be copied from this cube.\n percentiles:\n Percentiles at which to calculate the value of the phenomenon\n at.\n\n Returns:\n Cube containing the values for the phenomenon at each of the\n percentiles requested.\n\n Raises:\n ValueError: If any of the resulting percentile values are\n nans and these nans are not caused by a scale parameter of\n zero.\n \"\"\"\n # Remove any mask that may be applied to location and scale parameters\n # and replace with ones\n location_data = np.ma.filled(location_parameter.data, 1).flatten()\n scale_data = np.ma.filled(scale_parameter.data, 1).flatten()\n\n # Convert percentiles into fractions.\n percentiles_as_fractions = np.array(\n [x / 100.0 for x in percentiles], dtype=np.float32\n )\n\n result = np.zeros(\n (len(percentiles_as_fractions), location_data.shape[0]), dtype=np.float32\n )\n\n self._rescale_shape_parameters(location_data, scale_data)\n\n percentile_method = self.distribution(\n *self.shape_parameters, loc=location_data, scale=scale_data\n )\n\n # Loop over percentiles, and use the distribution as the\n # \"percentile_method\" with the location and scale parameter to\n # calculate the values at each percentile.\n for index, percentile in enumerate(percentiles_as_fractions):\n percentile_list = np.repeat(percentile, len(location_data))\n result[index, :] = percentile_method.ppf(percentile_list)\n # If percent point function (PPF) returns NaNs, fill in\n # mean instead of NaN values. NaN will only be generated if the\n # scale parameter (standard deviation) is zero. Therefore, if the\n # scale parameter (standard deviation) is zero, the mean value is\n # used for all gridpoints with a NaN.\n if np.any(scale_data == 0):\n nan_index = np.argwhere(np.isnan(result[index, :]))\n result[index, nan_index] = location_data[nan_index]\n if np.any(np.isnan(result)):\n msg = (\n \"NaNs are present within the result for the {} \"\n \"percentile. 
Unable to calculate the percent point \"\n \"function.\"\n )\n raise ValueError(msg)\n\n # Reshape forecast_at_percentiles, so the percentiles dimension is\n # first, and any other dimension coordinates follow.\n result = result.reshape((len(percentiles),) + location_parameter.data.shape)\n\n for prob_coord_name in [\"realization\", \"percentile\"]:\n if template_cube.coords(prob_coord_name, dim_coords=True):\n prob_coord = template_cube.coord(prob_coord_name)\n template_slice = next(template_cube.slices_over(prob_coord))\n template_slice.remove_coord(prob_coord)\n elif template_cube.coords(prob_coord_name, dim_coords=False):\n template_slice = template_cube\n\n percentile_cube = create_cube_with_percentiles(\n percentiles, template_slice, result\n )\n # Define a mask to be reapplied later\n mask = np.logical_or(\n np.ma.getmaskarray(location_parameter.data),\n np.ma.getmaskarray(scale_parameter.data),\n )\n # Make the mask defined above fit the data size and then apply to the\n # percentile cube.\n mask_array = np.stack([mask] * len(percentiles))\n percentile_cube.data = np.ma.masked_where(mask_array, percentile_cube.data)\n # Remove cell methods associated with finding the ensemble mean\n percentile_cube.cell_methods = {}\n return percentile_cube\n\n def process(\n self,\n location_parameter: Cube,\n scale_parameter: Cube,\n template_cube: Cube,\n no_of_percentiles: Optional[int] = None,\n percentiles: Optional[List[float]] = None,\n ) -> Cube:\n \"\"\"\n Generate ensemble percentiles from the location and scale parameters.\n\n Args:\n location_parameter:\n Cube containing the location parameters.\n scale_parameter:\n Cube containing the scale parameters.\n template_cube:\n Template cube containing either a percentile or realization\n coordinate. All coordinates apart from the percentile or\n realization coordinate will be copied from the template cube.\n Metadata will also be copied from this cube.\n no_of_percentiles:\n Integer defining the number of percentiles that will be\n calculated from the location and scale parameters.\n percentiles:\n List of percentiles that will be generated from the location\n and scale parameters provided.\n\n Returns:\n Cube for calibrated percentiles.\n The percentile coordinate is always the zeroth dimension.\n\n Raises:\n ValueError: Ensure that it is not possible to supply\n \"no_of_percentiles\" and \"percentiles\" simultaneously\n as keyword arguments.\n \"\"\"\n if no_of_percentiles and percentiles:\n msg = (\n \"Please specify either the number of percentiles or \"\n \"provide a list of percentiles. 
The number of percentiles \"\n \"provided was {} and the list of percentiles \"\n \"provided was {}\".format(no_of_percentiles, percentiles)\n )\n raise ValueError(msg)\n\n if no_of_percentiles:\n percentiles = choose_set_of_percentiles(no_of_percentiles)\n calibrated_forecast_percentiles = self._location_and_scale_parameters_to_percentiles(\n location_parameter, scale_parameter, template_cube, percentiles\n )\n\n return calibrated_forecast_percentiles\n\n\nclass ConvertLocationAndScaleParametersToProbabilities(\n BasePlugin, ConvertLocationAndScaleParameters\n):\n \"\"\"\n Plugin to generate probabilities relative to given thresholds from the\n location and scale parameters of a distribution.\n \"\"\"\n\n def __repr__(self) -> str:\n \"\"\"Represent the configured plugin instance as a string.\"\"\"\n result = (\n \"<ConvertLocationAndScaleParametersToProbabilities: \"\n \"distribution: {}; shape_parameters: {}>\"\n )\n return result.format(self.distribution.name, self.shape_parameters)\n\n def _check_template_cube(self, cube: Cube) -> None:\n \"\"\"\n The template cube is expected to contain a leading threshold dimension\n followed by spatial (y/x) dimensions for a gridded cube. For a spot\n template cube, the spatial dimensions are not expected to be dimension\n coordinates. If the cube contains the expected dimensions,\n a threshold leading order is enforced.\n\n Args:\n cube:\n A cube whose dimensions are checked to ensure they match what\n is expected.\n\n Raises:\n ValueError: If cube is not of the expected dimensions.\n \"\"\"\n require_dim_coords = False if cube.coords(\"wmo_id\") else True\n check_for_x_and_y_axes(cube, require_dim_coords=require_dim_coords)\n dim_coords = get_dim_coord_names(cube)\n msg = (\n \"{} expects a cube with only a leading threshold dimension, \"\n \"followed by spatial (y/x) dimensions. \"\n \"Got dimensions: {}\".format(self.__class__.__name__, dim_coords)\n )\n\n try:\n threshold_coord = find_threshold_coordinate(cube)\n except CoordinateNotFoundError:\n raise ValueError(msg)\n\n if len(dim_coords) < 4:\n enforce_coordinate_ordering(cube, threshold_coord.name())\n return\n\n raise ValueError(msg)\n\n @staticmethod\n def _check_unit_compatibility(\n location_parameter: Cube, scale_parameter: Cube, probability_cube_template: Cube\n ) -> None:\n \"\"\"\n The location parameter, scale parameters, and threshold values come\n from three different cubes. 
This is a sanity check to ensure the units\n are as expected, converting units of the location parameter and\n scale parameter if possible.\n\n Args:\n location_parameter:\n Cube of location parameter values.\n scale_parameter:\n Cube of scale parameter values.\n probability_cube_template:\n Cube containing threshold values.\n\n Raises:\n ValueError: If units of input cubes are not compatible.\n \"\"\"\n threshold_units = find_threshold_coordinate(probability_cube_template).units\n\n try:\n location_parameter.convert_units(threshold_units)\n scale_parameter.convert_units(threshold_units)\n except ValueError as err:\n msg = (\n \"Error: {} This is likely because the location parameter, \"\n \"scale parameter and template cube threshold units are \"\n \"not equivalent/compatible.\".format(err)\n )\n raise ValueError(msg)\n\n def _location_and_scale_parameters_to_probabilities(\n self,\n location_parameter: Cube,\n scale_parameter: Cube,\n probability_cube_template: Cube,\n ) -> Cube:\n \"\"\"\n Function returning probabilities relative to provided thresholds based\n on the supplied location and scale parameters.\n\n Args:\n location_parameter:\n Predictor for the calibrated forecast location parameter.\n scale_parameter:\n Scale parameter for the calibrated forecast.\n probability_cube_template:\n A probability cube that has a threshold coordinate, where the\n probabilities are defined as above or below the threshold by\n the spp__relative_to_threshold attribute. This cube matches\n the desired output cube format.\n\n Returns:\n Cube containing the data expressed as probabilities relative to\n the provided thresholds in the way described by\n spp__relative_to_threshold.\n \"\"\"\n # Define a mask to be reapplied later\n loc_mask = np.ma.getmaskarray(location_parameter.data)\n scale_mask = np.ma.getmaskarray(scale_parameter.data)\n mask = np.logical_or(loc_mask, scale_mask)\n # Remove any mask that may be applied to location and scale parameters\n # and replace with ones\n location_parameter.data = np.ma.filled(location_parameter.data, 1)\n scale_parameter.data = np.ma.filled(scale_parameter.data, 1)\n thresholds = find_threshold_coordinate(probability_cube_template).points\n relative_to_threshold = probability_is_above_or_below(probability_cube_template)\n\n self._rescale_shape_parameters(\n location_parameter.data.flatten(), scale_parameter.data.flatten()\n )\n\n # Loop over thresholds, and use the specified distribution with the\n # location and scale parameter to calculate the probabilities relative\n # to each threshold.\n probabilities = np.empty_like(probability_cube_template.data)\n\n distribution = self.distribution(\n *self.shape_parameters,\n loc=location_parameter.data.flatten(),\n scale=scale_parameter.data.flatten(),\n )\n\n probability_method = distribution.cdf\n if relative_to_threshold == \"above\":\n probability_method = distribution.sf\n\n for index, threshold in enumerate(thresholds):\n probabilities[index, ...] 
= np.reshape(\n probability_method(threshold), probabilities.shape[1:]\n )\n\n probability_cube = probability_cube_template.copy(data=probabilities)\n # Make the mask defined above fit the data size and then apply to the\n # probability cube.\n mask_array = np.array([mask] * len(probabilities))\n probability_cube.data = np.ma.masked_where(mask_array, probability_cube.data)\n return probability_cube\n\n def process(\n self,\n location_parameter: Cube,\n scale_parameter: Cube,\n probability_cube_template: Cube,\n ) -> Cube:\n \"\"\"\n Generate probabilities from the location and scale parameters of the\n distribution.\n\n Args:\n location_parameter:\n Cube containing the location parameters.\n scale_parameter:\n Cube containing the scale parameters.\n probability_cube_template:\n A probability cube that has a threshold coordinate, where the\n probabilities are defined as above or below the threshold by\n the spp__relative_to_threshold attribute. This cube matches\n the desired output cube format.\n\n Returns:\n A cube of diagnostic data expressed as probabilities relative\n to the thresholds found in the probability_cube_template.\n \"\"\"\n self._check_template_cube(probability_cube_template)\n self._check_unit_compatibility(\n location_parameter, scale_parameter, probability_cube_template\n )\n\n probability_cube = self._location_and_scale_parameters_to_probabilities(\n location_parameter, scale_parameter, probability_cube_template\n )\n\n return probability_cube\n\n\nclass EnsembleReordering(BasePlugin):\n \"\"\"\n Plugin for applying the reordering step of Ensemble Copula Coupling,\n in order to generate ensemble realizations with multivariate structure\n from percentiles. The percentiles are assumed to be in ascending order.\n\n Reference:\n Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013.\n Uncertainty Quantification in Complex Simulation Models Using Ensemble\n Copula Coupling.\n Statistical Science, 28(4), pp.616-640.\n\n \"\"\"\n\n @staticmethod\n def _recycle_raw_ensemble_realizations(\n post_processed_forecast_percentiles: Cube,\n raw_forecast_realizations: Cube,\n percentile_coord_name: str,\n ) -> Cube:\n \"\"\"\n Function to determine whether there is a mismatch between the number\n of percentiles and the number of raw forecast realizations. If more\n percentiles are requested than ensemble realizations, then the ensemble\n realizations are recycled. This assumes that the identity of the\n ensemble realizations within the raw ensemble forecast is random, such\n that the raw ensemble realizations are exchangeable. 
If fewer\n percentiles are requested than ensemble realizations, then only the\n first n ensemble realizations are used.\n\n Args:\n post_processed_forecast_percentiles :\n Cube for post-processed percentiles.\n The percentiles are assumed\n to be in ascending order.\n raw_forecast_realizations:\n Cube containing the raw (not post-processed) forecasts.\n percentile_coord_name:\n Name of required percentile coordinate.\n\n Returns:\n Cube for the raw ensemble forecast, where the raw ensemble\n realizations have either been recycled or constrained,\n depending upon the number of percentiles present\n in the post-processed forecast cube.\n \"\"\"\n plen = len(\n post_processed_forecast_percentiles.coord(percentile_coord_name).points\n )\n mlen = len(raw_forecast_realizations.coord(\"realization\").points)\n if plen == mlen:\n pass\n else:\n raw_forecast_realizations_extended = iris.cube.CubeList()\n realization_list = []\n mpoints = raw_forecast_realizations.coord(\"realization\").points\n # Loop over the number of percentiles and finding the\n # corresponding ensemble realization number. The ensemble\n # realization numbers are recycled e.g. 1, 2, 3, 1, 2, 3, etc.\n for index in range(plen):\n realization_list.append(mpoints[index % len(mpoints)])\n\n # Assume that the ensemble realizations are ascending linearly.\n new_realization_numbers = realization_list[0] + list(range(plen))\n\n # Extract the realizations required in the realization_list from\n # the raw_forecast_realizations. Edit the realization number as\n # appropriate and append to a cubelist containing rebadged\n # raw ensemble realizations.\n for realization, index in zip(realization_list, new_realization_numbers):\n constr = iris.Constraint(realization=realization)\n raw_forecast_realization = raw_forecast_realizations.extract(constr)\n raw_forecast_realization.coord(\"realization\").points = index\n raw_forecast_realizations_extended.append(raw_forecast_realization)\n raw_forecast_realizations = MergeCubes()(\n raw_forecast_realizations_extended, slice_over_realization=True\n )\n return raw_forecast_realizations\n\n @staticmethod\n def rank_ecc(\n post_processed_forecast_percentiles: Cube,\n raw_forecast_realizations: Cube,\n random_ordering: bool = False,\n random_seed: Optional[int] = None,\n ) -> Cube:\n \"\"\"\n Function to apply Ensemble Copula Coupling. This ranks the\n post-processed forecast realizations based on a ranking determined from\n the raw forecast realizations.\n\n Args:\n post_processed_forecast_percentiles:\n Cube for post-processed percentiles. 
The percentiles are\n assumed to be in ascending order.\n raw_forecast_realizations:\n Cube containing the raw (not post-processed) forecasts.\n The probabilistic dimension is assumed to be the zeroth\n dimension.\n random_ordering:\n If random_ordering is True, the post-processed forecasts are\n reordered randomly, rather than using the ordering of the\n raw ensemble.\n random_seed:\n If random_seed is an integer, the integer value is used for\n the random seed.\n If random_seed is None, no random seed is set, so the random\n values generated are not reproducible.\n\n Returns:\n Cube for post-processed realizations where at a particular grid\n point, the ranking of the values within the ensemble matches\n the ranking from the raw ensemble.\n \"\"\"\n results = iris.cube.CubeList([])\n for rawfc, calfc in zip(\n raw_forecast_realizations.slices_over(\"time\"),\n post_processed_forecast_percentiles.slices_over(\"time\"),\n ):\n if random_seed is not None:\n random_seed = int(random_seed)\n random_seed = np.random.RandomState(random_seed)\n random_data = random_seed.rand(*rawfc.data.shape)\n if random_ordering:\n # Returns the indices that would sort the array.\n # As these indices are from a random dataset, only an argsort\n # is used.\n ranking = np.argsort(random_data, axis=0)\n else:\n # Lexsort returns the indices sorted firstly by the\n # primary key, the raw forecast data (unless random_ordering\n # is enabled), and secondly by the secondary key, an array of\n # random data, in order to split tied values randomly.\n sorting_index = np.lexsort((random_data, rawfc.data), axis=0)\n # Returns the indices that would sort the array.\n ranking = np.argsort(sorting_index, axis=0)\n # Index the post-processed forecast data using the ranking array.\n # The following uses a custom choose function that reproduces the\n # required elements of the np.choose method without the limitation\n # of having < 32 arrays or a leading dimension < 32 in the\n # input data array. This function allows indexing of a 3d array\n # using a 3d array.\n mask = np.ma.getmask(calfc.data)\n calfc.data = choose(ranking, calfc.data)\n if mask is not np.ma.nomask:\n calfc.data = np.ma.MaskedArray(calfc.data, mask, dtype=np.float32)\n results.append(calfc)\n # Ensure we haven't lost any dimensional coordinates with only one\n # value in.\n results = results.merge_cube()\n results = check_cube_coordinates(post_processed_forecast_percentiles, results)\n return results\n\n @staticmethod\n def _check_input_cube_masks(post_processed_forecast, raw_forecast):\n \"\"\"\n Checks that if the raw_forecast is masked the post_processed_forecast\n is also masked. 
The code supports the post_processed_forecast being\n masked even if the raw_forecast isn't masked, but not vice versa.\n\n If both post_processed_forecast and raw_forecast are masked checks\n that both input cubes have the same mask applied to each\n x-y slice.\n\n Args:\n post_processed_forecast:\n The cube containing the post-processed\n forecast realizations.\n raw_forecast:\n The cube containing the raw (not post-processed)\n forecast.\n\n Raises:\n ValueError:\n If only the raw_forecast is masked\n ValueError:\n If the post_processed_forecast does not have same mask on all\n x-y slices\n ValueError:\n If the raw_forecast x-y slices do not all have the same mask\n as the post_processed_forecast.\n \"\"\"\n if np.ma.is_masked(post_processed_forecast.data) and np.ma.is_masked(\n raw_forecast.data\n ):\n for aslice in post_processed_forecast.data.mask[1:, ...]:\n if np.any(aslice != post_processed_forecast.data.mask[0]):\n\n message = (\n \"The post_processed_forecast does not have same\"\n \" mask on all x-y slices\"\n )\n raise (ValueError(message))\n for aslice in raw_forecast.data.mask[0:, ...]:\n if np.any(aslice != post_processed_forecast.data.mask[0]):\n message = (\n \"The raw_forecast x-y slices do not all have the\"\n \" same mask as the post_processed_forecast.\"\n )\n raise (ValueError(message))\n if np.ma.is_masked(raw_forecast.data) and not np.ma.is_masked(\n post_processed_forecast.data\n ):\n message = (\n \"The raw_forecast provided has a mask, but the \"\n \"post_processed_forecast isn't masked. The \"\n \"post_processed_forecast and the raw_forecast should \"\n \"have the same mask applied to them.\"\n )\n raise (ValueError(message))\n\n def process(\n self,\n post_processed_forecast: Cube,\n raw_forecast: Cube,\n random_ordering: bool = False,\n random_seed: Optional[int] = None,\n ) -> Cube:\n \"\"\"\n Reorder post-processed forecast using the ordering of the\n raw ensemble.\n\n Args:\n post_processed_forecast:\n The cube containing the post-processed\n forecast realizations.\n raw_forecast:\n The cube containing the raw (not post-processed)\n forecast.\n random_ordering:\n If random_ordering is True, the post-processed forecasts are\n reordered randomly, rather than using the ordering of the\n raw ensemble.\n random_seed:\n If random_seed is an integer, the integer value is used for\n the random seed.\n If random_seed is None, no random seed is set, so the random\n values generated are not reproducible.\n\n Returns:\n Cube containing the new ensemble realizations where all points\n within the dataset have been reordered in comparison to the\n input percentiles. 
This cube contains the same ensemble\n realization numbers as the raw forecast.\n \"\"\"\n percentile_coord_name = find_percentile_coordinate(\n post_processed_forecast\n ).name()\n\n enforce_coordinate_ordering(post_processed_forecast, percentile_coord_name)\n enforce_coordinate_ordering(raw_forecast, \"realization\")\n\n self._check_input_cube_masks(post_processed_forecast, raw_forecast)\n\n raw_forecast = self._recycle_raw_ensemble_realizations(\n post_processed_forecast, raw_forecast, percentile_coord_name\n )\n post_processed_forecast_realizations = self.rank_ecc(\n post_processed_forecast,\n raw_forecast,\n random_ordering=random_ordering,\n random_seed=random_seed,\n )\n plugin = RebadgePercentilesAsRealizations()\n post_processed_forecast_realizations = plugin(\n post_processed_forecast_realizations,\n ensemble_realization_numbers=raw_forecast.coord(\"realization\").points,\n )\n\n enforce_coordinate_ordering(post_processed_forecast_realizations, \"realization\")\n return post_processed_forecast_realizations\n",
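The first file in the code field above ends with the EnsembleReordering plugin, whose rank_ecc method recovers the raw ensemble's rank structure by lexsorting the raw data with a random secondary key (to split ties), arg-sorting that index to get per-point ranks, and then reindexing the ascending calibrated values with a custom choose helper. Below is a minimal NumPy-only sketch of that ranking step; the toy arrays and the use of np.take_along_axis in place of improver's choose helper are illustrative assumptions, not part of the quoted module.

```python
import numpy as np

# Toy data: 3 "realizations" of a 2x2 field from the raw ensemble, and
# post-processed percentile values that are ascending along axis 0.
raw = np.array(
    [[[2.0, 1.0], [0.0, 5.0]],
     [[1.0, 1.0], [3.0, 4.0]],
     [[3.0, 2.0], [1.0, 6.0]]]
)
calibrated = np.sort(np.random.default_rng(0).normal(size=raw.shape), axis=0)

# Random secondary key so ties in the raw data are split randomly,
# mirroring the lexsort/argsort pattern in EnsembleReordering.rank_ecc.
random_key = np.random.default_rng(1).random(raw.shape)
sorting_index = np.lexsort((random_key, raw), axis=0)
ranking = np.argsort(sorting_index, axis=0)

# Reindex the ascending calibrated values with the raw-ensemble ranking.
# np.take_along_axis stands in here for improver's choose() helper, which
# avoids np.choose's 32-array limitation for the same kind of indexing.
reordered = np.take_along_axis(calibrated, ranking, axis=0)
print(reordered)
```

At each grid point the reordered values are a permutation of the calibrated ones whose rank order matches the raw ensemble, which is the property the plugin relies on to restore multivariate structure from percentiles.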
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2021 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Unit tests for the cube_combiner.CubeCombiner plugin.\"\"\"\nimport unittest\nfrom copy import deepcopy\nfrom datetime import datetime\n\nimport iris\nimport numpy as np\nfrom iris.cube import Cube\nfrom iris.tests import IrisTest\n\nfrom improver.cube_combiner import Combine, CubeCombiner\nfrom improver.synthetic_data.set_up_test_cubes import (\n add_coordinate,\n set_up_probability_cube,\n)\nfrom improver_tests import ImproverTest\n\n\nclass Test__init__(IrisTest):\n\n \"\"\"Test the __init__ method.\"\"\"\n\n def test_basic(self):\n \"\"\"Test that the __init__ sets things up correctly\"\"\"\n plugin = CubeCombiner(\"+\")\n self.assertEqual(plugin.operator, np.add)\n\n def test_raise_error_wrong_operation(self):\n \"\"\"Test __init__ raises a ValueError for invalid operation\"\"\"\n msg = \"Unknown operation \"\n with self.assertRaisesRegex(ValueError, msg):\n CubeCombiner(\"%\")\n\n\nclass CombinerTest(ImproverTest):\n \"\"\"Set up a common set of test cubes for subsequent test classes.\"\"\"\n\n def setUp(self):\n \"\"\" Set up cubes for testing. 
\"\"\"\n data = np.full((1, 2, 2), 0.5, dtype=np.float32)\n self.cube1 = set_up_probability_cube(\n data,\n np.array([0.001], dtype=np.float32),\n variable_name=\"lwe_thickness_of_precipitation_amount\",\n time=datetime(2015, 11, 19, 0),\n time_bounds=(datetime(2015, 11, 18, 23), datetime(2015, 11, 19, 0)),\n frt=datetime(2015, 11, 18, 22),\n )\n\n data = np.full((1, 2, 2), 0.6, dtype=np.float32)\n self.cube2 = set_up_probability_cube(\n data,\n np.array([0.001], dtype=np.float32),\n variable_name=\"lwe_thickness_of_precipitation_amount\",\n time=datetime(2015, 11, 19, 1),\n time_bounds=(datetime(2015, 11, 19, 0), datetime(2015, 11, 19, 1)),\n frt=datetime(2015, 11, 18, 22),\n )\n\n data = np.full((1, 2, 2), 0.1, dtype=np.float32)\n self.cube3 = set_up_probability_cube(\n data,\n np.array([0.001], dtype=np.float32),\n variable_name=\"lwe_thickness_of_precipitation_amount\",\n time=datetime(2015, 11, 19, 1),\n time_bounds=(datetime(2015, 11, 19, 0), datetime(2015, 11, 19, 1)),\n frt=datetime(2015, 11, 18, 22),\n )\n\n data = np.full((2, 2, 2), 0.1, dtype=np.float32)\n self.cube4 = set_up_probability_cube(\n data,\n np.array([1.0, 2.0], dtype=np.float32),\n variable_name=\"lwe_thickness_of_precipitation_amount\",\n time=datetime(2015, 11, 19, 1),\n time_bounds=(datetime(2015, 11, 19, 0), datetime(2015, 11, 19, 1)),\n frt=datetime(2015, 11, 18, 22),\n )\n self.cube4 = add_coordinate(\n iris.util.squeeze(self.cube4), np.arange(3), \"realization\", coord_units=\"1\"\n )\n\n\nclass Test__get_expanded_coord_names(CombinerTest):\n \"\"\"Test method to determine coordinates for expansion\"\"\"\n\n def test_basic(self):\n \"\"\"Test correct names are returned for scalar coordinates with\n different values\"\"\"\n expected_coord_set = {\"time\", \"forecast_period\"}\n result = CubeCombiner(\"+\")._get_expanded_coord_names(\n [self.cube1, self.cube2, self.cube3]\n )\n self.assertIsInstance(result, list)\n self.assertSetEqual(set(result), expected_coord_set)\n\n def test_identical_inputs(self):\n \"\"\"Test no coordinates are returned if inputs are identical\"\"\"\n result = CubeCombiner(\"+\")._get_expanded_coord_names(\n [self.cube1, self.cube1, self.cube1]\n )\n self.assertFalse(result)\n\n def test_unmatched_coords_ignored(self):\n \"\"\"Test coordinates that are not present on all cubes are ignored,\n regardless of input order\"\"\"\n expected_coord_set = {\"time\", \"forecast_period\"}\n height = iris.coords.AuxCoord([1.5], \"height\", units=\"m\")\n self.cube1.add_aux_coord(height)\n result = CubeCombiner(\"+\")._get_expanded_coord_names(\n [self.cube1, self.cube2, self.cube3]\n )\n self.assertSetEqual(set(result), expected_coord_set)\n result = CubeCombiner(\"+\")._get_expanded_coord_names(\n [self.cube3, self.cube2, self.cube1]\n )\n self.assertSetEqual(set(result), expected_coord_set)\n\n\nclass Test_process(CombinerTest):\n\n \"\"\"Test the plugin combines the cubelist into a cube.\"\"\"\n\n def test_basic(self):\n \"\"\"Test that the plugin returns a Cube and doesn't modify the inputs.\"\"\"\n plugin = CubeCombiner(\"+\")\n cubelist = iris.cube.CubeList([self.cube1, self.cube2])\n input_copy = deepcopy(cubelist)\n result = plugin.process(cubelist, \"new_cube_name\")\n self.assertIsInstance(result, Cube)\n self.assertEqual(result.name(), \"new_cube_name\")\n expected_data = np.full((2, 2), 1.1, dtype=np.float32)\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertCubeListEqual(input_copy, cubelist)\n\n def test_basic_with_Combine(self):\n \"\"\"Test that the basic test 
also works through the Combine plugin.\"\"\"\n plugin = Combine(\"+\", new_name=\"new_cube_name\")\n cubelist = iris.cube.CubeList([self.cube1, self.cube2])\n input_copy = deepcopy(cubelist)\n result = plugin.process(cubelist)\n self.assertIsInstance(result, Cube)\n self.assertEqual(result.name(), \"new_cube_name\")\n expected_data = np.full((2, 2), 1.1, dtype=np.float32)\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertCubeListEqual(input_copy, cubelist)\n\n def test_mean(self):\n \"\"\"Test that the plugin calculates the mean correctly. \"\"\"\n plugin = CubeCombiner(\"mean\")\n cubelist = iris.cube.CubeList([self.cube1, self.cube2])\n result = plugin.process(cubelist, \"new_cube_name\")\n expected_data = np.full((2, 2), 0.55, dtype=np.float32)\n self.assertEqual(result.name(), \"new_cube_name\")\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n def test_mixed_dtypes(self):\n \"\"\"Test that the plugin calculates the sum correctly and doesn't mangle dtypes.\"\"\"\n plugin = CubeCombiner(\"add\")\n cubelist = iris.cube.CubeList(\n [self.cube1, self.cube2.copy(np.ones_like(self.cube2.data, dtype=np.int8))]\n )\n result = plugin.process(cubelist, \"new_cube_name\")\n expected_data = np.full((2, 2), 1.5, dtype=np.float32)\n self.assertEqual(result.name(), \"new_cube_name\")\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertTrue(cubelist[0].dtype == np.float32)\n self.assertTrue(cubelist[1].dtype == np.int8)\n self.assertTrue(result.dtype == np.float32)\n\n def test_mixed_dtypes_overflow(self):\n \"\"\"Test the plugin with a dtype combination that results in float64 data.\"\"\"\n plugin = CubeCombiner(\"add\")\n cubelist = iris.cube.CubeList(\n [self.cube1, self.cube2.copy(np.ones_like(self.cube2.data, dtype=np.int32))]\n )\n msg = \"Operation .* results in float64 data\"\n with self.assertRaisesRegex(TypeError, msg):\n plugin.process(cubelist, \"new_cube_name\")\n\n def test_bounds_expansion(self):\n \"\"\"Test that the plugin calculates the sum of the input cubes\n correctly and expands the time coordinate bounds on the\n resulting output.\"\"\"\n plugin = CubeCombiner(\"add\")\n cubelist = iris.cube.CubeList([self.cube1, self.cube2])\n result = plugin.process(cubelist, \"new_cube_name\")\n expected_data = np.full((2, 2), 1.1, dtype=np.float32)\n self.assertEqual(result.name(), \"new_cube_name\")\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertEqual(result.coord(\"time\").points[0], 1447894800)\n self.assertArrayEqual(result.coord(\"time\").bounds, [[1447887600, 1447894800]])\n\n def test_unmatched_scalar_coords(self):\n \"\"\"Test a scalar coordinate that is present on the first cube is\n present unmodified on the output; and if present on a later cube is\n not present on the output.\"\"\"\n height = iris.coords.AuxCoord([1.5], \"height\", units=\"m\")\n self.cube1.add_aux_coord(height)\n result = CubeCombiner(\"add\").process([self.cube1, self.cube2], \"new_cube_name\")\n self.assertEqual(result.coord(\"height\"), height)\n result = CubeCombiner(\"add\").process([self.cube2, self.cube1], \"new_cube_name\")\n result_coords = [coord.name() for coord in result.coords()]\n self.assertNotIn(\"height\", result_coords)\n\n def test_mean_multi_cube(self):\n \"\"\"Test that the plugin calculates the mean for three cubes.\"\"\"\n plugin = CubeCombiner(\"mean\")\n cubelist = iris.cube.CubeList([self.cube1, self.cube2, self.cube3])\n result = plugin.process(cubelist, \"new_cube_name\")\n expected_data = np.full((2, 2), 0.4, 
dtype=np.float32)\n self.assertEqual(result.name(), \"new_cube_name\")\n self.assertArrayAlmostEqual(result.data, expected_data)\n\n def test_with_mask(self):\n \"\"\"Test that the plugin preserves the mask if any of the inputs are\n masked\"\"\"\n expected_data = np.full((2, 2), 1.2, dtype=np.float32)\n mask = [[False, True], [False, False]]\n self.cube1.data = np.ma.MaskedArray(self.cube1.data, mask=mask)\n plugin = CubeCombiner(\"add\")\n result = plugin.process([self.cube1, self.cube2, self.cube3], \"new_cube_name\")\n self.assertIsInstance(result.data, np.ma.MaskedArray)\n self.assertArrayAlmostEqual(result.data.data, expected_data)\n self.assertArrayEqual(result.data.mask, mask)\n\n def test_exception_mismatched_dimensions(self):\n \"\"\"Test an error is raised if dimension coordinates do not match\"\"\"\n self.cube2.coord(\"latitude\").rename(\"projection_y_coordinate\")\n plugin = CubeCombiner(\"+\")\n msg = \"Cannot combine cubes with different dimensions\"\n with self.assertRaisesRegex(ValueError, msg):\n plugin.process([self.cube1, self.cube2], \"new_cube_name\")\n\n def test_exception_for_single_entry_cubelist(self):\n \"\"\"Test that the plugin raises an exception if a cubelist containing\n only one cube is passed in.\"\"\"\n plugin = CubeCombiner(\"-\")\n msg = \"Expecting 2 or more cubes in cube_list\"\n cubelist = iris.cube.CubeList([self.cube1])\n with self.assertRaisesRegex(ValueError, msg):\n plugin.process(cubelist, \"new_cube_name\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
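The second file in the code field is the CubeCombiner/Combine unit-test module; the expected arrays in its assertions are simply elementwise sums and means of the constant probability fields built in setUp. A quick NumPy check of those expected values (the constants are taken from the quoted setUp; using bare arrays in place of iris cubes is an illustrative simplification):

```python
import numpy as np

cube1 = np.full((2, 2), 0.5, dtype=np.float32)  # self.cube1 data
cube2 = np.full((2, 2), 0.6, dtype=np.float32)  # self.cube2 data
cube3 = np.full((2, 2), 0.1, dtype=np.float32)  # self.cube3 data

# test_basic / test_bounds_expansion: "+" combine of cube1 and cube2.
assert np.allclose(cube1 + cube2, np.full((2, 2), 1.1, dtype=np.float32))
# test_mean: mean of cube1 and cube2.
assert np.allclose(np.mean([cube1, cube2], axis=0), 0.55)
# test_mean_multi_cube: mean of all three cubes.
assert np.allclose(np.mean([cube1, cube2, cube3], axis=0), 0.4)
```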
] | [
[
"numpy.ma.filled",
"numpy.ma.MaskedArray",
"numpy.ma.getmaskarray",
"numpy.isnan",
"numpy.around",
"numpy.empty_like",
"numpy.lexsort",
"numpy.logical_or",
"numpy.ma.getmask",
"numpy.broadcast_to",
"numpy.any",
"numpy.transpose",
"numpy.diff",
"numpy.argsort",
"numpy.array",
"numpy.ma.masked_where",
"numpy.ma.is_masked",
"numpy.random.RandomState"
],
[
"numpy.ones_like",
"numpy.arange",
"numpy.full",
"numpy.array",
"numpy.ma.MaskedArray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
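The apis column above records the NumPy calls detected in the quoted ensemble-copula-coupling module. Several of them (np.ma.is_masked, np.broadcast_to, np.ma.MaskedArray) correspond to its recurring pattern of capturing a 2-d mask from the first slice of a masked input cube and re-applying it across the leading percentile dimension of the output. A hedged sketch of that pattern with made-up shapes:

```python
import numpy as np

# Masked input with 5 thresholds/percentiles over a 3x4 grid.
masked_input = np.ma.MaskedArray(
    np.random.default_rng(2).random((5, 3, 4)),
    mask=np.zeros((5, 3, 4), dtype=bool),
)
masked_input.mask[:, 0, 0] = True  # mask one grid point in every slice

original_mask = None
if np.ma.is_masked(masked_input):
    original_mask = masked_input.mask[0]  # keep the 2-d mask only

# Pretend this is the interpolated output at 7 requested percentiles.
result = np.random.default_rng(3).random((7, 3, 4))

if original_mask is not None:
    broadcast_mask = np.broadcast_to(original_mask, result.shape)
    result = np.ma.MaskedArray(result, mask=broadcast_mask)
```

Broadcasting the single 2-d mask keeps every output percentile masked at the same grid points as the input, matching what _interpolate_percentiles and _probabilities_to_percentiles do in the module above.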
dossett/incubator-airflow | [
"60583a3c6d1c4b5bbecaad6cd195301107530de9"
] | [
"airflow/www/views.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport ast\nimport codecs\nimport copy\nimport datetime as dt\nimport itertools\nimport json\nimport logging\nimport math\nimport os\nimport traceback\nfrom collections import defaultdict\nfrom datetime import timedelta\nfrom functools import wraps\nfrom textwrap import dedent\n\nimport bleach\nimport markdown\nimport nvd3\nimport pendulum\nimport pkg_resources\nimport sqlalchemy as sqla\nfrom flask import (\n abort, jsonify, redirect, url_for, request, Markup, Response,\n current_app, render_template, make_response)\nfrom flask import flash\nfrom flask._compat import PY2\nfrom flask_admin import BaseView, expose, AdminIndexView\nfrom flask_admin.actions import action\nfrom flask_admin.babel import lazy_gettext\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_admin.form.fields import DateTimeField\nfrom flask_admin.tools import iterdecode\nfrom jinja2 import escape\nfrom jinja2.sandbox import ImmutableSandboxedEnvironment\nfrom past.builtins import basestring, unicode\nfrom pygments import highlight, lexers\nfrom pygments.formatters import HtmlFormatter\nfrom sqlalchemy import or_, desc, and_, union_all\nfrom wtforms import (\n Form, SelectField, TextAreaField, PasswordField,\n StringField, validators)\n\nimport airflow\nfrom airflow import configuration as conf\nfrom airflow import models\nfrom airflow import settings\nfrom airflow.api.common.experimental.mark_tasks import (set_dag_run_state_to_running,\n set_dag_run_state_to_success,\n set_dag_run_state_to_failed)\nfrom airflow.exceptions import AirflowException\nfrom airflow.models import BaseOperator\nfrom airflow.models import XCom, DagRun\nfrom airflow.operators.subdag_operator import SubDagOperator\nfrom airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS\nfrom airflow.utils import timezone\nfrom airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date\nfrom airflow.utils.db import create_session, provide_session\nfrom airflow.utils.helpers import alchemy_to_dict\nfrom airflow.utils.json import json_ser\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.state import State\nfrom airflow.utils.timezone import datetime\nfrom airflow.www import utils as wwwutils\nfrom airflow.www.forms import (DateTimeForm, DateTimeWithNumRunsForm,\n DateTimeWithNumRunsWithDagRunsForm)\nfrom airflow.www.validators import GreaterEqualThan\n\nQUERY_LIMIT = 100000\nCHART_LIMIT = 200000\n\nUTF8_READER = codecs.getreader('utf-8')\n\ndagbag = models.DagBag(settings.DAGS_FOLDER)\n\nlogin_required = airflow.login.login_required\ncurrent_user = airflow.login.current_user\nlogout_user = airflow.login.logout_user\n\nFILTER_BY_OWNER = False\n\nPAGE_SIZE = 
conf.getint('webserver', 'page_size')\n\nif conf.getboolean('webserver', 'FILTER_BY_OWNER'):\n # filter_by_owner if authentication is enabled and filter_by_owner is true\n FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']\n\n\ndef dag_link(v, c, m, p):\n if m.dag_id is None:\n return Markup()\n\n dag_id = bleach.clean(m.dag_id)\n url = url_for(\n 'airflow.graph',\n dag_id=dag_id,\n execution_date=m.execution_date)\n return Markup(\n '<a href=\"{}\">{}</a>'.format(url, dag_id))\n\n\ndef log_url_formatter(v, c, m, p):\n return Markup(\n '<a href=\"{m.log_url}\">'\n ' <span class=\"glyphicon glyphicon-book\" aria-hidden=\"true\">'\n '</span></a>').format(**locals())\n\n\ndef dag_run_link(v, c, m, p):\n dag_id = bleach.clean(m.dag_id)\n url = url_for(\n 'airflow.graph',\n dag_id=m.dag_id,\n run_id=m.run_id,\n execution_date=m.execution_date)\n return Markup('<a href=\"{url}\">{m.run_id}</a>'.format(**locals()))\n\n\ndef task_instance_link(v, c, m, p):\n dag_id = bleach.clean(m.dag_id)\n task_id = bleach.clean(m.task_id)\n url = url_for(\n 'airflow.task',\n dag_id=dag_id,\n task_id=task_id,\n execution_date=m.execution_date.isoformat())\n url_root = url_for(\n 'airflow.graph',\n dag_id=dag_id,\n root=task_id,\n execution_date=m.execution_date.isoformat())\n return Markup(\n \"\"\"\n <span style=\"white-space: nowrap;\">\n <a href=\"{url}\">{task_id}</a>\n <a href=\"{url_root}\" title=\"Filter on this task and upstream\">\n <span class=\"glyphicon glyphicon-filter\" style=\"margin-left: 0px;\"\n aria-hidden=\"true\"></span>\n </a>\n </span>\n \"\"\".format(**locals()))\n\n\ndef state_token(state):\n color = State.color(state)\n return Markup(\n '<span class=\"label\" style=\"background-color:{color};\">'\n '{state}</span>'.format(**locals()))\n\n\ndef parse_datetime_f(value):\n if not isinstance(value, dt.datetime):\n return value\n\n return timezone.make_aware(value)\n\n\ndef state_f(v, c, m, p):\n return state_token(m.state)\n\n\ndef duration_f(v, c, m, p):\n if m.end_date and m.duration:\n return timedelta(seconds=m.duration)\n\n\ndef datetime_f(v, c, m, p):\n attr = getattr(m, p)\n dttm = attr.isoformat() if attr else ''\n if timezone.utcnow().isoformat()[:4] == dttm[:4]:\n dttm = dttm[5:]\n return Markup(\"<nobr>{}</nobr>\".format(dttm))\n\n\ndef nobr_f(v, c, m, p):\n return Markup(\"<nobr>{}</nobr>\".format(getattr(m, p)))\n\n\ndef label_link(v, c, m, p):\n try:\n default_params = ast.literal_eval(m.default_params)\n except Exception:\n default_params = {}\n url = url_for(\n 'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,\n **default_params)\n return Markup(\"<a href='{url}'>{m.label}</a>\".format(**locals()))\n\n\ndef pool_link(v, c, m, p):\n url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool\n return Markup(\"<a href='{url}'>{m.pool}</a>\".format(**locals()))\n\n\ndef pygment_html_render(s, lexer=lexers.TextLexer):\n return highlight(\n s,\n lexer(),\n HtmlFormatter(linenos=True),\n )\n\n\ndef render(obj, lexer):\n out = \"\"\n if isinstance(obj, basestring):\n out += pygment_html_render(obj, lexer)\n elif isinstance(obj, (tuple, list)):\n for i, s in enumerate(obj):\n out += \"<div>List item #{}</div>\".format(i)\n out += \"<div>\" + pygment_html_render(s, lexer) + \"</div>\"\n elif isinstance(obj, dict):\n for k, v in obj.items():\n out += '<div>Dict item \"{}\"</div>'.format(k)\n out += \"<div>\" + pygment_html_render(v, lexer) + \"</div>\"\n return out\n\n\ndef wrapped_markdown(s):\n return '<div class=\"rich_doc\">' + markdown.markdown(s) + 
\"</div>\"\n\n\nattr_renderer = {\n 'bash_command': lambda x: render(x, lexers.BashLexer),\n 'hql': lambda x: render(x, lexers.SqlLexer),\n 'sql': lambda x: render(x, lexers.SqlLexer),\n 'doc': lambda x: render(x, lexers.TextLexer),\n 'doc_json': lambda x: render(x, lexers.JsonLexer),\n 'doc_rst': lambda x: render(x, lexers.RstLexer),\n 'doc_yaml': lambda x: render(x, lexers.YamlLexer),\n 'doc_md': wrapped_markdown,\n 'python_callable': lambda x: render(\n wwwutils.get_python_source(x),\n lexers.PythonLexer,\n ),\n}\n\n\ndef data_profiling_required(f):\n \"\"\"Decorator for views requiring data profiling access\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if (\n current_app.config['LOGIN_DISABLED'] or\n (not current_user.is_anonymous and current_user.data_profiling())\n ):\n return f(*args, **kwargs)\n else:\n flash(\"This page requires data profiling privileges\", \"error\")\n return redirect(url_for('admin.index'))\n\n return decorated_function\n\n\ndef fused_slots(v, c, m, p):\n url = (\n '/admin/taskinstance/' +\n '?flt1_pool_equals=' + m.pool +\n '&flt2_state_equals=running')\n return Markup(\"<a href='{0}'>{1}</a>\".format(url, m.used_slots()))\n\n\ndef fqueued_slots(v, c, m, p):\n url = (\n '/admin/taskinstance/' +\n '?flt1_pool_equals=' + m.pool +\n '&flt2_state_equals=queued&sort=10&desc=1')\n return Markup(\"<a href='{0}'>{1}</a>\".format(url, m.queued_slots()))\n\n\ndef recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):\n if isinstance(tasks, list):\n for task in tasks:\n recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)\n return\n if isinstance(tasks, SubDagOperator):\n subtasks = tasks.subdag.tasks\n dag_ids.append(tasks.subdag.dag_id)\n for subtask in subtasks:\n if subtask.task_id not in task_ids:\n task_ids.append(subtask.task_id)\n task_id_to_dag[subtask.task_id] = tasks.subdag\n recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)\n if isinstance(tasks, BaseOperator):\n task_id_to_dag[tasks.task_id] = tasks.dag\n\n\ndef get_chart_height(dag):\n \"\"\"\n TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to\n approximate the size of generated chart (otherwise the charts are tiny and unreadable\n when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height\n charts, that is charts that take up space based on the size of the components within.\n \"\"\"\n return 600 + len(dag.tasks) * 10\n\n\ndef get_date_time_num_runs_dag_runs_form_data(request, session, dag):\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = pendulum.parse(dttm)\n else:\n dttm = dag.latest_execution_date or timezone.utcnow()\n\n base_date = request.args.get('base_date')\n if base_date:\n base_date = timezone.parse(base_date)\n else:\n # The DateTimeField widget truncates milliseconds and would loose\n # the first dag run. 
Round to next second.\n base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)\n\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n DR = models.DagRun\n drs = (\n session.query(DR)\n .filter(\n DR.dag_id == dag.dag_id,\n DR.execution_date <= base_date)\n .order_by(desc(DR.execution_date))\n .limit(num_runs)\n .all()\n )\n dr_choices = []\n dr_state = None\n for dr in drs:\n dr_choices.append((dr.execution_date.isoformat(), dr.run_id))\n if dttm == dr.execution_date:\n dr_state = dr.state\n\n # Happens if base_date was changed and the selected dag run is not in result\n if not dr_state and drs:\n dr = drs[0]\n dttm = dr.execution_date\n dr_state = dr.state\n\n return {\n 'dttm': dttm,\n 'base_date': base_date,\n 'num_runs': num_runs,\n 'execution_date': dttm.isoformat(),\n 'dr_choices': dr_choices,\n 'dr_state': dr_state,\n }\n\n\nclass Airflow(BaseView):\n def is_visible(self):\n return False\n\n @expose('/')\n @login_required\n def index(self):\n return self.render('airflow/dags.html')\n\n @expose('/chart_data')\n @data_profiling_required\n @wwwutils.gzipped\n # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)\n def chart_data(self):\n from airflow import macros\n import pandas as pd\n if conf.getboolean('core', 'secure_mode'):\n abort(404)\n\n with create_session() as session:\n chart_id = request.args.get('chart_id')\n csv = request.args.get('csv') == \"true\"\n chart = session.query(models.Chart).filter_by(id=chart_id).first()\n db = session.query(\n models.Connection).filter_by(conn_id=chart.conn_id).first()\n\n payload = {\n \"state\": \"ERROR\",\n \"error\": \"\"\n }\n\n # Processing templated fields\n try:\n args = ast.literal_eval(chart.default_params)\n if not isinstance(args, dict):\n raise AirflowException('Not a dict')\n except Exception:\n args = {}\n payload['error'] += (\n \"Default params is not valid, string has to evaluate as \"\n \"a Python dictionary. \")\n\n request_dict = {k: request.args.get(k) for k in request.args}\n args.update(request_dict)\n args['macros'] = macros\n sandbox = ImmutableSandboxedEnvironment()\n sql = sandbox.from_string(chart.sql).render(**args)\n label = sandbox.from_string(chart.label).render(**args)\n payload['sql_html'] = Markup(highlight(\n sql,\n lexers.SqlLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n payload['label'] = label\n\n pd.set_option('display.max_colwidth', 100)\n hook = db.get_hook()\n try:\n df = hook.get_pandas_df(\n wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))\n df = df.fillna(0)\n except Exception as e:\n payload['error'] += \"SQL execution failed. Details: \" + str(e)\n\n if csv:\n return Response(\n response=df.to_csv(index=False),\n status=200,\n mimetype=\"application/text\")\n\n if not payload['error'] and len(df) == CHART_LIMIT:\n payload['warning'] = (\n \"Data has been truncated to {0}\"\n \" rows. Expect incomplete results.\").format(CHART_LIMIT)\n\n if not payload['error'] and len(df) == 0:\n payload['error'] += \"Empty result set. \"\n elif (\n not payload['error'] and\n chart.sql_layout == 'series' and\n chart.chart_type != \"datatable\" and\n len(df.columns) < 3):\n payload['error'] += \"SQL needs to return at least 3 columns. \"\n elif (\n not payload['error'] and\n chart.sql_layout == 'columns' and\n len(df.columns) < 2):\n payload['error'] += \"SQL needs to return at least 2 columns. 
\"\n elif not payload['error']:\n import numpy as np\n chart_type = chart.chart_type\n\n data = None\n if chart.show_datatable or chart_type == \"datatable\":\n data = df.to_dict(orient=\"split\")\n data['columns'] = [{'title': c} for c in data['columns']]\n payload['data'] = data\n\n # Trying to convert time to something Highcharts likes\n x_col = 1 if chart.sql_layout == 'series' else 0\n if chart.x_is_date:\n try:\n # From string to datetime\n df[df.columns[x_col]] = pd.to_datetime(\n df[df.columns[x_col]])\n df[df.columns[x_col]] = df[df.columns[x_col]].apply(\n lambda x: int(x.strftime(\"%s\")) * 1000)\n except Exception as e:\n payload['error'] = \"Time conversion failed\"\n\n if chart_type == 'datatable':\n payload['state'] = 'SUCCESS'\n return wwwutils.json_response(payload)\n else:\n if chart.sql_layout == 'series':\n # User provides columns (series, x, y)\n df[df.columns[2]] = df[df.columns[2]].astype(np.float)\n df = df.pivot_table(\n index=df.columns[1],\n columns=df.columns[0],\n values=df.columns[2], aggfunc=np.sum)\n else:\n # User provides columns (x, y, metric1, metric2, ...)\n df.index = df[df.columns[0]]\n df = df.sort(df.columns[0])\n del df[df.columns[0]]\n for col in df.columns:\n df[col] = df[col].astype(np.float)\n\n df = df.fillna(0)\n NVd3ChartClass = chart_mapping.get(chart.chart_type)\n NVd3ChartClass = getattr(nvd3, NVd3ChartClass)\n nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)\n\n for col in df.columns:\n nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())\n try:\n nvd3_chart.buildcontent()\n payload['chart_type'] = nvd3_chart.__class__.__name__\n payload['htmlcontent'] = nvd3_chart.htmlcontent\n except Exception as e:\n payload['error'] = str(e)\n\n payload['state'] = 'SUCCESS'\n payload['request_dict'] = request_dict\n return wwwutils.json_response(payload)\n\n @expose('/chart')\n @data_profiling_required\n def chart(self):\n if conf.getboolean('core', 'secure_mode'):\n abort(404)\n\n with create_session() as session:\n chart_id = request.args.get('chart_id')\n embed = request.args.get('embed')\n chart = session.query(models.Chart).filter_by(id=chart_id).first()\n\n NVd3ChartClass = chart_mapping.get(chart.chart_type)\n if not NVd3ChartClass:\n flash(\n \"Not supported anymore as the license was incompatible, \"\n \"sorry\",\n \"danger\")\n redirect('/admin/chart/')\n\n sql = \"\"\n if chart.show_sql:\n sql = Markup(highlight(\n chart.sql,\n lexers.SqlLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n return self.render(\n 'airflow/nvd3.html',\n chart=chart,\n title=\"Airflow - Chart\",\n sql=sql,\n label=chart.label,\n embed=embed)\n\n @expose('/dag_stats')\n @login_required\n @provide_session\n def dag_stats(self, session=None):\n ds = models.DagStat\n\n ds.update(\n dag_ids=[dag.dag_id for dag in dagbag.dags.values() if not dag.is_subdag]\n )\n\n qry = (\n session.query(ds.dag_id, ds.state, ds.count)\n )\n\n data = {}\n for dag_id, state, count in qry:\n if dag_id not in data:\n data[dag_id] = {}\n data[dag_id][state] = count\n\n payload = {}\n for dag in dagbag.dags.values():\n payload[dag.safe_dag_id] = []\n for state in State.dag_states:\n try:\n count = data[dag.dag_id][state]\n except Exception:\n count = 0\n d = {\n 'state': state,\n 'count': count,\n 'dag_id': dag.dag_id,\n 'color': State.color(state)\n }\n payload[dag.safe_dag_id].append(d)\n return wwwutils.json_response(payload)\n\n @expose('/task_stats')\n @login_required\n @provide_session\n def task_stats(self, session=None):\n TI = 
models.TaskInstance\n DagRun = models.DagRun\n Dag = models.DagModel\n\n LastDagRun = (\n session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))\n .join(Dag, Dag.dag_id == DagRun.dag_id)\n .filter(DagRun.state != State.RUNNING)\n .filter(Dag.is_active == True) # noqa: E712\n .filter(Dag.is_subdag == False) # noqa: E712\n .group_by(DagRun.dag_id)\n .subquery('last_dag_run')\n )\n RunningDagRun = (\n session.query(DagRun.dag_id, DagRun.execution_date)\n .join(Dag, Dag.dag_id == DagRun.dag_id)\n .filter(DagRun.state == State.RUNNING)\n .filter(Dag.is_active == True) # noqa: E712\n .filter(Dag.is_subdag == False) # noqa: E712\n .subquery('running_dag_run')\n )\n\n # Select all task_instances from active dag_runs.\n # If no dag_run is active, return task instances from most recent dag_run.\n LastTI = (\n session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))\n .join(LastDagRun, and_(\n LastDagRun.c.dag_id == TI.dag_id,\n LastDagRun.c.execution_date == TI.execution_date))\n )\n RunningTI = (\n session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))\n .join(RunningDagRun, and_(\n RunningDagRun.c.dag_id == TI.dag_id,\n RunningDagRun.c.execution_date == TI.execution_date))\n )\n\n UnionTI = union_all(LastTI, RunningTI).alias('union_ti')\n qry = (\n session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())\n .group_by(UnionTI.c.dag_id, UnionTI.c.state)\n )\n\n data = {}\n for dag_id, state, count in qry:\n if dag_id not in data:\n data[dag_id] = {}\n data[dag_id][state] = count\n session.commit()\n\n payload = {}\n for dag in dagbag.dags.values():\n payload[dag.safe_dag_id] = []\n for state in State.task_states:\n try:\n count = data[dag.dag_id][state]\n except Exception:\n count = 0\n d = {\n 'state': state,\n 'count': count,\n 'dag_id': dag.dag_id,\n 'color': State.color(state)\n }\n payload[dag.safe_dag_id].append(d)\n return wwwutils.json_response(payload)\n\n @expose('/code')\n @login_required\n def code(self):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n title = dag_id\n try:\n with wwwutils.open_maybe_zipped(dag.fileloc, 'r') as f:\n code = f.read()\n html_code = highlight(\n code, lexers.PythonLexer(), HtmlFormatter(linenos=True))\n except IOError as e:\n html_code = str(e)\n\n return self.render(\n 'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,\n root=request.args.get('root'),\n demo_mode=conf.getboolean('webserver', 'demo_mode'))\n\n @expose('/dag_details')\n @login_required\n @provide_session\n def dag_details(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n title = \"DAG details\"\n\n TI = models.TaskInstance\n states = session\\\n .query(TI.state, sqla.func.count(TI.dag_id))\\\n .filter(TI.dag_id == dag_id)\\\n .group_by(TI.state)\\\n .all()\n\n return self.render(\n 'airflow/dag_details.html',\n dag=dag, title=title, states=states, State=State)\n\n @current_app.errorhandler(404)\n def circles(self):\n return render_template(\n 'airflow/circles.html', hostname=get_hostname()), 404\n\n @current_app.errorhandler(500)\n def show_traceback(self):\n from airflow.utils import asciiart as ascii_\n return render_template(\n 'airflow/traceback.html',\n hostname=get_hostname(),\n nukular=ascii_.nukular,\n info=traceback.format_exc()), 500\n\n @expose('/noaccess')\n def noaccess(self):\n return self.render('airflow/noaccess.html')\n\n @expose('/pickle_info')\n @login_required\n def pickle_info(self):\n d = {}\n dag_id = 
request.args.get('dag_id')\n dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()\n for dag in dags:\n if not dag.is_subdag:\n d[dag.dag_id] = dag.pickle_info()\n return wwwutils.json_response(d)\n\n @expose('/login', methods=['GET', 'POST'])\n def login(self):\n return airflow.login.login(self, request)\n\n @expose('/logout')\n def logout(self):\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('admin.index'))\n\n @expose('/rendered')\n @login_required\n @wwwutils.action_logging\n def rendered(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n task = copy.copy(dag.get_task(task_id))\n ti = models.TaskInstance(task=task, execution_date=dttm)\n try:\n ti.render_templates()\n except Exception as e:\n flash(\"Error rendering template: \" + str(e), \"error\")\n title = \"Rendered Template\"\n html_dict = {}\n for template_field in task.__class__.template_fields:\n content = getattr(task, template_field)\n if template_field in attr_renderer:\n html_dict[template_field] = attr_renderer[template_field](content)\n else:\n html_dict[template_field] = (\n \"<pre><code>\" + str(content) + \"</pre></code>\")\n\n return self.render(\n 'airflow/ti_code.html',\n html_dict=html_dict,\n dag=dag,\n task_id=task_id,\n execution_date=execution_date,\n form=form,\n title=title, )\n\n @expose('/get_logs_with_metadata')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def get_logs_with_metadata(self, session=None):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n try_number = int(request.args.get('try_number'))\n metadata = request.args.get('metadata')\n metadata = json.loads(metadata)\n\n # metadata may be null\n if not metadata:\n metadata = {}\n\n # Convert string datetime into actual datetime\n try:\n execution_date = timezone.parse(execution_date)\n except ValueError:\n error_message = (\n 'Given execution date, {}, could not be identified '\n 'as a date. 
Example date format: 2015-11-16T14:34:15+00:00'.format(\n execution_date))\n response = jsonify({'error': error_message})\n response.status_code = 400\n\n return response\n\n logger = logging.getLogger('airflow.task')\n task_log_reader = conf.get('core', 'task_log_reader')\n handler = next((handler for handler in logger.handlers\n if handler.name == task_log_reader), None)\n\n ti = session.query(models.TaskInstance).filter(\n models.TaskInstance.dag_id == dag_id,\n models.TaskInstance.task_id == task_id,\n models.TaskInstance.execution_date == dttm).first()\n try:\n if ti is None:\n logs = [\"*** Task instance did not exist in the DB\\n\"]\n metadata['end_of_log'] = True\n else:\n dag = dagbag.get_dag(dag_id)\n ti.task = dag.get_task(ti.task_id)\n logs, metadatas = handler.read(ti, try_number, metadata=metadata)\n metadata = metadatas[0]\n for i, log in enumerate(logs):\n if PY2 and not isinstance(log, unicode):\n logs[i] = log.decode('utf-8')\n message = logs[0]\n return jsonify(message=message, metadata=metadata)\n except AttributeError as e:\n error_message = [\"Task log handler {} does not support read logs.\\n{}\\n\"\n .format(task_log_reader, str(e))]\n metadata['end_of_log'] = True\n return jsonify(message=error_message, error=True, metadata=metadata)\n\n @expose('/log')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def log(self, session=None):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n\n ti = session.query(models.TaskInstance).filter(\n models.TaskInstance.dag_id == dag_id,\n models.TaskInstance.task_id == task_id,\n models.TaskInstance.execution_date == dttm).first()\n\n logs = [''] * (ti.next_try_number - 1 if ti is not None else 0)\n return self.render(\n 'airflow/ti_log.html',\n logs=logs, dag=dag, title=\"Log by attempts\",\n dag_id=dag.dag_id, task_id=task_id,\n execution_date=execution_date, form=form)\n\n @expose('/task')\n @login_required\n @wwwutils.action_logging\n def task(self):\n TI = models.TaskInstance\n\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n # Carrying execution_date through, even though it's irrelevant for\n # this context\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n\n if not dag or task_id not in dag.task_ids:\n flash(\n \"Task [{}.{}] doesn't seem to exist\"\n \" at the moment\".format(dag_id, task_id),\n \"error\")\n return redirect('/admin/')\n task = copy.copy(dag.get_task(task_id))\n task.resolve_template_files()\n ti = TI(task=task, execution_date=dttm)\n ti.refresh_from_db()\n\n ti_attrs = []\n for attr_name in dir(ti):\n if not attr_name.startswith('_'):\n attr = getattr(ti, attr_name)\n if type(attr) != type(self.task): # noqa: E721\n ti_attrs.append((attr_name, str(attr)))\n\n task_attrs = []\n for attr_name in dir(task):\n if not attr_name.startswith('_'):\n attr = getattr(task, attr_name)\n if type(attr) != type(self.task) and \\\n attr_name not in attr_renderer: # noqa: E721\n task_attrs.append((attr_name, str(attr)))\n\n # Color coding the special attributes that are code\n special_attrs_rendered = {}\n for attr_name in attr_renderer:\n if hasattr(task, attr_name):\n source = getattr(task, attr_name)\n special_attrs_rendered[attr_name] = 
attr_renderer[attr_name](source)\n\n no_failed_deps_result = [(\n \"Unknown\",\n dedent(\"\"\"\\\n All dependencies are met but the task instance is not running.\n In most cases this just means that the task will probably\n be scheduled soon unless:<br/>\n - The scheduler is down or under heavy load<br/>\n - The following configuration values may be limiting the number\n of queueable processes:\n <code>parallelism</code>,\n <code>dag_concurrency</code>,\n <code>max_active_dag_runs_per_dag</code>,\n <code>non_pooled_task_slot_count</code><br/>\n {}\n <br/>\n If this task instance does not start soon please contact your Airflow \"\"\"\n \"\"\"administrator for assistance.\"\"\"\n .format(\n \"- This task instance already ran and had its state changed \"\n \"manually (e.g. cleared in the UI)<br/>\"\n if ti.state == State.NONE else \"\")))]\n\n # Use the scheduler's context to figure out which dependencies are not met\n dep_context = DepContext(SCHEDULER_DEPS)\n failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in\n ti.get_failed_dep_statuses(\n dep_context=dep_context)]\n\n title = \"Task Instance Details\"\n return self.render(\n 'airflow/task.html',\n task_attrs=task_attrs,\n ti_attrs=ti_attrs,\n failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,\n task_id=task_id,\n execution_date=execution_date,\n special_attrs_rendered=special_attrs_rendered,\n form=form,\n dag=dag, title=title)\n\n @expose('/xcom')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def xcom(self, session=None):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n # Carrying execution_date through, even though it's irrelevant for\n # this context\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n if not dag or task_id not in dag.task_ids:\n flash(\n \"Task [{}.{}] doesn't seem to exist\"\n \" at the moment\".format(dag_id, task_id),\n \"error\")\n return redirect('/admin/')\n\n xcomlist = session.query(XCom).filter(\n XCom.dag_id == dag_id, XCom.task_id == task_id,\n XCom.execution_date == dttm).all()\n\n attributes = []\n for xcom in xcomlist:\n if not xcom.key.startswith('_'):\n attributes.append((xcom.key, xcom.value))\n\n title = \"XCom\"\n return self.render(\n 'airflow/xcom.html',\n attributes=attributes,\n task_id=task_id,\n execution_date=execution_date,\n form=form,\n dag=dag, title=title)\n\n @expose('/run')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def run(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n\n execution_date = request.args.get('execution_date')\n execution_date = pendulum.parse(execution_date)\n ignore_all_deps = request.args.get('ignore_all_deps') == \"true\"\n ignore_task_deps = request.args.get('ignore_task_deps') == \"true\"\n ignore_ti_state = request.args.get('ignore_ti_state') == \"true\"\n\n from airflow.executors import GetDefaultExecutor\n executor = GetDefaultExecutor()\n valid_celery_config = False\n valid_kubernetes_config = False\n\n try:\n from airflow.executors.celery_executor import CeleryExecutor\n valid_celery_config = isinstance(executor, CeleryExecutor)\n except ImportError:\n pass\n\n try:\n from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor\n valid_kubernetes_config = isinstance(executor, 
KubernetesExecutor)\n except ImportError:\n pass\n\n if not valid_celery_config and not valid_kubernetes_config:\n flash(\"Only works with the Celery or Kubernetes executors, sorry\", \"error\")\n return redirect(origin)\n\n ti = models.TaskInstance(task=task, execution_date=execution_date)\n ti.refresh_from_db()\n\n # Make sure the task instance can be queued\n dep_context = DepContext(\n deps=QUEUE_DEPS,\n ignore_all_deps=ignore_all_deps,\n ignore_task_deps=ignore_task_deps,\n ignore_ti_state=ignore_ti_state)\n failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))\n if failed_deps:\n failed_deps_str = \", \".join(\n [\"{}: {}\".format(dep.dep_name, dep.reason) for dep in failed_deps])\n flash(\"Could not queue task instance for execution, dependencies not met: \"\n \"{}\".format(failed_deps_str),\n \"error\")\n return redirect(origin)\n\n executor.start()\n executor.queue_task_instance(\n ti,\n ignore_all_deps=ignore_all_deps,\n ignore_task_deps=ignore_task_deps,\n ignore_ti_state=ignore_ti_state)\n executor.heartbeat()\n flash(\n \"Sent {} to the message queue, \"\n \"it should start any moment now.\".format(ti))\n return redirect(origin)\n\n @expose('/delete')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def delete(self):\n from airflow.api.common.experimental import delete_dag\n from airflow.exceptions import DagNotFound, DagFileExists\n\n dag_id = request.args.get('dag_id')\n origin = request.args.get('origin') or \"/admin/\"\n\n try:\n delete_dag.delete_dag(dag_id)\n except DagNotFound:\n flash(\"DAG with id {} not found. Cannot delete\".format(dag_id))\n return redirect(request.referrer)\n except DagFileExists:\n flash(\"Dag id {} is still in DagBag. \"\n \"Remove the DAG file first.\".format(dag_id))\n return redirect(request.referrer)\n\n flash(\"Deleting DAG with id {}. 
May take a couple minutes to fully\"\n \" disappear.\".format(dag_id))\n # Upon successful delete return to origin\n return redirect(origin)\n\n @expose('/trigger')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def trigger(self):\n dag_id = request.args.get('dag_id')\n origin = request.args.get('origin') or \"/admin/\"\n dag = dagbag.get_dag(dag_id)\n\n if not dag:\n flash(\"Cannot find dag {}\".format(dag_id))\n return redirect(origin)\n\n execution_date = timezone.utcnow()\n run_id = \"manual__{0}\".format(execution_date.isoformat())\n\n dr = DagRun.find(dag_id=dag_id, run_id=run_id)\n if dr:\n flash(\"This run_id {} already exists\".format(run_id))\n return redirect(origin)\n\n run_conf = {}\n\n dag.create_dagrun(\n run_id=run_id,\n execution_date=execution_date,\n state=State.RUNNING,\n conf=run_conf,\n external_trigger=True\n )\n\n flash(\n \"Triggered {}, \"\n \"it should start any moment now.\".format(dag_id))\n return redirect(origin)\n\n def _clear_dag_tis(self, dag, start_date, end_date, origin,\n recursive=False, confirmed=False):\n if confirmed:\n count = dag.clear(\n start_date=start_date,\n end_date=end_date,\n include_subdags=recursive,\n include_parentdag=recursive,\n )\n\n flash(\"{0} task instances have been cleared\".format(count))\n return redirect(origin)\n\n tis = dag.clear(\n start_date=start_date,\n end_date=end_date,\n include_subdags=recursive,\n dry_run=True,\n include_parentdag=recursive,\n )\n if not tis:\n flash(\"No task instances to clear\", 'error')\n response = redirect(origin)\n else:\n details = \"\\n\".join([str(t) for t in tis])\n\n response = self.render(\n 'airflow/confirm.html',\n message=(\"Here's the list of task instances you are about \"\n \"to clear:\"),\n details=details)\n\n return response\n\n @expose('/clear')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def clear(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n dag = dagbag.get_dag(dag_id)\n\n execution_date = request.args.get('execution_date')\n execution_date = pendulum.parse(execution_date)\n confirmed = request.args.get('confirmed') == \"true\"\n upstream = request.args.get('upstream') == \"true\"\n downstream = request.args.get('downstream') == \"true\"\n future = request.args.get('future') == \"true\"\n past = request.args.get('past') == \"true\"\n recursive = request.args.get('recursive') == \"true\"\n\n dag = dag.sub_dag(\n task_regex=r\"^{0}$\".format(task_id),\n include_downstream=downstream,\n include_upstream=upstream)\n\n end_date = execution_date if not future else None\n start_date = execution_date if not past else None\n\n return self._clear_dag_tis(dag, start_date, end_date, origin,\n recursive=recursive, confirmed=confirmed)\n\n @expose('/dagrun_clear')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def dagrun_clear(self):\n dag_id = request.args.get('dag_id')\n origin = request.args.get('origin')\n execution_date = request.args.get('execution_date')\n confirmed = request.args.get('confirmed') == \"true\"\n\n dag = dagbag.get_dag(dag_id)\n execution_date = pendulum.parse(execution_date)\n start_date = execution_date\n end_date = execution_date\n\n return self._clear_dag_tis(dag, start_date, end_date, origin,\n recursive=True, confirmed=confirmed)\n\n @expose('/blocked')\n @login_required\n @provide_session\n def blocked(self, session=None):\n DR = models.DagRun\n dags = session\\\n .query(DR.dag_id, 
sqla.func.count(DR.id))\\\n .filter(DR.state == State.RUNNING)\\\n .group_by(DR.dag_id)\\\n .all()\n\n payload = []\n for dag_id, active_dag_runs in dags:\n max_active_runs = 0\n if dag_id in dagbag.dags:\n max_active_runs = dagbag.dags[dag_id].max_active_runs\n payload.append({\n 'dag_id': dag_id,\n 'active_dag_run': active_dag_runs,\n 'max_active_runs': max_active_runs,\n })\n return wwwutils.json_response(payload)\n\n def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):\n if not execution_date:\n flash('Invalid execution date', 'error')\n return redirect(origin)\n\n execution_date = pendulum.parse(execution_date)\n dag = dagbag.get_dag(dag_id)\n\n if not dag:\n flash('Cannot find DAG: {}'.format(dag_id), 'error')\n return redirect(origin)\n\n new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)\n\n if confirmed:\n flash('Marked failed on {} task instances'.format(len(new_dag_state)))\n return redirect(origin)\n\n else:\n details = '\\n'.join([str(t) for t in new_dag_state])\n\n response = self.render('airflow/confirm.html',\n message=(\"Here's the list of task instances you are \"\n \"about to mark as failed\"),\n details=details)\n\n return response\n\n def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):\n if not execution_date:\n flash('Invalid execution date', 'error')\n return redirect(origin)\n\n execution_date = pendulum.parse(execution_date)\n dag = dagbag.get_dag(dag_id)\n\n if not dag:\n flash('Cannot find DAG: {}'.format(dag_id), 'error')\n return redirect(origin)\n\n new_dag_state = set_dag_run_state_to_success(dag, execution_date,\n commit=confirmed)\n\n if confirmed:\n flash('Marked success on {} task instances'.format(len(new_dag_state)))\n return redirect(origin)\n\n else:\n details = '\\n'.join([str(t) for t in new_dag_state])\n\n response = self.render('airflow/confirm.html',\n message=(\"Here's the list of task instances you are \"\n \"about to mark as success\"),\n details=details)\n\n return response\n\n @expose('/dagrun_failed')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def dagrun_failed(self):\n dag_id = request.args.get('dag_id')\n execution_date = request.args.get('execution_date')\n confirmed = request.args.get('confirmed') == 'true'\n origin = request.args.get('origin')\n return self._mark_dagrun_state_as_failed(dag_id, execution_date,\n confirmed, origin)\n\n @expose('/dagrun_success')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def dagrun_success(self):\n dag_id = request.args.get('dag_id')\n execution_date = request.args.get('execution_date')\n confirmed = request.args.get('confirmed') == 'true'\n origin = request.args.get('origin')\n return self._mark_dagrun_state_as_success(dag_id, execution_date,\n confirmed, origin)\n\n def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,\n confirmed, upstream, downstream,\n future, past, state):\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n task.dag = dag\n\n execution_date = pendulum.parse(execution_date)\n\n if not dag:\n flash(\"Cannot find DAG: {}\".format(dag_id))\n return redirect(origin)\n\n if not task:\n flash(\"Cannot find task {} in DAG {}\".format(task_id, dag.dag_id))\n return redirect(origin)\n\n from airflow.api.common.experimental.mark_tasks import set_state\n\n if confirmed:\n altered = set_state(task=task, execution_date=execution_date,\n upstream=upstream, downstream=downstream,\n future=future, past=past, 
state=state,\n commit=True)\n\n flash(\"Marked {} on {} task instances\".format(state, len(altered)))\n return redirect(origin)\n\n to_be_altered = set_state(task=task, execution_date=execution_date,\n upstream=upstream, downstream=downstream,\n future=future, past=past, state=state,\n commit=False)\n\n details = \"\\n\".join([str(t) for t in to_be_altered])\n\n response = self.render(\"airflow/confirm.html\",\n message=(\"Here's the list of task instances you are \"\n \"about to mark as {}:\".format(state)),\n details=details)\n\n return response\n\n @expose('/failed')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def failed(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n execution_date = request.args.get('execution_date')\n\n confirmed = request.args.get('confirmed') == \"true\"\n upstream = request.args.get('upstream') == \"true\"\n downstream = request.args.get('downstream') == \"true\"\n future = request.args.get('future') == \"true\"\n past = request.args.get('past') == \"true\"\n\n return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,\n confirmed, upstream, downstream,\n future, past, State.FAILED)\n\n @expose('/success')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def success(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n execution_date = request.args.get('execution_date')\n\n confirmed = request.args.get('confirmed') == \"true\"\n upstream = request.args.get('upstream') == \"true\"\n downstream = request.args.get('downstream') == \"true\"\n future = request.args.get('future') == \"true\"\n past = request.args.get('past') == \"true\"\n\n return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,\n confirmed, upstream, downstream,\n future, past, State.SUCCESS)\n\n @expose('/tree')\n @login_required\n @wwwutils.gzipped\n @wwwutils.action_logging\n @provide_session\n def tree(self, session=None):\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n dag_id = request.args.get('dag_id')\n blur = conf.getboolean('webserver', 'demo_mode')\n dag = dagbag.get_dag(dag_id)\n if dag_id not in dagbag.dags:\n flash('DAG \"{0}\" seems to be missing.'.format(dag_id), \"error\")\n return redirect('/admin/')\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_downstream=False,\n include_upstream=True)\n\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n if base_date:\n base_date = timezone.parse(base_date)\n else:\n base_date = dag.latest_execution_date or timezone.utcnow()\n\n DR = models.DagRun\n dag_runs = (\n session.query(DR)\n .filter(\n DR.dag_id == dag.dag_id,\n DR.execution_date <= base_date)\n .order_by(DR.execution_date.desc())\n .limit(num_runs)\n .all()\n )\n dag_runs = {\n dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}\n\n dates = sorted(list(dag_runs.keys()))\n max_date = max(dates) if dates else None\n min_date = min(dates) if dates else None\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n task_instances = {}\n for ti in tis:\n tid = alchemy_to_dict(ti)\n dr = dag_runs.get(ti.execution_date)\n tid['external_trigger'] = dr['external_trigger'] if dr else False\n task_instances[(ti.task_id, ti.execution_date)] = 
tid\n\n expanded = []\n # The default recursion traces every path so that tree view has full\n # expand/collapse functionality. After 5,000 nodes we stop and fall\n # back on a quick DFS search for performance. See PR #320.\n node_count = [0]\n node_limit = 5000 / max(1, len(dag.roots))\n\n def recurse_nodes(task, visited):\n visited.add(task)\n node_count[0] += 1\n\n children = [\n recurse_nodes(t, visited) for t in task.upstream_list\n if node_count[0] < node_limit or t not in visited]\n\n # D3 tree uses children vs _children to define what is\n # expanded or not. The following block makes it such that\n # repeated nodes are collapsed by default.\n children_key = 'children'\n if task.task_id not in expanded:\n expanded.append(task.task_id)\n elif children:\n children_key = \"_children\"\n\n def set_duration(tid):\n if isinstance(tid, dict) and tid.get(\"state\") == State.RUNNING \\\n and tid[\"start_date\"] is not None:\n d = timezone.utcnow() - pendulum.parse(tid[\"start_date\"])\n tid[\"duration\"] = d.total_seconds()\n return tid\n\n return {\n 'name': task.task_id,\n 'instances': [\n set_duration(task_instances.get((task.task_id, d))) or {\n 'execution_date': d.isoformat(),\n 'task_id': task.task_id\n }\n for d in dates],\n children_key: children,\n 'num_dep': len(task.upstream_list),\n 'operator': task.task_type,\n 'retries': task.retries,\n 'owner': task.owner,\n 'start_date': task.start_date,\n 'end_date': task.end_date,\n 'depends_on_past': task.depends_on_past,\n 'ui_color': task.ui_color,\n }\n\n data = {\n 'name': '[DAG]',\n 'children': [recurse_nodes(t, set()) for t in dag.roots],\n 'instances': [dag_runs.get(d) or {'execution_date': d.isoformat()} for d in dates],\n }\n\n # minimize whitespace as this can be huge for bigger dags\n data = json.dumps(data, default=json_ser, separators=(',', ':'))\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n return self.render(\n 'airflow/tree.html',\n operators=sorted(\n list(set([op.__class__ for op in dag.tasks])),\n key=lambda x: x.__name__\n ),\n root=root,\n form=form,\n dag=dag, data=data, blur=blur, num_runs=num_runs)\n\n @expose('/graph')\n @login_required\n @wwwutils.gzipped\n @wwwutils.action_logging\n @provide_session\n def graph(self, session=None):\n dag_id = request.args.get('dag_id')\n blur = conf.getboolean('webserver', 'demo_mode')\n dag = dagbag.get_dag(dag_id)\n if dag_id not in dagbag.dags:\n flash('DAG \"{0}\" seems to be missing.'.format(dag_id), \"error\")\n return redirect('/admin/')\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n arrange = request.args.get('arrange', dag.orientation)\n\n nodes = []\n edges = []\n for task in dag.tasks:\n nodes.append({\n 'id': task.task_id,\n 'value': {\n 'label': task.task_id,\n 'labelStyle': \"fill:{0};\".format(task.ui_fgcolor),\n 'style': \"fill:{0};\".format(task.ui_color),\n }\n })\n\n def get_upstream(task):\n for t in task.upstream_list:\n edge = {\n 'u': t.task_id,\n 'v': task.task_id,\n }\n if edge not in edges:\n edges.append(edge)\n get_upstream(t)\n\n for t in dag.roots:\n get_upstream(t)\n\n dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)\n dt_nr_dr_data['arrange'] = arrange\n dttm = dt_nr_dr_data['dttm']\n\n class GraphForm(DateTimeWithNumRunsWithDagRunsForm):\n arrange = SelectField(\"Layout\", choices=(\n ('LR', \"Left->Right\"),\n ('RL', \"Right->Left\"),\n ('TB', 
\"Top->Bottom\"),\n ('BT', \"Bottom->Top\"),\n ))\n\n form = GraphForm(data=dt_nr_dr_data)\n form.execution_date.choices = dt_nr_dr_data['dr_choices']\n\n task_instances = {\n ti.task_id: alchemy_to_dict(ti)\n for ti in dag.get_task_instances(session, dttm, dttm)}\n tasks = {\n t.task_id: {\n 'dag_id': t.dag_id,\n 'task_type': t.task_type,\n }\n for t in dag.tasks}\n if not tasks:\n flash(\"No tasks found\", \"error\")\n session.commit()\n doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''\n\n return self.render(\n 'airflow/graph.html',\n dag=dag,\n form=form,\n width=request.args.get('width', \"100%\"),\n height=request.args.get('height', \"800\"),\n execution_date=dttm.isoformat(),\n state_token=state_token(dt_nr_dr_data['dr_state']),\n doc_md=doc_md,\n arrange=arrange,\n operators=sorted(\n list(set([op.__class__ for op in dag.tasks])),\n key=lambda x: x.__name__\n ),\n blur=blur,\n root=root or '',\n task_instances=json.dumps(task_instances, indent=2),\n tasks=json.dumps(tasks, indent=2),\n nodes=json.dumps(nodes, indent=2),\n edges=json.dumps(edges, indent=2), )\n\n @expose('/duration')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def duration(self, session=None):\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n if dag is None:\n flash('DAG \"{0}\" seems to be missing.'.format(dag_id), \"error\")\n return redirect('/admin/')\n\n if base_date:\n base_date = pendulum.parse(base_date)\n else:\n base_date = dag.latest_execution_date or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n cum_chart = nvd3.lineChart(\n name=\"cumLineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n\n y = defaultdict(list)\n x = defaultdict(list)\n cum_y = defaultdict(list)\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n TF = models.TaskFail\n ti_fails = (\n session\n .query(TF)\n .filter(\n TF.dag_id == dag.dag_id,\n TF.execution_date >= min_date,\n TF.execution_date <= base_date,\n TF.task_id.in_([t.task_id for t in dag.tasks]))\n .all()\n )\n\n fails_totals = defaultdict(int)\n for tf in ti_fails:\n dict_key = (tf.dag_id, tf.task_id, tf.execution_date)\n fails_totals[dict_key] += tf.duration\n\n for ti in tis:\n if ti.duration:\n dttm = wwwutils.epoch(ti.execution_date)\n x[ti.task_id].append(dttm)\n y[ti.task_id].append(float(ti.duration))\n fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)\n fails_total = fails_totals[fails_dict_key]\n cum_y[ti.task_id].append(float(ti.duration + fails_total))\n\n # determine the most relevant time unit for the set of task instance\n # durations for the DAG\n y_unit = infer_time_unit([d for t in y.values() for d in t])\n cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])\n # update the y Axis on both charts to have the correct time units\n chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Duration 
({})'.format(y_unit))\n chart.axislist['yAxis']['axisLabelDistance'] = '40'\n cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Duration ({})'.format(cum_y_unit))\n cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'\n for task in dag.tasks:\n if x[task.task_id]:\n chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(y[task.task_id], y_unit))\n cum_chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(cum_y[task.task_id],\n cum_y_unit))\n\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n chart.buildcontent()\n cum_chart.buildcontent()\n s_index = cum_chart.htmlcontent.rfind('});')\n cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +\n \"$(function() {$( document ).trigger('chartload') })\" +\n cum_chart.htmlcontent[s_index:])\n\n return self.render(\n 'airflow/duration_chart.html',\n dag=dag,\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n chart=chart.htmlcontent,\n cum_chart=cum_chart.htmlcontent\n )\n\n @expose('/tries')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def tries(self, session=None):\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n if base_date:\n base_date = pendulum.parse(base_date)\n else:\n base_date = dag.latest_execution_date or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, y_axis_format='d', height=chart_height,\n width=\"1200\")\n\n for task in dag.tasks:\n y = []\n x = []\n for ti in task.get_task_instances(session, start_date=min_date,\n end_date=base_date):\n dttm = wwwutils.epoch(ti.execution_date)\n x.append(dttm)\n y.append(ti.try_number)\n if x:\n chart.add_serie(name=task.task_id, x=x, y=y)\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n tries = sorted(list({ti.try_number for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if tries else None\n\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n\n chart.buildcontent()\n\n return self.render(\n 'airflow/chart.html',\n dag=dag,\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n chart=chart.htmlcontent\n )\n\n @expose('/landing_times')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def landing_times(self, session=None):\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n if base_date:\n base_date = pendulum.parse(base_date)\n else:\n base_date = dag.latest_execution_date or timezone.utcnow()\n\n dates = 
dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n y = {}\n x = {}\n for task in dag.tasks:\n y[task.task_id] = []\n x[task.task_id] = []\n for ti in task.get_task_instances(session, start_date=min_date,\n end_date=base_date):\n if ti.end_date:\n ts = ti.execution_date\n following_schedule = dag.following_schedule(ts)\n if dag.schedule_interval and following_schedule:\n ts = following_schedule\n\n dttm = wwwutils.epoch(ti.execution_date)\n secs = (ti.end_date - ts).total_seconds()\n x[ti.task_id].append(dttm)\n y[ti.task_id].append(secs)\n\n # determine the most relevant time unit for the set of landing times\n # for the DAG\n y_unit = infer_time_unit([d for t in y.values() for d in t])\n # update the y Axis to have the correct time units\n chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Landing Time ({})'.format(y_unit))\n chart.axislist['yAxis']['axisLabelDistance'] = '40'\n for task in dag.tasks:\n if x[task.task_id]:\n chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(y[task.task_id], y_unit))\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n chart.buildcontent()\n return self.render(\n 'airflow/chart.html',\n dag=dag,\n chart=chart.htmlcontent,\n height=str(chart_height + 100) + \"px\",\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n )\n\n @expose('/paused', methods=['POST'])\n @login_required\n @wwwutils.action_logging\n @provide_session\n def paused(self, session=None):\n DagModel = models.DagModel\n dag_id = request.args.get('dag_id')\n orm_dag = session.query(\n DagModel).filter(DagModel.dag_id == dag_id).first()\n if request.args.get('is_paused') == 'false':\n orm_dag.is_paused = True\n else:\n orm_dag.is_paused = False\n session.merge(orm_dag)\n session.commit()\n\n dagbag.get_dag(dag_id)\n return \"OK\"\n\n @expose('/refresh')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def refresh(self, session=None):\n DagModel = models.DagModel\n dag_id = request.args.get('dag_id')\n orm_dag = session.query(\n DagModel).filter(DagModel.dag_id == dag_id).first()\n\n if orm_dag:\n orm_dag.last_expired = timezone.utcnow()\n session.merge(orm_dag)\n session.commit()\n\n dagbag.get_dag(dag_id)\n flash(\"DAG [{}] is now fresh as a daisy\".format(dag_id))\n return redirect(request.referrer)\n\n @expose('/refresh_all')\n @login_required\n @wwwutils.action_logging\n def refresh_all(self):\n dagbag.collect_dags(only_if_updated=False)\n flash(\"All DAGs are now up to date\")\n return redirect('/')\n\n @expose('/gantt')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def gantt(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n demo_mode = conf.getboolean('webserver', 'demo_mode')\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n dt_nr_dr_data = 
get_date_time_num_runs_dag_runs_form_data(request, session, dag)\n dttm = dt_nr_dr_data['dttm']\n\n form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)\n form.execution_date.choices = dt_nr_dr_data['dr_choices']\n\n tis = [\n ti for ti in dag.get_task_instances(session, dttm, dttm)\n if ti.start_date]\n tis = sorted(tis, key=lambda ti: ti.start_date)\n TF = models.TaskFail\n ti_fails = list(itertools.chain(*[(\n session\n .query(TF)\n .filter(TF.dag_id == ti.dag_id,\n TF.task_id == ti.task_id,\n TF.execution_date == ti.execution_date)\n .all()\n ) for ti in tis]))\n TR = models.TaskReschedule\n ti_reschedules = list(itertools.chain(*[(\n session\n .query(TR)\n .filter(TR.dag_id == ti.dag_id,\n TR.task_id == ti.task_id,\n TR.execution_date == ti.execution_date)\n .all()\n ) for ti in tis]))\n # determine bars to show in the gantt chart\n # all reschedules of one attempt are combinded into one bar\n gantt_bar_items = []\n for task_id, items in itertools.groupby(\n sorted(tis + ti_fails + ti_reschedules, key=lambda ti: ti.task_id),\n key=lambda ti: ti.task_id):\n start_date = None\n for i in sorted(items, key=lambda ti: ti.start_date):\n start_date = start_date or i.start_date\n end_date = i.end_date or timezone.utcnow()\n if type(i) == models.TaskInstance:\n gantt_bar_items.append((task_id, start_date, end_date, i.state))\n start_date = None\n elif type(i) == TF and (len(gantt_bar_items) == 0 or\n end_date != gantt_bar_items[-1][2]):\n gantt_bar_items.append((task_id, start_date, end_date, State.FAILED))\n start_date = None\n\n tasks = []\n for gantt_bar_item in gantt_bar_items:\n task_id = gantt_bar_item[0]\n start_date = gantt_bar_item[1]\n end_date = gantt_bar_item[2]\n state = gantt_bar_item[3]\n tasks.append({\n 'startDate': wwwutils.epoch(start_date),\n 'endDate': wwwutils.epoch(end_date),\n 'isoStart': start_date.isoformat()[:-4],\n 'isoEnd': end_date.isoformat()[:-4],\n 'taskName': task_id,\n 'duration': \"{}\".format(end_date - start_date)[:-4],\n 'status': state,\n 'executionDate': dttm.isoformat(),\n })\n states = {task['status']: task['status'] for task in tasks}\n data = {\n 'taskNames': [ti.task_id for ti in tis],\n 'tasks': tasks,\n 'taskStatus': states,\n 'height': len(tis) * 25 + 25,\n }\n\n session.commit()\n\n return self.render(\n 'airflow/gantt.html',\n dag=dag,\n execution_date=dttm.isoformat(),\n form=form,\n data=json.dumps(data, indent=2),\n base_date='',\n demo_mode=demo_mode,\n root=root,\n )\n\n @expose('/object/task_instances')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def task_instances(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = pendulum.parse(dttm)\n else:\n return \"Error: Invalid execution_date\"\n\n task_instances = {\n ti.task_id: alchemy_to_dict(ti)\n for ti in dag.get_task_instances(session, dttm, dttm)}\n\n return json.dumps(task_instances)\n\n @expose('/variables/<form>', methods=[\"GET\", \"POST\"])\n @login_required\n @wwwutils.action_logging\n def variables(self, form):\n try:\n if request.method == 'POST':\n data = request.json\n if data:\n with create_session() as session:\n var = models.Variable(key=form, val=json.dumps(data))\n session.add(var)\n session.commit()\n return \"\"\n else:\n return self.render(\n 'airflow/variables/{}.html'.format(form)\n )\n except Exception:\n # prevent XSS\n form = escape(form)\n return (\"Error: form airflow/variables/{}.html \"\n \"not found.\").format(form), 404\n\n 
@expose('/varimport', methods=[\"GET\", \"POST\"])\n @login_required\n @wwwutils.action_logging\n def varimport(self):\n try:\n d = json.load(UTF8_READER(request.files['file']))\n except Exception as e:\n flash(\"Missing file or syntax error: {}.\".format(e))\n else:\n suc_count = fail_count = 0\n for k, v in d.items():\n try:\n models.Variable.set(k, v, serialize_json=isinstance(v, dict))\n except Exception as e:\n logging.info('Variable import failed: {}'.format(repr(e)))\n fail_count += 1\n else:\n suc_count += 1\n flash(\"{} variable(s) successfully updated.\".format(suc_count), 'info')\n if fail_count:\n flash(\n \"{} variables(s) failed to be updated.\".format(fail_count), 'error')\n\n return redirect('/admin/variable')\n\n\nclass HomeView(AdminIndexView):\n @expose(\"/\")\n @login_required\n @provide_session\n def index(self, session=None):\n DM = models.DagModel\n\n # restrict the dags shown if filter_by_owner and current user is not superuser\n do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())\n owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()\n\n hide_paused_dags_by_default = conf.getboolean('webserver',\n 'hide_paused_dags_by_default')\n show_paused_arg = request.args.get('showPaused', 'None')\n\n def get_int_arg(value, default=0):\n try:\n return int(value)\n except ValueError:\n return default\n\n arg_current_page = request.args.get('page', '0')\n arg_search_query = request.args.get('search', None)\n\n dags_per_page = PAGE_SIZE\n current_page = get_int_arg(arg_current_page, default=0)\n\n if show_paused_arg.strip().lower() == 'false':\n hide_paused = True\n elif show_paused_arg.strip().lower() == 'true':\n hide_paused = False\n else:\n hide_paused = hide_paused_dags_by_default\n\n # read orm_dags from the db\n sql_query = session.query(DM)\n\n if do_filter and owner_mode == 'ldapgroup':\n sql_query = sql_query.filter(\n ~DM.is_subdag,\n DM.is_active,\n DM.owners.in_(current_user.ldap_groups)\n )\n elif do_filter and owner_mode == 'user':\n sql_query = sql_query.filter(\n ~DM.is_subdag, DM.is_active,\n DM.owners == current_user.user.username\n )\n else:\n sql_query = sql_query.filter(\n ~DM.is_subdag, DM.is_active\n )\n\n # optionally filter out \"paused\" dags\n if hide_paused:\n sql_query = sql_query.filter(~DM.is_paused)\n\n orm_dags = {dag.dag_id: dag for dag\n in sql_query\n .all()}\n\n import_errors = session.query(models.ImportError).all()\n for ie in import_errors:\n flash(\n \"Broken DAG: [{ie.filename}] {ie.stacktrace}\".format(ie=ie),\n \"error\")\n\n # get a list of all non-subdag dags visible to everyone\n # optionally filter out \"paused\" dags\n if hide_paused:\n unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if\n not dag.parent_dag and not dag.is_paused]\n\n else:\n unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if\n not dag.parent_dag]\n\n # optionally filter to get only dags that the user should see\n if do_filter and owner_mode == 'ldapgroup':\n # only show dags owned by someone in @current_user.ldap_groups\n webserver_dags = {\n dag.dag_id: dag\n for dag in unfiltered_webserver_dags\n if dag.owner in current_user.ldap_groups\n }\n elif do_filter and owner_mode == 'user':\n # only show dags owned by @current_user.user.username\n webserver_dags = {\n dag.dag_id: dag\n for dag in unfiltered_webserver_dags\n if dag.owner == current_user.user.username\n }\n else:\n webserver_dags = {\n dag.dag_id: dag\n for dag in unfiltered_webserver_dags\n }\n\n if arg_search_query:\n lower_search_query = 
arg_search_query.lower()\n # filter by dag_id\n webserver_dags_filtered = {\n dag_id: dag\n for dag_id, dag in webserver_dags.items()\n if (lower_search_query in dag_id.lower() or\n lower_search_query in dag.owner.lower())\n }\n\n all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()\n if lower_search_query in dag.dag_id.lower() or\n lower_search_query in dag.owners.lower()]) |\n set(webserver_dags_filtered.keys()))\n\n sorted_dag_ids = sorted(all_dag_ids)\n else:\n webserver_dags_filtered = webserver_dags\n sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))\n\n start = current_page * dags_per_page\n end = start + dags_per_page\n\n num_of_all_dags = len(sorted_dag_ids)\n page_dag_ids = sorted_dag_ids[start:end]\n num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))\n\n auto_complete_data = set()\n for dag in webserver_dags_filtered.values():\n auto_complete_data.add(dag.dag_id)\n auto_complete_data.add(dag.owner)\n for dag in orm_dags.values():\n auto_complete_data.add(dag.dag_id)\n auto_complete_data.add(dag.owners)\n\n return self.render(\n 'airflow/dags.html',\n webserver_dags=webserver_dags_filtered,\n orm_dags=orm_dags,\n hide_paused=hide_paused,\n current_page=current_page,\n search_query=arg_search_query if arg_search_query else '',\n page_size=dags_per_page,\n num_of_pages=num_of_pages,\n num_dag_from=start + 1,\n num_dag_to=min(end, num_of_all_dags),\n num_of_all_dags=num_of_all_dags,\n paging=wwwutils.generate_pages(current_page, num_of_pages,\n search=arg_search_query,\n showPaused=not hide_paused),\n dag_ids_in_page=page_dag_ids,\n auto_complete_data=auto_complete_data)\n\n\nclass QueryView(wwwutils.DataProfilingMixin, BaseView):\n @expose('/', methods=['POST', 'GET'])\n @wwwutils.gzipped\n @provide_session\n def query(self, session=None):\n dbs = session.query(models.Connection).order_by(\n models.Connection.conn_id).all()\n session.expunge_all()\n db_choices = list(\n ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))\n conn_id_str = request.form.get('conn_id')\n csv = request.form.get('csv') == \"true\"\n sql = request.form.get('sql')\n\n class QueryForm(Form):\n conn_id = SelectField(\"Layout\", choices=db_choices)\n sql = TextAreaField(\"SQL\", widget=wwwutils.AceEditorWidget())\n\n data = {\n 'conn_id': conn_id_str,\n 'sql': sql,\n }\n results = None\n has_data = False\n error = False\n if conn_id_str:\n db = [db for db in dbs if db.conn_id == conn_id_str][0]\n hook = db.get_hook()\n try:\n df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))\n # df = hook.get_pandas_df(sql)\n has_data = len(df) > 0\n df = df.fillna('')\n results = df.to_html(\n classes=[\n 'table', 'table-bordered', 'table-striped', 'no-wrap'],\n index=False,\n na_rep='',\n ) if has_data else ''\n except Exception as e:\n flash(str(e), 'error')\n error = True\n\n if has_data and len(df) == QUERY_LIMIT:\n flash(\n \"Query output truncated at \" + str(QUERY_LIMIT) +\n \" rows\", 'info')\n\n if not has_data and error:\n flash('No data', 'error')\n\n if csv:\n return Response(\n response=df.to_csv(index=False),\n status=200,\n mimetype=\"application/text\")\n\n form = QueryForm(request.form, data=data)\n session.commit()\n return self.render(\n 'airflow/query.html', form=form,\n title=\"Ad Hoc Query\",\n results=results or '',\n has_data=has_data)\n\n\nclass AirflowModelView(ModelView):\n list_template = 'airflow/model_list.html'\n edit_template = 'airflow/model_edit.html'\n create_template = 'airflow/model_create.html'\n 
column_display_actions = True\n page_size = PAGE_SIZE\n\n\nclass ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):\n \"\"\"\n Modifying the base ModelView class for non edit, browse only operations\n \"\"\"\n named_filter_urls = True\n can_create = False\n can_edit = False\n can_delete = False\n column_display_pk = True\n\n\nclass PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):\n column_list = ('pool', 'slots', 'used_slots', 'queued_slots')\n column_formatters = dict(\n pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)\n named_filter_urls = True\n form_args = {\n 'pool': {\n 'validators': [\n validators.DataRequired(),\n ]\n }\n }\n\n\nclass SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):\n verbose_name_plural = \"SLA misses\"\n verbose_name = \"SLA miss\"\n column_list = (\n 'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')\n column_formatters = dict(\n task_id=task_instance_link,\n execution_date=datetime_f,\n timestamp=datetime_f,\n dag_id=dag_link)\n named_filter_urls = True\n column_searchable_list = ('dag_id', 'task_id',)\n column_filters = (\n 'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')\n filter_converter = wwwutils.UtcFilterConverter()\n form_widget_args = {\n 'email_sent': {'disabled': True},\n 'timestamp': {'disabled': True},\n }\n\n\n@provide_session\ndef _connection_ids(session=None):\n return [(c.conn_id, c.conn_id) for c in (\n session\n .query(models.Connection.conn_id)\n .group_by(models.Connection.conn_id))]\n\n\nclass ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):\n verbose_name = \"chart\"\n verbose_name_plural = \"charts\"\n form_columns = (\n 'label',\n 'owner',\n 'conn_id',\n 'chart_type',\n 'show_datatable',\n 'x_is_date',\n 'y_log_scale',\n 'show_sql',\n 'height',\n 'sql_layout',\n 'sql',\n 'default_params',\n )\n column_list = (\n 'label',\n 'conn_id',\n 'chart_type',\n 'owner',\n 'last_modified',\n )\n column_sortable_list = (\n 'label',\n 'conn_id',\n 'chart_type',\n ('owner', 'owner.username'),\n 'last_modified',\n )\n column_formatters = dict(label=label_link, last_modified=datetime_f)\n column_default_sort = ('last_modified', True)\n create_template = 'airflow/chart/create.html'\n edit_template = 'airflow/chart/edit.html'\n column_filters = ('label', 'owner.username', 'conn_id')\n column_searchable_list = ('owner.username', 'label', 'sql')\n column_descriptions = {\n 'label': \"Can include {{ templated_fields }} and {{ macros }}\",\n 'chart_type': \"The type of chart to be displayed\",\n 'sql': \"Can include {{ templated_fields }} and {{ macros }}.\",\n 'height': \"Height of the chart, in pixels.\",\n 'conn_id': \"Source database to run the query against\",\n 'x_is_date': (\n \"Whether the X axis should be casted as a date field. Expect most \"\n \"intelligible date formats to get casted properly.\"\n ),\n 'owner': (\n \"The chart's owner, mostly used for reference and filtering in \"\n \"the list view.\"\n ),\n 'show_datatable':\n \"Whether to display an interactive data table under the chart.\",\n 'default_params': (\n 'A dictionary of {\"key\": \"values\",} that define what the '\n 'templated fields (parameters) values should be by default. '\n 'To be valid, it needs to \"eval\" as a Python dict. 
'\n 'The key values will show up in the url\\'s querystring '\n 'and can be altered there.'\n ),\n 'show_sql': \"Whether to display the SQL statement as a collapsible \"\n \"section in the chart page.\",\n 'y_log_scale': \"Whether to use a log scale for the Y axis.\",\n 'sql_layout': (\n \"Defines the layout of the SQL that the application should \"\n \"expect. Depending on the tables you are sourcing from, it may \"\n \"make more sense to pivot / unpivot the metrics.\"\n ),\n }\n column_labels = {\n 'sql': \"SQL\",\n 'height': \"Chart Height\",\n 'sql_layout': \"SQL Layout\",\n 'show_sql': \"Display the SQL Statement\",\n 'default_params': \"Default Parameters\",\n }\n form_choices = {\n 'chart_type': [\n ('line', 'Line Chart'),\n ('spline', 'Spline Chart'),\n ('bar', 'Bar Chart'),\n ('column', 'Column Chart'),\n ('area', 'Overlapping Area Chart'),\n ('stacked_area', 'Stacked Area Chart'),\n ('percent_area', 'Percent Area Chart'),\n ('datatable', 'No chart, data table only'),\n ],\n 'sql_layout': [\n ('series', 'SELECT series, x, y FROM ...'),\n ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),\n ],\n 'conn_id': _connection_ids()\n }\n\n def on_model_change(self, form, model, is_created=True):\n if model.iteration_no is None:\n model.iteration_no = 0\n else:\n model.iteration_no += 1\n if not model.user_id and current_user and hasattr(current_user, 'id'):\n model.user_id = current_user.id\n model.last_modified = timezone.utcnow()\n\n\nchart_mapping = (\n ('line', 'lineChart'),\n ('spline', 'lineChart'),\n ('bar', 'multiBarChart'),\n ('column', 'multiBarChart'),\n ('area', 'stackedAreaChart'),\n ('stacked_area', 'stackedAreaChart'),\n ('percent_area', 'stackedAreaChart'),\n ('datatable', 'datatable'),\n)\nchart_mapping = dict(chart_mapping)\n\n\nclass KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):\n verbose_name = \"known event\"\n verbose_name_plural = \"known events\"\n form_columns = (\n 'label',\n 'event_type',\n 'start_date',\n 'end_date',\n 'reported_by',\n 'description',\n )\n form_args = {\n 'label': {\n 'validators': [\n validators.DataRequired(),\n ],\n },\n 'event_type': {\n 'validators': [\n validators.DataRequired(),\n ],\n },\n 'start_date': {\n 'validators': [\n validators.DataRequired(),\n ],\n 'filters': [\n parse_datetime_f,\n ],\n },\n 'end_date': {\n 'validators': [\n validators.DataRequired(),\n GreaterEqualThan(fieldname='start_date'),\n ],\n 'filters': [\n parse_datetime_f,\n ]\n },\n 'reported_by': {\n 'validators': [\n validators.DataRequired(),\n ],\n }\n }\n column_list = (\n 'label',\n 'event_type',\n 'start_date',\n 'end_date',\n 'reported_by',\n )\n column_default_sort = (\"start_date\", True)\n column_sortable_list = (\n 'label',\n # todo: yes this has a spelling error\n ('event_type', 'event_type.know_event_type'),\n 'start_date',\n 'end_date',\n ('reported_by', 'reported_by.username'),\n )\n filter_converter = wwwutils.UtcFilterConverter()\n form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField)\n\n\nclass KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):\n pass\n\n\n# NOTE: For debugging / troubleshooting\n# mv = KnowEventTypeView(\n# models.KnownEventType,\n# Session, name=\"Known Event Types\", category=\"Manage\")\n# admin.add_view(mv)\n# class DagPickleView(SuperUserMixin, ModelView):\n# pass\n# mv = DagPickleView(\n# models.DagPickle,\n# Session, name=\"Pickles\", category=\"Manage\")\n# admin.add_view(mv)\n\n\nclass VariableView(wwwutils.DataProfilingMixin, AirflowModelView):\n 
verbose_name = \"Variable\"\n verbose_name_plural = \"Variables\"\n list_template = 'airflow/variable_list.html'\n\n def hidden_field_formatter(view, context, model, name):\n if wwwutils.should_hide_value_for_key(model.key):\n return Markup('*' * 8)\n val = getattr(model, name)\n if val:\n return val\n else:\n return Markup('<span class=\"label label-danger\">Invalid</span>')\n\n form_columns = (\n 'key',\n 'val',\n )\n column_list = ('key', 'val', 'is_encrypted',)\n column_filters = ('key', 'val')\n column_searchable_list = ('key', 'val', 'is_encrypted',)\n column_default_sort = ('key', False)\n form_widget_args = {\n 'is_encrypted': {'disabled': True},\n 'val': {\n 'rows': 20,\n }\n }\n form_args = {\n 'key': {\n 'validators': {\n validators.DataRequired(),\n },\n },\n }\n column_sortable_list = (\n 'key',\n 'val',\n 'is_encrypted',\n )\n column_formatters = {\n 'val': hidden_field_formatter,\n }\n\n # Default flask-admin export functionality doesn't handle serialized json\n @action('varexport', 'Export', None)\n @provide_session\n def action_varexport(self, ids, session=None):\n V = models.Variable\n qry = session.query(V).filter(V.id.in_(ids)).all()\n\n var_dict = {}\n d = json.JSONDecoder()\n for var in qry:\n val = None\n try:\n val = d.decode(var.val)\n except Exception:\n val = var.val\n var_dict[var.key] = val\n\n response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))\n response.headers[\"Content-Disposition\"] = \"attachment; filename=variables.json\"\n return response\n\n def on_form_prefill(self, form, id):\n if wwwutils.should_hide_value_for_key(form.key.data):\n form.val.data = '*' * 8\n\n\nclass XComView(wwwutils.SuperUserMixin, AirflowModelView):\n verbose_name = \"XCom\"\n verbose_name_plural = \"XComs\"\n\n form_columns = (\n 'key',\n 'value',\n 'execution_date',\n 'task_id',\n 'dag_id',\n )\n\n form_extra_fields = {\n 'value': StringField('Value'),\n }\n\n form_args = {\n 'execution_date': {\n 'filters': [\n parse_datetime_f,\n ]\n }\n }\n\n column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')\n column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')\n filter_converter = wwwutils.UtcFilterConverter()\n form_overrides = dict(execution_date=DateTimeField)\n\n\nclass JobModelView(ModelViewOnly):\n verbose_name_plural = \"jobs\"\n verbose_name = \"job\"\n column_display_actions = False\n column_default_sort = ('start_date', True)\n column_filters = (\n 'job_type', 'dag_id', 'state',\n 'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')\n column_formatters = dict(\n start_date=datetime_f,\n end_date=datetime_f,\n hostname=nobr_f,\n state=state_f,\n latest_heartbeat=datetime_f)\n filter_converter = wwwutils.UtcFilterConverter()\n\n\nclass DagRunModelView(ModelViewOnly):\n verbose_name_plural = \"DAG Runs\"\n can_edit = True\n can_create = True\n column_editable_list = ('state',)\n verbose_name = \"dag run\"\n column_default_sort = ('execution_date', True)\n form_choices = {\n 'state': [\n ('success', 'success'),\n ('running', 'running'),\n ('failed', 'failed'),\n ],\n }\n form_args = dict(\n dag_id=dict(validators=[validators.DataRequired()])\n )\n column_list = (\n 'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')\n column_filters = column_list\n filter_converter = wwwutils.UtcFilterConverter()\n column_searchable_list = ('dag_id', 'state', 'run_id')\n column_formatters = dict(\n execution_date=datetime_f,\n state=state_f,\n start_date=datetime_f,\n dag_id=dag_link,\n 
run_id=dag_run_link\n )\n\n @action('new_delete', \"Delete\", \"Are you sure you want to delete selected records?\")\n @provide_session\n def action_new_delete(self, ids, session=None):\n deleted = set(session.query(models.DagRun)\n .filter(models.DagRun.id.in_(ids))\n .all())\n session.query(models.DagRun) \\\n .filter(models.DagRun.id.in_(ids)) \\\n .delete(synchronize_session='fetch')\n session.commit()\n dirty_ids = []\n for row in deleted:\n dirty_ids.append(row.dag_id)\n models.DagStat.update(dirty_ids, dirty_only=False, session=session)\n\n @action('set_running', \"Set state to 'running'\", None)\n @provide_session\n def action_set_running(self, ids, session=None):\n try:\n DR = models.DagRun\n count = 0\n dirty_ids = []\n for dr in session.query(DR).filter(DR.id.in_(ids)).all():\n dirty_ids.append(dr.dag_id)\n count += 1\n dr.state = State.RUNNING\n dr.start_date = timezone.utcnow()\n models.DagStat.update(dirty_ids, session=session)\n flash(\n \"{count} dag runs were set to running\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to set state', 'error')\n\n @action('set_failed', \"Set state to 'failed'\",\n \"All running task instances would also be marked as failed, are you sure?\")\n @provide_session\n def action_set_failed(self, ids, session=None):\n try:\n DR = models.DagRun\n count = 0\n dirty_ids = []\n altered_tis = []\n for dr in session.query(DR).filter(DR.id.in_(ids)).all():\n dirty_ids.append(dr.dag_id)\n count += 1\n altered_tis += \\\n set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),\n dr.execution_date,\n commit=True,\n session=session)\n models.DagStat.update(dirty_ids, session=session)\n altered_ti_count = len(altered_tis)\n flash(\n \"{count} dag runs and {altered_ti_count} task instances \"\n \"were set to failed\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to set state', 'error')\n\n @action('set_success', \"Set state to 'success'\",\n \"All task instances would also be marked as success, are you sure?\")\n @provide_session\n def action_set_success(self, ids, session=None):\n try:\n DR = models.DagRun\n count = 0\n dirty_ids = []\n altered_tis = []\n for dr in session.query(DR).filter(DR.id.in_(ids)).all():\n dirty_ids.append(dr.dag_id)\n count += 1\n altered_tis += \\\n set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),\n dr.execution_date,\n commit=True,\n session=session)\n models.DagStat.update(dirty_ids, session=session)\n altered_ti_count = len(altered_tis)\n flash(\n \"{count} dag runs and {altered_ti_count} task instances \"\n \"were set to success\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to set state', 'error')\n\n # Called after editing DagRun model in the UI.\n @provide_session\n def after_model_change(self, form, dagrun, is_created, session=None):\n altered_tis = []\n if dagrun.state == State.SUCCESS:\n altered_tis = set_dag_run_state_to_success(\n dagbag.get_dag(dagrun.dag_id),\n dagrun.execution_date,\n commit=True,\n session=session)\n elif dagrun.state == State.FAILED:\n altered_tis = set_dag_run_state_to_failed(\n dagbag.get_dag(dagrun.dag_id),\n dagrun.execution_date,\n commit=True,\n session=session)\n elif dagrun.state == State.RUNNING:\n altered_tis = set_dag_run_state_to_running(\n dagbag.get_dag(dagrun.dag_id),\n dagrun.execution_date,\n commit=True,\n 
session=session)\n\n altered_ti_count = len(altered_tis)\n models.DagStat.update([dagrun.dag_id], session=session)\n flash(\n \"1 dag run and {altered_ti_count} task instances \"\n \"were set to '{dagrun.state}'\".format(**locals()))\n\n\nclass LogModelView(ModelViewOnly):\n verbose_name_plural = \"logs\"\n verbose_name = \"log\"\n column_display_actions = False\n column_default_sort = ('dttm', True)\n column_filters = ('dag_id', 'task_id', 'execution_date', 'extra')\n filter_converter = wwwutils.UtcFilterConverter()\n column_formatters = dict(\n dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)\n\n\nclass TaskInstanceModelView(ModelViewOnly):\n verbose_name_plural = \"task instances\"\n verbose_name = \"task instance\"\n column_filters = (\n 'state', 'dag_id', 'task_id', 'execution_date', 'hostname',\n 'queue', 'pool', 'operator', 'start_date', 'end_date')\n filter_converter = wwwutils.UtcFilterConverter()\n named_filter_urls = True\n column_formatters = dict(\n log_url=log_url_formatter,\n task_id=task_instance_link,\n hostname=nobr_f,\n state=state_f,\n execution_date=datetime_f,\n start_date=datetime_f,\n end_date=datetime_f,\n queued_dttm=datetime_f,\n dag_id=dag_link,\n run_id=dag_run_link,\n duration=duration_f)\n column_searchable_list = ('dag_id', 'task_id', 'state')\n column_default_sort = ('job_id', True)\n form_choices = {\n 'state': [\n ('success', 'success'),\n ('running', 'running'),\n ('failed', 'failed'),\n ],\n }\n column_list = (\n 'state', 'dag_id', 'task_id', 'execution_date', 'operator',\n 'start_date', 'end_date', 'duration', 'job_id', 'hostname',\n 'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',\n 'pool', 'log_url')\n page_size = PAGE_SIZE\n\n @action('set_running', \"Set state to 'running'\", None)\n def action_set_running(self, ids):\n self.set_task_instance_state(ids, State.RUNNING)\n\n @action('set_failed', \"Set state to 'failed'\", None)\n def action_set_failed(self, ids):\n self.set_task_instance_state(ids, State.FAILED)\n\n @action('set_success', \"Set state to 'success'\", None)\n def action_set_success(self, ids):\n self.set_task_instance_state(ids, State.SUCCESS)\n\n @action('set_retry', \"Set state to 'up_for_retry'\", None)\n def action_set_retry(self, ids):\n self.set_task_instance_state(ids, State.UP_FOR_RETRY)\n\n @provide_session\n @action('clear',\n lazy_gettext('Clear'),\n lazy_gettext(\n 'Are you sure you want to clear the state of the selected task instance(s)'\n ' and set their dagruns to the running state?'))\n def action_clear(self, ids, session=None):\n try:\n TI = models.TaskInstance\n\n dag_to_task_details = {}\n dag_to_tis = {}\n\n # Collect dags upfront as dagbag.get_dag() will reset the session\n for id_str in ids:\n task_id, dag_id, execution_date = iterdecode(id_str)\n dag = dagbag.get_dag(dag_id)\n task_details = dag_to_task_details.setdefault(dag, [])\n task_details.append((task_id, execution_date))\n\n for dag, task_details in dag_to_task_details.items():\n for task_id, execution_date in task_details:\n execution_date = parse_execution_date(execution_date)\n\n ti = session.query(TI).filter(TI.task_id == task_id,\n TI.dag_id == dag.dag_id,\n TI.execution_date == execution_date).one()\n\n tis = dag_to_tis.setdefault(dag, [])\n tis.append(ti)\n\n for dag, tis in dag_to_tis.items():\n models.clear_task_instances(tis, session, dag=dag)\n\n session.commit()\n\n flash(\"{0} task instances have been cleared\".format(len(ids)))\n\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise 
Exception(\"Ooops\")\n flash('Failed to clear task instances', 'error')\n\n @provide_session\n def set_task_instance_state(self, ids, target_state, session=None):\n try:\n TI = models.TaskInstance\n count = len(ids)\n for id in ids:\n task_id, dag_id, execution_date = iterdecode(id)\n execution_date = parse_execution_date(execution_date)\n\n ti = session.query(TI).filter(TI.task_id == task_id,\n TI.dag_id == dag_id,\n TI.execution_date == execution_date).one()\n ti.state = target_state\n session.commit()\n flash(\n \"{count} task instances were set to '{target_state}'\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to set state', 'error')\n\n def get_one(self, id):\n \"\"\"\n As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().\n\n TODO: this method should be removed once the below bug is fixed on Flask-Admin side.\n https://github.com/flask-admin/flask-admin/issues/1226\n \"\"\"\n task_id, dag_id, execution_date = iterdecode(id)\n execution_date = pendulum.parse(execution_date)\n return self.session.query(self.model).get((task_id, dag_id, execution_date))\n\n\nclass ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):\n create_template = 'airflow/conn_create.html'\n edit_template = 'airflow/conn_edit.html'\n list_template = 'airflow/conn_list.html'\n form_columns = (\n 'conn_id',\n 'conn_type',\n 'host',\n 'schema',\n 'login',\n 'password',\n 'port',\n 'extra',\n 'extra__jdbc__drv_path',\n 'extra__jdbc__drv_clsname',\n 'extra__google_cloud_platform__project',\n 'extra__google_cloud_platform__key_path',\n 'extra__google_cloud_platform__keyfile_dict',\n 'extra__google_cloud_platform__scope',\n )\n verbose_name = \"Connection\"\n verbose_name_plural = \"Connections\"\n column_default_sort = ('conn_id', False)\n column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)\n form_overrides = dict(_password=PasswordField, _extra=TextAreaField)\n form_widget_args = {\n 'is_extra_encrypted': {'disabled': True},\n 'is_encrypted': {'disabled': True},\n }\n # Used to customized the form, the forms elements get rendered\n # and results are stored in the extra field as json. All of these\n # need to be prefixed with extra__ and then the conn_type ___ as in\n # extra__{conn_type}__name. 
You can also hide form elements and rename\n # others from the connection_form.js file\n form_extra_fields = {\n 'extra__jdbc__drv_path': StringField('Driver Path'),\n 'extra__jdbc__drv_clsname': StringField('Driver Class'),\n 'extra__google_cloud_platform__project': StringField('Project Id'),\n 'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),\n 'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),\n 'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),\n }\n form_choices = {\n 'conn_type': models.Connection._types\n }\n\n def on_model_change(self, form, model, is_created):\n formdata = form.data\n if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:\n extra = {\n key: formdata[key]\n for key in self.form_extra_fields.keys() if key in formdata}\n model.extra = json.dumps(extra)\n\n @classmethod\n def alert_fernet_key(cls):\n fk = None\n try:\n fk = conf.get('core', 'fernet_key')\n except Exception:\n pass\n return fk is None\n\n @classmethod\n def is_secure(cls):\n \"\"\"\n Used to display a message in the Connection list view making it clear\n that the passwords and `extra` field can't be encrypted.\n \"\"\"\n is_secure = False\n try:\n import cryptography # noqa F401\n conf.get('core', 'fernet_key')\n is_secure = True\n except Exception:\n pass\n return is_secure\n\n def on_form_prefill(self, form, id):\n try:\n d = json.loads(form.data.get('extra', '{}'))\n except Exception:\n d = {}\n\n for field in list(self.form_extra_fields.keys()):\n value = d.get(field, '')\n if value:\n field = getattr(form, field)\n field.data = value\n\n\nclass UserModelView(wwwutils.SuperUserMixin, AirflowModelView):\n verbose_name = \"User\"\n verbose_name_plural = \"Users\"\n column_default_sort = 'username'\n\n\nclass VersionView(wwwutils.SuperUserMixin, BaseView):\n @expose('/')\n def version(self):\n # Look at the version from setup.py\n try:\n airflow_version = pkg_resources.require(\"apache-airflow\")[0].version\n except Exception as e:\n airflow_version = None\n logging.error(e)\n\n # Get the Git repo and git hash\n git_version = None\n try:\n with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:\n git_version = f.readline()\n except Exception as e:\n logging.error(e)\n\n # Render information\n title = \"Version Info\"\n return self.render('airflow/version.html',\n title=title,\n airflow_version=airflow_version,\n git_version=git_version)\n\n\nclass ConfigurationView(wwwutils.SuperUserMixin, BaseView):\n @expose('/')\n def conf(self):\n raw = request.args.get('raw') == \"true\"\n title = \"Airflow Configuration\"\n subtitle = conf.AIRFLOW_CONFIG\n if conf.getboolean(\"webserver\", \"expose_config\"):\n with open(conf.AIRFLOW_CONFIG, 'r') as f:\n config = f.read()\n table = [(section, key, value, source)\n for section, parameters in conf.as_dict(True, True).items()\n for key, (value, source) in parameters.items()]\n\n else:\n config = (\n \"# Your Airflow administrator chose not to expose the \"\n \"configuration, most likely for security reasons.\")\n table = None\n if raw:\n return Response(\n response=config,\n status=200,\n mimetype=\"application/text\")\n else:\n code_html = Markup(highlight(\n config,\n lexers.IniLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n return self.render(\n 'airflow/config.html',\n pre_subtitle=settings.HEADER + \" v\" + airflow.__version__,\n code_html=code_html, title=title, subtitle=subtitle,\n table=table)\n\n\nclass 
DagModelView(wwwutils.SuperUserMixin, ModelView):\n column_list = ('dag_id', 'owners')\n column_editable_list = ('is_paused',)\n form_excluded_columns = ('is_subdag', 'is_active')\n column_searchable_list = ('dag_id',)\n column_filters = (\n 'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',\n 'last_scheduler_run', 'last_expired')\n filter_converter = wwwutils.UtcFilterConverter()\n form_widget_args = {\n 'last_scheduler_run': {'disabled': True},\n 'fileloc': {'disabled': True},\n 'is_paused': {'disabled': True},\n 'last_pickled': {'disabled': True},\n 'pickle_id': {'disabled': True},\n 'last_loaded': {'disabled': True},\n 'last_expired': {'disabled': True},\n 'pickle_size': {'disabled': True},\n 'scheduler_lock': {'disabled': True},\n 'owners': {'disabled': True},\n }\n column_formatters = dict(\n dag_id=dag_link,\n )\n can_delete = False\n can_create = False\n page_size = PAGE_SIZE\n list_template = 'airflow/list_dags.html'\n named_filter_urls = True\n\n def get_query(self):\n \"\"\"\n Default filters for model\n \"\"\"\n return super(DagModelView, self)\\\n .get_query()\\\n .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))\\\n .filter(~models.DagModel.is_subdag)\n\n def get_count_query(self):\n \"\"\"\n Default filters for model\n \"\"\"\n return super(DagModelView, self)\\\n .get_count_query()\\\n .filter(models.DagModel.is_active)\\\n .filter(~models.DagModel.is_subdag)\n"
] | [
[
"pandas.set_option",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gkaramanolakis/ISWD | [
"41452f447284491cf8ade8e09f3bc4e314ec64f7"
] | [
"iswd/cotrain_experiments.py"
] | [
"#!/usr/bin/env python\nimport os\nfrom os.path import expanduser\nhome = expanduser(\"~\")\nimport sys\nimport h5py\nimport argparse\nfrom datetime import datetime\nfrom time import time\n\nimport numpy as np\nfrom numpy.random import permutation, seed\nfrom scipy.cluster.vq import kmeans\nimport glob\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.init import xavier_uniform\nfrom torch.nn.utils import clip_grad_norm\n\nfrom tensorboardX import SummaryWriter\n\nfrom sklearn.externals import joblib\nfrom copy import deepcopy\n\n\nfrom DataHandler import DataHandler\n\nsys.path.append(os.path.join(home, \"code/research_code/Spring_2018/TextModules/\"))\nfrom Evaluator import Evaluator\nfrom Logger import get_logger\nfrom model_library import AspectCLF, StudentBoWCLF, SeedCLF, smooth_cross_entropy\n\nall_semeval_domains = ['english_restaurants', 'spanish_restaurants', 'french_restaurants', 'russian_restaurants',\n 'dutch_restaurants', 'turkish_restaurants']\nall_domains = ['bags_and_cases', 'keyboards', 'boots', 'bluetooth', 'tv', 'vacuums']\n\nclass Trainer:\n def __init__(self, args):\n self.args = args\n self.comment = '_{}'.format(args.domain)\n if self.args.loss in ['SmoothCrossEntropy', \"KL\"]:\n self.args.one_hot = True\n else:\n self.args.one_hot = False\n self.datahandler = DataHandler(self.args)\n self.writer = SummaryWriter(log_dir=self.args.logdir)\n loggerfile = os.path.join(self.args.logdir, 'log.log')\n self.logger = get_logger(logfile=loggerfile)\n self.check_gpu()\n joblib.dump(self.args, os.path.join(self.args.logdir, 'args.pkl'))\n\n self.evaluator = Evaluator(args) \n if args.no_seed_weights:\n self.logger.info('NOT using seed weights...')\n seed_weights = None\n else:\n self.logger.info('USING seed weights...')\n seed_weights = self.datahandler.seed_w\n\n if args.no_pretrained_emb:\n self.logger.info('NOT using pretrained word embeddings...')\n pretrained_emb = None\n else:\n pretrained_emb = self.datahandler.w_emb\n\n if self.datahandler.num_aspects != self.args.num_aspects:\n self.logger.info(\"Automatically changing num_aspects from {} to {}\".format(self.args.num_aspects, self.datahandler.num_aspects))\n self.args.num_aspects = self.datahandler.num_aspects\n\n if args.model_type == 'embedding_based':\n self.logger.info('Model: Embeddings based Classifier')\n # prev model is loaded just to gather previous predictions and regularize the new model to\n # provide similar predictions.\n if args.memory_reg > 0:\n self.prev_model = AspectCLF(vocab_size=self.datahandler.vocab_size, pretrained_emb=pretrained_emb, emb_size=self.datahandler.emb_size,\n seed_encodings=None, seed_weights=seed_weights, num_aspects=self.args.num_aspects,\n num_seeds=args.num_seeds, fix_a_emb=False, fix_w_emb=args.fix_w_emb, attention=args.attention,\n deep_clf=args.deep_aspect_clf, enable_gpu=args.enable_gpu, cuda_device=args.cuda_device,\n emb_dropout=args.emb_dropout, batch_norm= args.batch_norm, use_bert=args.use_bert,\n bert_model=args.bert_model)\n self.model = AspectCLF(vocab_size=self.datahandler.vocab_size, pretrained_emb=pretrained_emb, emb_size=self.datahandler.emb_size,\n seed_encodings=None, seed_weights=seed_weights, num_aspects=self.args.num_aspects,\n num_seeds=args.num_seeds, fix_a_emb=False, fix_w_emb=args.fix_w_emb, attention=args.attention,\n deep_clf=args.deep_aspect_clf, enable_gpu=args.enable_gpu, cuda_device=args.cuda_device,\n emb_dropout=args.emb_dropout, batch_norm= args.batch_norm, 
use_bert=args.use_bert,\n bert_model=args.bert_model)\n elif args.model_type == 'bow_based':\n self.logger.info('Model: BoW Classifier')\n self.model = StudentBoWCLF(self.datahandler.id2word, self.datahandler.aspects_ids)\n else:\n raise(BaseException('unknown model type: {}'.format(args.model_type)))\n self.model = self.cuda(self.model)\n\n self.teacher = SeedCLF(self.datahandler.id2word, self.datahandler.aspects_ids, seed_weights,\n verbose=0, general_ind=self.datahandler.general_ind,\n hard_pred=args.hard_teacher_pred)\n\n self.optimizer = self.get_optimizer(args)\n if args.scheduler_gamma > 0:\n ms=args.bootstrap_epoch\n self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[ms, ms+1, ms+2, ms+3], gamma=args.scheduler_gamma)\n self.loss_fn = self.get_loss_fn(args)\n self.logger.info('Saving log at {}'.format(loggerfile))\n self.logger.debug('enable_gpu={}'.format(args.enable_gpu))\n self.epoch = -1\n self.results = []\n self.metric = self.args.target_metric\n self.best_score = -1.0\n self.best_test_score = -1.0\n self.epoch_results = {}\n if args.memory_reg > 0:\n self.memory_loss = self.get_memory_loss_fn(args)\n self.prev_model = self.cuda(self.prev_model)\n\n self.student_proba_train = None\n self.student_proba_dev = None\n self.student_proba_test = None\n self.labels_dev = None\n self.labels_test = None\n self.teacher_proba_train = None\n self.teacher_pred_dev = None\n self.teacher_pred_test = None\n self.disagreement = -1\n\n def check_gpu(self):\n if self.args.enable_gpu:\n torch.cuda.manual_seed(self.args.seed)\n if self.args.enable_gpu and not torch.cuda.is_available():\n raise(BaseException('CUDA is not supported in this machine. Please rerun by setting enable_gpu=False'))\n if torch.cuda.device_count() > 1:\n self.logger.info(\"Tip: You could use {} GPUs in this machine!\".format(torch.cuda.device_count()))\n\n def get_optimizer(self, args):\n if args.optimizer == 'Adam':\n optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n elif args.optimizer == 'Adadelta':\n optimizer = torch.optim.Adadelta(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n elif args.optimizer == 'SGD':\n optimizer = torch.optim.SGD(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)\n else:\n raise(NotImplementedError('unknown optimizer: {}'.format(args.optimizer)))\n return optimizer\n\n def get_loss_fn(self, args):\n if args.loss == 'CrossEntropy':\n loss_fn = nn.CrossEntropyLoss()\n elif args.loss == 'NLL':\n loss_fn = nn.NLLLoss()\n elif args.loss == 'SmoothCrossEntropy':\n loss_fn = smooth_cross_entropy\n elif args.loss == 'KL':\n loss_fn = nn.KLDivLoss()\n else:\n raise(NotImplementedError('unknown loss function: {}'.format(args.loss)))\n return loss_fn\n\n\n def get_memory_loss_fn(self, args):\n if args.memory_loss == 'CrossEntropy':\n loss_fn = nn.CrossEntropyLoss()\n elif args.memory_loss == 'NLL':\n loss_fn = nn.NLLLoss()\n elif args.memory_loss == 'SmoothCrossEntropy':\n loss_fn = smooth_cross_entropy\n elif args.memory_loss == 'KL':\n loss_fn = nn.KLDivLoss()\n else:\n raise(NotImplementedError('unknown loss function: {}'.format(args.loss)))\n return loss_fn\n\n def cuda(self, x):\n if self.args.enable_gpu:\n return x.cuda(self.args.cuda_device)\n else:\n return x\n\n def train(self):\n self.model.train()\n if self.args.memory_reg > 0:\n self.prev_model.eval()\n all_losses = []\n all_memory_losses = []\n all_preds = self.cuda(torch.Tensor())\n all_labels = 
[]\n self.teacher_scores_train = []\n self.teacher_seed_word_pred_train = []\n\n if args.scheduler_gamma > 0:\n self.scheduler.step()\n self.logger.info(\"Optimizing with lr={}\".format(self.optimizer.state_dict()['param_groups'][0]['lr']))\n\n if self.teacher.conf_mat is None:\n self.logger.info(\"TEACHER does NOT use confusion matrices\")\n else:\n self.logger.info(\"TEACHER uses confusion matrices\")\n for batch in self.datahandler.get_train_batches():\n self.optimizer.zero_grad()\n i = batch['ind']\n if (args.deep_aspect_clf in ['CNN', 'charCNN']) and i > batch['total'] - 20: #or\n # ignore really big segments when clf is CNN to avoid OOM error. \n break\n\n pred = self.model(batch)\n\n # I use different ids for the teacher, because if SWD=1, then the seed words are dropped from batch['ids']. \n teacher_scores, teacher_seed_word_pred = map(list, zip(*[self.teacher.predict_verbose(seg) for seg in batch['teacher_ids'].tolist()]))\n\n if self.args.loss not in [\"SmoothCrossEntropy\", \"KL\"]:\n label = np.argmax(teacher_scores, axis=1)\n all_labels.extend(list(label))\n label = self.cuda(Variable(torch.LongTensor(label)))\n else:\n # Convert the ground-truth aspect scores into probabilities summing to 1.\n label = teacher_scores\n all_labels.extend([np.argmax(l) for l in label])\n label = self.cuda(Variable(torch.Tensor(label)))\n label = F.softmax(label, dim=1)\n loss = self.loss_fn(pred, label)\n all_losses.append(loss.data.cpu().numpy())\n\n if args.memory_reg == 0.0:\n loss.backward()\n else:\n # Regularize the model to avoid forgetting the previous weights / predictions.\n prev_pred = F.softmax(self.prev_model(batch), dim=1)\n memory_loss = self.memory_loss(pred, prev_pred)\n all_memory_losses.append(memory_loss.data.cpu().numpy())\n total_loss = (1 - args.memory_reg) * loss + args.memory_reg * memory_loss\n loss += memory_loss\n total_loss.backward()\n\n self.optimizer.step()\n\n all_preds = torch.cat((all_preds, pred.data), dim=0)\n self.teacher_scores_train.extend(teacher_scores)\n self.teacher_seed_word_pred_train.extend(teacher_seed_word_pred)\n\n if (self.args.report_every != -1) and (i % self.args.report_every == 0) and (i > 0):\n avg_loss = np.mean(all_losses[-self.args.report_every:])\n avg_memory_loss = np.mean(all_memory_losses[-self.args.report_every:])\n if args.memory_reg == 0:\n self.logger.debug('[{}][{}:{}/{}]\\tLoss: {:f}'.format(self.args.domain, self.epoch, i, batch['total'], avg_loss))\n else:\n self.logger.debug('[{}][{}:{}/{}]\\tLoss: {:.5f}\\tMemory Loss: {:.5f}'.format(self.args.domain, self.epoch, i, batch['total'], avg_loss, avg_memory_loss))\n\n all_proba = all_preds.cpu().numpy()\n self.student_proba_train = all_proba\n max_prob, all_preds = all_preds.max(dim=1)\n all_preds = all_preds.cpu().numpy()\n avg_loss = np.mean(all_losses)\n res = self.evaluator.evaluate_group(all_preds, all_labels, all_proba, gt_classes=range(self.args.num_aspects),verbose=False)\n res['loss'] = avg_loss\n self.epoch_results['train'] = res\n self.writer.add_histogram('train_loss{}'.format(self.comment), np.array(all_losses), self.epoch, bins=100)\n\n # save disagreement\n s_pred_hard = np.argmax(self.student_proba_train, axis=1)\n t_pred_hard = np.argmax(self.teacher_scores_train, axis=1)\n self.disagreement = ((s_pred_hard != t_pred_hard).sum()) / float(s_pred_hard.shape[0])\n self.epoch_results['hard_disagreement'] = self.disagreement\n\n\n def update_teacher(self):\n # Use Maximum Likelihood Estimation to update the seed word confusion matrices.\n assert 
self.student_proba_train is not None, \"Student proba is None.\"\n assert self.teacher_scores_train is not None, \"Teacher scores is None.\"\n\n\n s_pred_hard = np.argmax(self.student_proba_train, axis=1)\n s_pred_soft = F.softmax(torch.Tensor(self.student_proba_train), dim=1).numpy()\n t_pred_hard = np.argmax(self.teacher_scores_train, axis=1)\n seed_word_occurences = np.array(self.teacher_seed_word_pred_train)\n teacher_answers = seed_word_occurences.sum(axis=1) > 0\n self.disagreement = ((s_pred_hard[teacher_answers] != t_pred_hard[teacher_answers]).sum()) / float(teacher_answers.sum())\n self.epoch_results['train_disagreement'] = self.disagreement\n\n K = self.args.num_aspects\n N = s_pred_hard.shape[0]\n\n # Initialize a zero confusion matrix for each seed word.\n conf_mat = {wid: np.zeros(K) for wid in self.teacher.seed_list}\n\n # Maximum Likelihood Estimation for the class priors\n self.q = np.array([np.sum(s_pred_hard == i) for i in range(K)]) / float(N)\n self.logger.info('Estimated class priors: {}'.format(\",\".join([\"{:.2f}\".format(x) for x in self.q])))\n\n # Maximum Likelihood Estimation for each confusion matrix\n for wid_i, wid in enumerate(self.teacher.seed_list):\n # keep the segments where this seed word has been activated\n relevant_ind = (seed_word_occurences[:, wid_i] > 0)\n pred_aspect = self.teacher.seed_dict[wid][0]\n\n if args.teacher_type == 'v1':\n # Precision-based updates\n if args.soft_updates == False:\n conf_mat[wid] = np.array([np.sum(s_pred_hard[relevant_ind]==i) / float(np.sum(relevant_ind)) for i in range(K)])\n else:\n conf_mat[wid] = np.array([s_pred_soft[relevant_ind][:, i].sum() for i in range(K)])\n conf_mat[wid] = conf_mat[wid] / float(conf_mat[wid].sum())\n elif args.teacher_type == 'v2':\n # Dawid-Skene model where each seed word is applied when it occurs in the segment\n # We allow positive mass to other aspects.\n conf_mat[wid][:] = self.args.pos_mass / float(K - 1)\n conf_mat[wid][pred_aspect] = 1 - self.args.pos_mass\n\n student_sum = s_pred_soft[relevant_ind].sum(axis=0) # adding student probabilities for all classes for all relevant samples\n conf_mat[wid] *= student_sum\n conf_mat[wid] /= conf_mat[wid].sum()\n else:\n raise(BaseException('{} not implemented'.format(args.teacher_type))) \n\n # GRADIENT EM\n prev_param = np.zeros(K)\n prev_param[pred_aspect] = 1\n conf_mat[wid] = self.args.teacher_memory * prev_param + (1 - self.args.teacher_memory) * conf_mat[wid] # (self.conf_mat[wid] + prev_param) / 2.0\n\n self.logger.info(\"Teacher answers on the {}% ({}/{}) of the training set\".format(100 * teacher_answers.sum() / teacher_answers.shape[0], teacher_answers.sum(), teacher_answers.shape[0]))\n self.logger.info(\"Student-Teacher disagreement: {}/{} ({:.2f}%)\".format((s_pred_hard[teacher_answers] != t_pred_hard[teacher_answers]).sum(), teacher_answers.sum(),100*self.disagreement))\n self.logger.info(\"Avg of seed word occurences in training set: {:.2f}\".format(np.average(seed_word_occurences.sum(axis=0))))\n\n self.conf_mat = conf_mat\n joblib.dump(self.conf_mat, self.args.logdir + 'conf_mat_{}.pkl'.format(self.epoch))\n joblib.dump(self.q, self.args.logdir + 'prior_{}.pkl'.format(self.epoch))\n\n return\n\n def validate(self):\n self.model.eval()\n all_losses = []\n all_preds = self.cuda(torch.Tensor())\n all_labels = []\n for batch in self.datahandler.get_eval_batches():\n i = batch['ind']\n pred = self.model(batch)\n label = batch['label']\n # import pdb; pdb.set_trace()\n if self.args.loss not in [\"SmoothCrossEntropy\", 
\"KL\"]:\n all_labels.extend(list(label))\n label = self.cuda(Variable(torch.LongTensor(label)))\n else:\n # Convert the ground-truth label into a one-hot label and treat is as a prob distribution\n all_labels.extend(list(label))\n one_hot = np.zeros((len(label), self.args.num_aspects))\n one_hot[np.arange(len(label)), label] = 1\n label = self.cuda(Variable(torch.Tensor(one_hot)))\n loss = self.loss_fn(pred, label)\n all_losses.append(loss.data.cpu().numpy())\n\n all_preds = torch.cat((all_preds, pred.data), dim=0)\n\n all_proba = all_preds.cpu().numpy()\n max_prob, all_preds = all_preds.max(dim=1)\n all_preds = all_preds.cpu().numpy()\n\n avg_loss = np.mean(all_losses)\n res = self.evaluator.evaluate_group(all_preds, all_labels, all_proba, gt_classes=range(self.args.num_aspects), verbose=False)\n res['loss'] = avg_loss\n if res[self.metric] >= self.best_score:\n # Save the best validation model\n self.best_score = res[self.metric]\n torch.save(self.model.state_dict(), os.path.join(self.args.logdir, 'best_valid_model.pt'))\n self.epoch_results['valid'] = res\n self.writer.add_histogram('valid_loss{}'.format(self.comment), np.array(all_losses), self.epoch, bins=100)\n self.flattened_valid_result_dict = self.evaluator.flattened_result_dict\n\n\n def validate_test(self):\n # Giannis: also validate on the test set\n self.model.eval()\n all_losses = []\n all_preds = self.cuda(torch.Tensor())\n all_labels = []\n for batch in self.datahandler.get_test_batches():\n i = batch['ind']\n pred = self.model(batch)\n label = batch['label']\n # import pdb; pdb.set_trace()\n if self.args.loss not in [\"SmoothCrossEntropy\", \"KL\"]:\n all_labels.extend(list(label))\n label = self.cuda(Variable(torch.LongTensor(label)))\n else:\n # Convert the ground-truth label into a one-hot label and treat is as a prob distribution\n all_labels.extend(list(label))\n one_hot = np.zeros((len(label), self.args.num_aspects))\n one_hot[np.arange(len(label)), label] = 1\n label = self.cuda(Variable(torch.Tensor(one_hot)))\n loss = self.loss_fn(pred, label)\n all_losses.append(loss.data.cpu().numpy())\n\n all_preds = torch.cat((all_preds, pred.data), dim=0)\n\n all_proba = all_preds.cpu().numpy()\n max_prob, all_preds = all_preds.max(dim=1)\n all_preds = all_preds.cpu().numpy()\n\n avg_loss = np.mean(all_losses)\n res = self.evaluator.evaluate_group(all_preds, all_labels, all_proba, gt_classes=range(self.args.num_aspects), verbose=False)\n res['loss'] = avg_loss\n if res[self.metric] >= self.best_test_score:\n # Save the best test model\n self.best_test_score = res[self.metric]\n torch.save(self.model.state_dict(), os.path.join(self.args.logdir, 'best_test_model.pt'))\n self.epoch_results['test'] = res\n self.writer.add_histogram('test_loss{}'.format(self.comment), np.array(all_losses), self.epoch, bins=100)\n self.flattened_test_result_dict = self.evaluator.flattened_result_dict\n\n\n def test(self, savename='results.pkl'):\n self.model.eval()\n all_preds = self.cuda(torch.Tensor())\n all_labels = []\n teacher_scores_test = []\n\n for batch in self.datahandler.get_test_batches():\n i = batch['ind']\n pred = self.model(batch)\n teacher_scores, teacher_seed_word_pred = map(list, zip(*[self.teacher.predict_verbose(seg) for seg in batch['ids'].tolist()]))\n label = batch['label']\n\n all_preds = torch.cat((all_preds, pred.data), dim=0)\n teacher_scores_test.extend(teacher_scores)\n all_labels.extend(list(label))\n\n all_proba = all_preds.cpu().numpy()\n max_prob, all_preds = all_preds.max(dim=1)\n all_preds = 
all_preds.cpu().numpy()\n\n res = self.evaluator.evaluate_group(all_preds, all_labels, all_proba, gt_classes=range(self.args.num_aspects),verbose=False)\n self.epoch_results['test'] = res\n\n\n teacher_scores_test = np.array(teacher_scores_test)\n teacher_preds = np.argmax(teacher_scores_test, axis=1)\n teacher_res = self.evaluator.evaluate_group(teacher_preds, all_labels, teacher_scores_test, gt_classes=range(self.args.num_aspects), verbose=False)\n self.epoch_results['teacher_test'] = teacher_res\n\n self.logger.info('Test {}:\\t STUDENT={:.3}\\t TEACHER={:.3}'.format(self.metric, res[self.metric], teacher_res[self.metric]))\n self.logger.info('Train disagreement: {}%'.format(100*self.disagreement))\n self.logger.info('STUDENT confusion Matrix:\\n{}'.format(res['conf_mat']))\n self.logger.info('TEACHER confusion Matrix:\\n{}'.format(teacher_res['conf_mat']))\n\n\n joblib.dump(res, os.path.join(self.args.logdir, savename))\n\n\n def start_epoch(self):\n # Do necessary staff at the beginning of each epoch\n self.epoch_results = {}\n return\n\n def end_epoch(self):\n # Do necessary staff at the end of each epoch\n self.writer.add_scalars('loss{}'.format(self.comment), {\n 'train_loss': self.epoch_results['train']['loss'],\n 'valid_loss': self.epoch_results['valid']['loss']}, self.epoch)\n score = self.epoch_results['valid'][self.metric]\n test_score = self.epoch_results['test'][self.metric]\n self.logger.info('{}: {:.3}'.format(self.metric, score))\n self.logger.info('{} (test): {:.3}'.format(self.metric, test_score))\n self.writer.add_scalars(self.metric, {self.args.domain: score}, self.epoch)\n self.writer.add_scalars('test_' + self.metric, {self.args.domain: score}, self.epoch)\n\n res_flattened = self.flattened_test_result_dict\n res_flattened['avg_prec'] = np.average(self.epoch_results['valid']['prec'])\n res_flattened['avg_rec'] = np.average(self.epoch_results['valid']['rec'])\n important_list = ['acc', 'avg_prec', 'avg_rec', 'macro_average_f1', 'micro_average_f1']\n self.writer.add_scalars('average_test_results{}'.format(self.comment), {x: res_flattened[x] for x in important_list}, self.epoch)\n self.writer.add_scalars('test_results{}'.format(self.comment), {x:res_flattened[x] for x in res_flattened if not 'conf' in x}, self.epoch)\n self.writer.add_scalars('test_conf_matrix{}'.format(self.comment), {x: res_flattened[x] for x in res_flattened if 'conf' in x}, self.epoch)\n\n self.results.append(self.epoch_results)\n joblib.dump(self.results, os.path.join(self.args.logdir, 'epoch_results.pkl')) # saving intermediate results\n return\n\n def close(self):\n self.writer.close()\n torch.save(self.model.state_dict(), os.path.join(self.args.logdir, 'last_model.pt'))\n joblib.dump(self.results, os.path.join(self.args.logdir, 'results.pkl'))\n self.logger.info(\"Process ended in {:.3f} s\".format(self.total_time))\n self.logger.info(\"Results stored at {}\".format(self.args.logdir))\n\n\n def process(self):\n self.total_time = 0\n self.test()\n\n for epoch in range(self.args.num_epochs):\n if epoch == 0:\n # Do not regularize the model in the first epochs until we start bootstrapping.\n mem_reg = self.args.memory_reg\n self.args.memory_reg = 0\n\n # Use CrossEntropyLoss with hard targets for the first epochs.\n target_loss_fn = self.args.loss\n elif epoch == self.args.bootstrap_epoch + 1:\n # When we're done with the burnout epochs, we restore the right cotraining parameters. 
\n if mem_reg > 0:\n self.logger.info(\"Adding prev_model regularization with mem_reg={}\".format(mem_reg))\n self.args.memory_reg = mem_reg\n self.prev_model.load_state_dict(deepcopy(self.model.state_dict()))\n self.logger.info(\"Switching to loss={}\".format(target_loss_fn))\n self.args.loss = target_loss_fn\n self.loss_fn = self.get_loss_fn(self.args)\n t0 = time()\n self.epoch = epoch\n self.start_epoch()\n\n self.train()\n\n if epoch >= self.args.bootstrap_epoch:\n self.update_teacher()\n if not args.fix_teacher:\n self.teacher.conf_mat = self.conf_mat\n self.teacher.prior = self.q\n\n self.validate()\n self.validate_test()\n epoch_time = time() - t0\n self.total_time += epoch_time\n self.logger.info(\"Epoch {} Done in {} s.\".format(self.epoch, epoch_time))\n self.epoch_results['time'] = epoch_time\n self.test()\n self.end_epoch()\n\n self.test()\n self.close()\n\n\ndef run_cotrain(args, domain):\n print(\"Running {}\".format(domain))\n args.domain = domain\n\n # Define output paths\n args.logdir += '/' + domain + '/'\n if not os.path.exists(args.logdir):\n os.mkdir(args.logdir)\n args.pretrained_model += '/' + domain + '/'\n args.student_folder = args.logdir + \\\n 'student' + \\\n '_{}'.format(args.loss) + \\\n '_lr{}'.format(args.lr) + \\\n '_memloss{}'.format(args.memory_loss) + \\\n '_memreg{}'.format(args.memory_reg)\n\n args.teacher_folder = args.logdir + \\\n 'teacher' + \\\n \"_{}\".format(args.teacher_type) + \\\n \"_memory{}\".format(args.teacher_memory)\n\n if not os.path.exists(args.student_folder):\n os.mkdir(args.student_folder)\n if not os.path.exists(args.teacher_folder):\n os.mkdir(args.teacher_folder)\n\n\n trainer = Trainer(args)\n trainer.process()\n return\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('domain', help=\"Domain name (without extension)\", type=str, default='pairs')\n\n # Trainer Specific\n parser.add_argument('--logdir', help=\"log directory for tensorboard\", type=str, default='../experiments/')\n parser.add_argument('--debug', help=\"Enable debug mode\", action='store_true')\n parser.add_argument('--num_epochs', help=\"Number of epochs (default: 25)\", type=int, default=5)\n parser.add_argument('--loss', help=\"Loss Function (CrossEntropy / NLL)\", type=str, default='CrossEntropy')\n parser.add_argument('--optimizer', help=\"Optimizer (Adam / Adadelta)\", type=str, default='Adam')\n parser.add_argument('--lr', help=\"Learning rate (default: 0.0001)\", type=float, default=0.00005)\n parser.add_argument('--weight_decay', help=\"Weight Decay\", type=float, default=0.0)\n parser.add_argument('--momentum', help=\"Momentum (used for optimizer=SGD)\", type=float, default=0.9)\n parser.add_argument('--report_every', help=\"Report every x number of batches\", type=int, default=50)\n parser.add_argument('--cuda_device', help=\"CUDA Device ID\", type=int, default=0)\n parser.add_argument('--batch_size', help=\"Batch Size\", type=int, default=1024)\n parser.add_argument('--target_metric', help=\"Target Metric to report\", type=str, default='micro_average_f1')\n parser.add_argument('--version', help=\"Run # (0..4)\", type=int, default=0)\n parser.add_argument('--memory_loss', help=\"Loss Function for the memory regularization term\", type=str, default='SmoothCrossEntropy')\n parser.add_argument('--memory_reg', help=\"Memory regularization (not forget the previous model)\", type=float, default=0.0)\n parser.add_argument('--teacher_memory', 
help=\"Teacher memory (not forget the initial teacher model)\", type=float, default=0.0)\n parser.add_argument('--scheduler_gamma', help=\"Scheduler's multiplier of lr in each epoch\", type=float, default=0.1)\n parser.add_argument('--bootstrap_epoch', help=\"Epoch at which we start the teacher updates\", type=int, default=0)\n parser.add_argument('--disable_gpu', help=\"Disable GPU\", action='store_true')\n\n # Domain Specific\n parser.add_argument('--test_data', help=\"hdf5 file of test segments\", type=str, default='')\n parser.add_argument('--min_len', help=\"Minimum number of non-stop-words in segment (default: 2)\", type=int, default=2)\n parser.add_argument('--num_aspects', help=\"Number of aspects (default: 9)\", type=int, default=9)\n parser.add_argument('--aspect_seeds', help='file that contains aspect seed words (overrides number of aspects)', type=str, default='')\n parser.add_argument('-q', '--quiet', help=\"No information to stdout\", action='store_true')\n parser.add_argument('--num_seeds', help=\"Number of seed words to use (default: 30)\", type=int, default=30)\n parser.add_argument('--no_seed_weights', help=\"Forcing the *unweighted* avg of seed word embeddings\", action='store_true')\n parser.add_argument('--batch_norm', help=\"Batch normalization on segment encodings\", action='store_true')\n parser.add_argument('--emb_dropout', help=\"Dropout at the segment embedding layer\", type=float, default=0.0)\n parser.add_argument('--swd', help=\"Seed Word Dropout (default=0.0 i.e., never drop the seed word)\", type=float, default=0.0)\n parser.add_argument('--no_pretrained_emb', help=\"Do NOT use pre-trained word embeddings\", action='store_true')\n parser.add_argument('--use_bert', help=\"Use BERT (base uncased) for segment embedding\", action='store_true')\n parser.add_argument('--bert_model', help=\"Type of BERT model: base/large\", type=str, default='base')\n parser.add_argument('--simple_aspects', help=\"Use fine/coarse grained aspects (-1: original A#B label, 0: first part, 1: second part of A#B label\", type=int, default=-1)\n\n\n # Model Specific\n parser.add_argument('--pretrained_model', help=\"Pre-trained model\", type=str, default='')\n parser.add_argument('--attention', help=\"Use word attention\", action='store_true')\n parser.add_argument('--fix_w_emb', help=\"Fix word embeddings\", action='store_true')\n parser.add_argument('--fix_a_emb', help=\"Fix aspect embeddings\", action='store_true')\n parser.add_argument('--model_type', help=\"Model type (embedding_based vs bow_based)\", type=str, default='embedding_based')\n parser.add_argument('--deep_aspect_clf', help=\"Use a deep CLF on top of word embeddings\", type=str, default='NO')\n parser.add_argument('--teacher_type', help=\"Teacher Type (v1..3)\", type=str, default='v1')\n parser.add_argument('--pos_mass', help=\"Probability mass to cut from the given aspect and distribute to the remaining aspects\", type=float, default=0.2)\n parser.add_argument('--soft_updates', help=\"Soft (instead of hard) teacher (precision-based) updates (only for v1)\", action='store_true')\n parser.add_argument('--hard_teacher_pred', help=\"Hard aspect predictions per seed word (only the most probable aspect)\", action='store_true')\n parser.add_argument('--fix_teacher', help=\"Fix teacher throughout training (instead of updating)\", action='store_true')\n\n args = parser.parse_args()\n args.enable_gpu = not args.disable_gpu\n\n seeds = [20, 7, 1993, 42, 127]\n args.seed = seeds[args.version]\n torch.cuda.manual_seed(args.seed)\n 
seed(args.seed)\n args.num_epochs += args.bootstrap_epoch\n\n if args.logdir == '../experiments/':\n args.logdir += datetime.now().strftime('%b%d_%H-%M-%S') + '_'\n\n if args.debug:\n args.logdir = './debug'\n if os.path.exists(args.logdir):\n os.system('rm -rf {}'.format(args.logdir))\n else:\n args.logdir = args.logdir + \\\n \"COTRAINING\" + \\\n \"_att{}\".format(args.attention) + \\\n \"_fixw{}\".format(args.fix_w_emb) + \\\n \"_fixa{}\".format(args.fix_a_emb) + \\\n \"_{}\".format(args.loss) + \\\n \"_lr{}\".format(args.lr) + \\\n \"_dropout{}\".format(args.emb_dropout) + \\\n '_memloss{}'.format(args.memory_loss) + \\\n '_memreg{}'.format(args.memory_reg) + \\\n \"_teacher{}\".format(args.teacher_type) + \\\n \"_tmem{}\".format(args.teacher_memory) + \\\n '_schedgamma{}'.format(args.scheduler_gamma) + \\\n \"_bepoch{}\".format(args.bootstrap_epoch)\n\n if not os.path.exists(args.logdir):\n os.mkdir(args.logdir)\n original_logdir = args.logdir\n args.logdir += '/v{}'.format(args.version)\n if not os.path.exists(args.logdir):\n os.mkdir(args.logdir)\n args.pretrained_model += '/v{}'.format(args.version)\n\n\n print('\\t\\tEXPERIMENT with domain={}\\nargs: {}\\nlogdir: {}'.format(args.domain, args, args.logdir))\n run_cotrain(args, args.domain)\n"
] | [
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.CrossEntropyLoss",
"torch.nn.NLLLoss",
"torch.nn.functional.softmax",
"torch.nn.KLDivLoss",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.Tensor",
"torch.cat",
"torch.LongTensor",
"numpy.argmax",
"numpy.mean",
"torch.cuda.is_available",
"numpy.average",
"torch.cuda.device_count",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
1Stohk1/tami | [
"e0aa902bb767631dd2435ed0eac05209b9bd64ed",
"e823a64ed957b37ce5f9bcf77ada1e7097a06fc4",
"a58b1f814b929665c94c04cf1f4063c186fe51d1",
"a58b1f814b929665c94c04cf1f4063c186fe51d1"
] | [
"models_code/nedo.py",
"utils/preprocessing_data.py",
"check_similarities.py",
"utils/analyzing_data.py"
] | [
"from tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras.metrics import Precision, Recall, AUC\n\n\nclass NEDO:\n\n def __init__(self, num_classes, img_size, channels, name=\"nedo\"):\n self.name = name\n self.num_classes = num_classes\n self.input_width_height = img_size\n self.channels = channels\n self.input_type = 'images'\n\n def build(self):\n model = models.Sequential()\n model.add(layers.Conv2D(64, (3, 3), activation='relu', input_shape=(self.input_width_height,\n self.input_width_height,\n self.channels)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(96, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.45))\n model.add(layers.Dense(1024, activation='relu'))\n model.add(layers.Dropout(0.35))\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dense(self.num_classes, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['acc', Precision(name=\"prec\"), Recall(name=\"rec\"), AUC(name='auc')])\n\n return model\n\n def build_tuning(self, hp):\n\n model = models.Sequential()\n model.add(layers.Conv2D(hp.Int('filters_1', 16, 128, step=16), (3, 3), activation='relu',\n input_shape=(self.input_width_height, self.input_width_height, self.channels)))\n model.add(layers.MaxPooling2D((2, 2)))\n for i in range(hp.Int('conv_blocks', 2, 5, default=3)):\n model.add(layers.Conv2D(hp.Int('filters_' + str(i), 32, 256, step=32), (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n #if hp.Choice('pooling_' + str(i), ['avg', 'max']) == 'max':\n # x = tf.keras.layers.MaxPool2D()(x)\n #else:\n # x = tf.keras.layers.AvgPool2D()(x)\n model.add(layers.Flatten())\n model.add(layers.Dropout(hp.Float('dropout', 0, 0.7, step=0.1, default=0.5)))\n model.add(layers.Dense(hp.Int('hidden_size', 512, 1024, step=128, default=512), activation='relu'))\n model.add(layers.Dropout(hp.Float('dropout', 0, 0.7, step=0.1, default=0.5)))\n model.add(layers.Dense(hp.Int('hidden_size', 128, 512, step=128, default=512), activation='relu'))\n model.add(layers.Dense(self.num_classes, activation='softmax'))\n # activation=hp.Choice('act_1', ['relu', 'tanh'])\n\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['acc', Precision(name=\"prec\"), Recall(name=\"rec\"), AUC(name='auc')])\n\n return model\n",
"import os\nimport numpy as np\nimport pickle\nimport pathlib\nfrom random import shuffle, choice\n\n\ndef get_info_dataset(dataset_path, update=False):\n # TODO: Implements some checks to verify edits to the dataset from last pickle.dump(data)\n storing_data_path = dataset_path + \"/info.txt\"\n\n if update and os.path.exists(dataset_path + \"/info.txt\"):\n os.remove(dataset_path + \"/info.txt\")\n\n if os.path.isfile(storing_data_path):\n with open(storing_data_path, 'rb') as filehandle:\n\n data = pickle.load(filehandle)\n class_info = data['class_info']\n ds_info = data['ds_info']\n\n # CHECKS if the paths stored match the DB\n # TODO: This check just pick 3 elements and check existence, can be improved\n if not os.path.exists(choice(ds_info['train_paths'])) or not os.path.exists(choice(ds_info['val_paths'])) \\\n or not os.path.exists(choice(ds_info['test_paths'])):\n print(f\"Dataset paths seem incorrect, \"\n f\"you should update the dataset info running '-m DATA -d {dataset_path}\")\n exit()\n # Shuffle elements\n else:\n shuffle(ds_info['train_paths'])\n shuffle(ds_info['val_paths'])\n shuffle(ds_info['final_training_paths'])\n shuffle(ds_info['test_paths'])\n\n else:\n\n # Create dataset filepaths\n train_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + \"/training/train\")\n for file in f if \".png\" in file or \".jpg\" in file]\n val_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + \"/training/val\")\n for file in f if \".png\" in file or \".jpg\" in file]\n final_training_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + \"/training\")\n for file in f if \".png\" in file or \".jpg\" in file]\n test_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + \"/test\")\n for file in f if \".png\" in file or \".jpg\" in file]\n\n ds_info = {'ds_type': 'images', 'train_paths': train_paths, 'val_paths': val_paths, 'test_paths': test_paths,\n 'final_training_paths': final_training_paths}\n\n temp_class_names = np.array([item.name for item in pathlib.Path(dataset_path + \"/training/train\").glob('*')])\n # Sort class_names to keep same order, which influence training in one-hot encore, over different machines\n class_names = np.sort(temp_class_names, axis=-1)\n nclasses = len(class_names)\n class_info = {\"class_names\": class_names, \"n_classes\": nclasses}\n\n # GENERAL STATS\n size_train = len(train_paths)\n size_val = len(val_paths)\n size_test = len(test_paths)\n\n class_info.update({\"train_size\": size_train, \"val_size\": size_val, \"test_size\": size_test, 'info': {}})\n\n for name in class_names:\n size_trainf = sum([len(files) for r, d, files in os.walk(dataset_path + \"/training/train/{}\".format(name))])\n size_valf = sum([len(files) for r, d, files in os.walk(dataset_path + \"/training/val/{}\".format(name))])\n size_testf = sum([len(files) for r, d, files in os.walk(dataset_path + \"/test/{}\".format(name))])\n class_info['info'][\"{}\".format(name)] = {}\n class_info['info'][\"{}\".format(name)]['TRAIN'] = size_trainf\n class_info['info'][\"{}\".format(name)]['VAL'] = size_valf\n class_info['info'][\"{}\".format(name)]['TEST'] = size_testf\n class_info['info'][\"{}\".format(name)]['TOT'] = size_testf + size_valf + size_trainf\n\n with open(storing_data_path, 'wb') as filehandle:\n data = {'ds_info': ds_info, 'class_info': class_info}\n pickle.dump(data, filehandle)\n\n return class_info, ds_info\n\n",
"import sys\nimport os\nimport cv2\nimport gist\nfrom tqdm import tqdm\nfrom scipy.spatial import distance as dist\nimport numpy as np\nfrom utils.config import *\nfrom shutil import copyfile, rmtree\nimport json\n\n\ndef val_distances(features_vectors, res, fold, distance_algo='eucl'):\n name_folder = fold.split(os.sep)[-1]\n error = 0\n sparse_distance = {'<0.1': 0, '0.1-0.2': 0, '0.2-0.3': 0, '>0.3': 0}\n if len(features_vectors) > 1:\n for i in range(len(features_vectors)):\n d = 0\n for j in range(len(features_vectors)):\n if i == j:\n continue\n if distance_algo == 'eucl':\n temp_dist = dist.euclidean(features_vectors[i], features_vectors[j])\n else:\n temp_dist = np.linalg.norm(features_vectors[i] - features_vectors[j])\n\n if temp_dist < 0.1:\n sparse_distance['<0.1'] += 1\n elif temp_dist < 0.2:\n sparse_distance['0.1-0.2'] += 1\n elif temp_dist < 0.3:\n sparse_distance['0.2-0.3'] += 1\n else:\n sparse_distance['>0.3'] += 1\n d += temp_dist\n error += d / (len(features_vectors)-1)\n\n errorAVG = error / len(features_vectors)\n res[name_folder] = {}\n res[name_folder]['tot'] = error\n res[name_folder]['AVG'] = errorAVG\n res[name_folder]['sparse_dist'] = sparse_distance\n tot = sparse_distance['<0.1'] + sparse_distance['0.1-0.2'] + \\\n sparse_distance['0.2-0.3'] + sparse_distance['>0.3']\n res[name_folder]['sparse_distAVG'] = {'<0.1': sparse_distance['<0.1']/tot,\n '0.1-0.2': sparse_distance['0.1-0.2']/tot,\n '0.2-0.3': sparse_distance['0.2-0.3'] / tot,\n '>0.3': sparse_distance['>0.3']/tot}\n\n else:\n zero_distance_families.append(name_folder)\n res[name_folder] = {}\n res[name_folder]['tot'] = 0\n res[name_folder]['AVG'] = 0\n res[name_folder]['sparse_dist'] = {}\n\n return res\n\n\n\n# file = sys.argv[1]\n# temp_path = sys.argv[2]\nnblocks = 4\norientations_per_scale = (8, 8, 4)\n\n# get list of folders with not MIX_ data\nclass_folders_original = [x[0] for x in os.walk(main_path + 'results/images')\n if not x[0].split(os.sep)[-1].startswith(\"MIX_\") and not x[0].endswith(\"images\")]\n\nfold = 1\nresults = {}\nresults2 = {}\nzero_distance_families = []\n\n# calculate distance for family folders\nfor folder in class_folders_original:\n features_list = []\n heatmap_list = []\n print(\"FOLDER {} out of {}\".format(fold, len(class_folders_original)))\n fold += 1\n for file in tqdm(os.listdir(folder)):\n if not file.startswith('heatmap_'):\n continue\n filepath = folder + os.sep + file\n heatmap = cv2.imread(filepath)\n # skip the file if the heatmap contains only zeros\n if np.all((heatmap == 0)):\n continue\n\n # img: A numpy array (an instance of numpy.ndarray) which contains an image and whose shape is (height, width, 3).\n # nblocks: Use a grid of nblocks * nblocks cells.\n # orientations_per_scale: Use len(orientations_per_scale) scales and compute orientations_per_scale[i] orientations\n # for i-th scale.\n features = gist.extract(heatmap, nblocks=nblocks, orientations_per_scale=orientations_per_scale)\n #np.save(temp_path, features)\n # print(features)\n features_list.append(features)\n heatmap_list.append(heatmap)\n\n results = val_distances(features_list, results, folder)\n #results2 = val_distances(heatmap_list, results2, folder, distance_algo='linalg')\n\n# Generate MIXED folders\nprint(\"Generating Mixed Folders\")\nMIXED = True\nif MIXED:\n NUM_HEATMAP = 50\n mix = 2\n while mix <= len(class_folders_original):\n toMix = []\n name = \"\"\n for ind in range(len(class_folders_original)):\n if class_folders_original[ind].split(\"/\")[-1] in zero_distance_families:\n 
continue\n toMix.append(ind)\n name += \"_\" + class_folders_original[ind].split(\"/\")[-1]\n if len(toMix) % mix == 0:\n HEATMAP_PER_FOLDER = int(NUM_HEATMAP / mix)\n if os.path.isdir(main_path + 'results/images/MIX' + name):\n rmtree(main_path + 'results/images/MIX' + name)\n os.mkdir(main_path + 'results/images/MIX' + name)\n k = 0\n for i in range(len(toMix)):\n for file in range(HEATMAP_PER_FOLDER):\n if os.path.isfile(class_folders_original[i] + \"/heatmap_\" + str(k) + \".png\"):\n copyfile(class_folders_original[i] + \"/heatmap_\" + str(k) + \".png\",\n main_path + 'results/images/MIX' + name + \"/heatmap_\" + str(k) + \".png\")\n elif os.path.isfile(class_folders_original[i] + \"/heatmapWRONG_\" + str(k) + \".png\"):\n copyfile(class_folders_original[i] + \"/heatmapWRONG_\" + str(k) + \".png\",\n main_path + 'results/images/MIX' + name + \"/heatmap_W\" + str(k) + \".png\")\n k += 1\n\n toMix = []\n name = \"\"\n mix += 1\n\nclass_folders_MIX = [x[0] for x in os.walk(main_path + 'results/images')\n if not x[0].endswith(\"images\") and \"MIX_\" in x[0]]\nfold = 1\n\n# calculate distance for MIX folders\nfor folder in class_folders_MIX:\n features_list = []\n heatmap_list = []\n print(\"FOLDER {} out of {}\".format(fold, len(class_folders_MIX)))\n fold += 1\n for file in tqdm(os.listdir(folder)):\n if not file.startswith('heatmap_'):\n continue\n filepath = folder + os.sep + file\n heatmap = cv2.imread(filepath)\n # skip the file if the heatmap contains only zeros\n if np.all((heatmap == 0)):\n continue\n\n # img: A numpy array (an instance of numpy.ndarray) which contains an image and whose shape is (height, width, 3).\n # nblocks: Use a grid of nblocks * nblocks cells.\n # orientations_per_scale: Use len(orientations_per_scale) scales and compute orientations_per_scale[i] orientations\n # for i-th scale.\n features = gist.extract(heatmap, nblocks=nblocks, orientations_per_scale=orientations_per_scale)\n #np.save(temp_path, features)\n # print(features)\n features_list.append(features)\n heatmap_list.append(heatmap)\n\n results = val_distances(features_list, results, folder)\n #results2 = val_distances(heatmap_list, results2, folder, distance_algo='linalg')\n\nprint(results)\n\n# Print results on file, with an incremental 'j' to not overwrite previous results\nj = 0\nwhile os.path.isfile(main_path + 'results/images/results{}.json'.format(j)):\n j += 1\n\nwith open(main_path + 'results/images/results{}.json'.format(j), 'w') as outfile:\n json.dump(results, outfile, indent=4)\n\n",
"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sn\nimport pandas as pd\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import auc\n\n\ndef plot_confusion_matrix(cm, class_names, title='Confusion matrix'):\n \"\"\"\n This function prints and plots the confusion matrix.\n \"\"\"\n cm = cm.round(decimals=2)\n\n figure = plt.figure(figsize=(30, 25))\n df_cm = pd.DataFrame(cm) # , index=class_names, columns=class_names\n sn.set(font_scale=4) # for label size\n sn.heatmap(df_cm, annot=True, annot_kws={\"size\": 60}, cmap=plt.cm.Blues, fmt='g') # font size\n plt.title(title)\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, horizontalalignment='center', rotation=30)\n plt.yticks(tick_marks, class_names, verticalalignment='top', rotation=30)\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n return figure\n\n\ndef multiclass_analysis(model, test_ds, class_names, save_fig=None):\n test_list = []\n labels_list = []\n preds_list = []\n results_classes = []\n to_print = \"\"\n nclasses = len(class_names)\n\n # Convert test_ds to python list and split in labels and test set\n # NB. The labels are converted to the argmax()\n for s in test_ds:\n labels_list.append(np.argmax(s[1].numpy()[0].tolist()))\n test_list.append(s[0].numpy()[0].tolist())\n\n # Get predictions for data in test set and convert predictions to python list\n preds = model.predict(test_list)\n for p in preds:\n preds_list.append(np.argmax(p.tolist()))\n\n # Calculate Confusion Matrix\n cm = tf.math.confusion_matrix(labels_list, preds_list, num_classes=nclasses).numpy()\n to_print += np.array2string(cm) + \" \\n\"\n\n # Print the confusion matrixes (normalized and not)\n plot_confusion_matrix(confusion_matrix(labels_list, preds_list, normalize='true'), class_names=class_names,\n title=\"Confusion Matrix Normalized\")\n # plt.save() save the latest plot created\n if save_fig is not None:\n plt.savefig(save_fig + \"_NORMALIZED.png\")\n plot_confusion_matrix(confusion_matrix(labels_list, preds_list), class_names=class_names, title=\"Confusion Matrix\")\n if save_fig is not None:\n plt.savefig(save_fig + \".png\")\n\n # Compute ROC curve and ROC area for each class\n # Adopting One vs All approach: per each class x, the label x becomes 1 and all the other labels become 0\n for i in range(nclasses):\n i_label_list = []\n i_pred_list = []\n # Converting labels to 1 if i or 0 otherwise\n for k in range(len(labels_list)):\n i_label_list.append(1 if labels_list[k] == i else 0)\n i_pred_list.append(1 if preds_list[k] == i else 0)\n\n # Calculating roc_curve and auc starting from false positive rate and true positive rate\n fpr, tpr, thresholds = roc_curve(i_label_list, i_pred_list)\n roc_auc = auc(fpr, tpr)\n results_classes.append({'AUC': roc_auc, 'ROC': [fpr, tpr, thresholds]})\n\n '''\n Examples of CM for multi-label classification | and index\n NB. 
Taking into account class at (12) as True Positive\n TN TN FP TN | 00 01 02 03\n FN FN TP FN | 10 11 12 12\n TN TN FP TN | 20 21 22 23\n TN TN FP TN | 30 31 32 33\n '''\n for ind in range(nclasses):\n TP = TN = FP = FN = 0\n for i in range(nclasses):\n for j in range(nclasses):\n if i == j and i == ind:\n TP = cm[i][j]\n elif ind == j:\n FP += cm[i][j]\n elif ind == i:\n FN += cm[i][j]\n else:\n TN += cm[i][j]\n accuracy = (TP+TN)/(TP+TN+FP+FN)\n precision = TP/(TP+FP)\n recall = TP/(TP+FN)\n f1 = (2*precision*recall)/(precision+recall)\n results_classes[ind].update({'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN,\n 'acc': accuracy, 'prec': precision, 'rec': recall, 'fm': f1})\n to_print += \"class {} -> TP: {}, TN: {}, FP: {}, FN: {}\\n\\tacc: {}, prec: {}, rec: {}, fm: {}, auc: {}\\n\"\\\n .format(class_names[ind], TP, TN, FP, FN, accuracy, precision, recall, f1, results_classes[ind]['AUC'])\n\n return cm, results_classes, to_print\n"
] | [
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.metrics.AUC",
"tensorflow.keras.metrics.Precision",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.metrics.Recall",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
],
[
"numpy.sort"
],
[
"numpy.all",
"numpy.linalg.norm",
"scipy.spatial.distance.euclidean"
],
[
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"sklearn.metrics.auc",
"sklearn.metrics.confusion_matrix",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.ylabel",
"tensorflow.math.confusion_matrix",
"numpy.array2string",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ArgonneCPAC/bnlhack19 | [
"d399b2e200ec7dbd733c754b06c4bd368eb00e67"
] | [
"scripts/demo_cupy_rawkernel.py"
] | [
"import os, sys\n\nimport cupy as cp\nimport numpy as np\nfrom numba import cuda\n\nfrom chopperhack19.mock_obs.tests import random_weighted_points\nfrom chopperhack19.mock_obs.tests.generate_test_data import (\n DEFAULT_RBINS_SQUARED)\nfrom chopperhack19.mock_obs import chaining_mesh as cm\nfrom chopperhack19.mock_obs.double_chop_kernel import double_chop_pairs_cuda\n\n########################################################################\n# This demo shows how to compile a CUDA .cu file, load a particular CUDA\n# kernel, launch it with CuPy arrays. Also shows how to make CuPy and \n# Numba work together\n########################################################################\n\n\nfilepath = os.path.abspath(os.path.join(__file__, \"../../chopperhack19/mock_obs/double_chop_kernel.cu\"))\nwith open(filepath) as f:\n source_code = f.read()\n\n# compile and load CUDA kernel using CuPy\n# before CuPy v7.0.0b3:\n# in this case, compilation happens at the first invocation of the kernel, \n# not the declaration time\ndouble_chop_kernel = cp.RawKernel(source_code, 'double_chop_pairs_pure_cuda')\n\n## starting CuPy v7.0.0b3:\n## RawModule is suitable for importing a large CUDA codebase\n## the compilation happens when initializing the RawModule instance\n#mod = cp.RawModule(source_code)\n#double_chop_kernel = mod.get_function('double_chop_pairs_pure_cuda')\n\n# parameters\nblocks = 512\nthreads = 512\nnpoints = 200013\nnmesh1 = 4\nnmesh2 = 16\n\nLbox = 1000.\n\n# array init\n# CuPy functionalities should be used to avoid unnecessary computation\n# and transfer, which I didn't do here as it's midnight...\nresult = np.zeros_like(DEFAULT_RBINS_SQUARED)[:-1].astype(cp.float32)\n\nn1 = npoints\nn2 = npoints\nx1, y1, z1, w1 = random_weighted_points(n1, Lbox, 0)\nx2, y2, z2, w2 = random_weighted_points(n2, Lbox, 1)\n\nnx1 = nmesh1\nny1 = nmesh1\nnz1 = nmesh1\nnx2 = nmesh2\nny2 = nmesh2\nnz2 = nmesh2\nrmax_x = np.sqrt(DEFAULT_RBINS_SQUARED[-1])\nrmax_y = rmax_x\nrmax_z = rmax_y\nxperiod = Lbox\nyperiod = Lbox\nzperiod = Lbox\n(x1out, y1out, z1out, w1out, cell1out,\n x2out, y2out, z2out, w2out, indx2) = (\n cm.get_double_chopped_data(\n x1, y1, z1, w1, x2, y2, z2, w2, nx1, ny1, nz1, nx2, ny2, nz2,\n rmax_x, rmax_y, rmax_z, xperiod, yperiod, zperiod))\n\nd_x1 = cp.asarray(x1out, dtype=cp.float32)\nd_y1 = cp.asarray(y1out, dtype=cp.float32)\nd_z1 = cp.asarray(z1out, dtype=cp.float32)\nd_w1 = cp.asarray(w1out, dtype=cp.float32)\nd_cell1out = cp.asarray(cell1out, dtype=cp.int32)\n\nd_x2 = cp.asarray(x2out, dtype=cp.float32)\nd_y2 = cp.asarray(y2out, dtype=cp.float32)\nd_z2 = cp.asarray(z2out, dtype=cp.float32)\nd_w2 = cp.asarray(w2out, dtype=cp.float32)\nd_indx2 = cp.asarray(indx2, dtype=cp.int32)\n\nd_rbins_squared = cp.asarray(DEFAULT_RBINS_SQUARED, dtype=cp.float32)\nd_result = cp.asarray(result, dtype=cp.float32)\n\n# for GPU timing using CuPy\nstart = cp.cuda.Event()\nend = cp.cuda.Event()\ntiming_cp = 0\n\n# running the kernel using CuPy's functionality\nfor i in range(4):\n d_result[...] 
= 0.\n if i > 0: # warm-up not needed if using RawModule\n start.record()\n double_chop_kernel((blocks,), (threads,),\n (d_x1, d_y1, d_z1, d_w1, d_cell1out,\n d_x2, d_y2, d_z2, d_w2, d_indx2,\n d_rbins_squared, d_result,\n cp.int32(d_x1.shape[0]), cp.int32(d_rbins_squared.shape[0]))\n )\n if i > 0: # warm-up not needed if using RawModule\n end.record()\n end.synchronize()\n timing_cp += cp.cuda.get_elapsed_time(start, end)\n#cp.cuda.Stream.null.synchronize()\nprint('launching CUDA kernel from CuPy took', timing_cp/3, 'ms in average')\nd_result_cp = d_result.copy()\n\n# for GPU timing using Numba\nstart = cuda.event()\nend = cuda.event()\ntiming_nb = 0\n\n# running the Numba jit kernel\n# this works because CuPy arrays have the __cuda_array_interface__ attribute,\n# which is accepted by Numba kernels, so you don't have to create the arrays\n# again using Numba's API\nfor i in range(4):\n d_result[...] = 0.\n if i > 0:\n start.record()\n double_chop_pairs_cuda[blocks, threads](d_x1, d_y1, d_z1, d_w1, d_cell1out,\n d_x2, d_y2, d_z2, d_w2, d_indx2,\n d_rbins_squared, d_result)\n if i > 0:\n end.record()\n end.synchronize()\n timing_nb += cuda.event_elapsed_time(start, end)\nprint('launching Numba jit kernel took', timing_nb/3, 'ms in average')\nd_result_nb = d_result.copy()\n\n# check that the CUDA kernel agrees with the Numba kernel\nassert cp.allclose(d_result_cp, d_result_nb, rtol=5E-4)\n"
] | [
[
"numpy.zeros_like",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fdlm/ignite | [
"a22a0f5e909ac70d2a1f76a60b6e84b2134f196c",
"6a2460e59055b3547b590a9ff7f72e07d1714164"
] | [
"tests/ignite/contrib/engines/test_common.py",
"examples/mnist/mnist_with_tensorboard.py"
] | [
"import os\n\nimport torch\nimport torch.nn as nn\n\nfrom ignite.engine import Events, Engine\nfrom ignite.contrib.engines.common import (\n setup_common_training_handlers,\n save_best_model_by_val_score,\n add_early_stopping_by_val_score,\n setup_tb_logging,\n setup_visdom_logging,\n)\n\nfrom ignite.handlers import TerminateOnNan\nimport ignite.contrib.handlers.tensorboard_logger as tb_logger_module\nimport ignite.contrib.handlers.visdom_logger as visdom_logger_module\n\nimport pytest\nfrom unittest.mock import MagicMock\n\n\nclass DummyModel(nn.Module):\n def __init__(self):\n super(DummyModel, self).__init__()\n self.net = nn.Linear(1, 1)\n\n def forward(self, x):\n return self.net(x)\n\n\[email protected]\ndef visdom_server():\n\n import time\n import subprocess\n\n from visdom.server import download_scripts\n\n download_scripts()\n\n hostname = \"localhost\"\n port = 8099\n p = subprocess.Popen(\"visdom --hostname {} -port {}\".format(hostname, port), shell=True)\n time.sleep(5)\n yield (hostname, port)\n p.terminate()\n\n\ndef _test_setup_common_training_handlers(dirname, device, rank=0, local_rank=0, distributed=False):\n\n lr = 0.01\n step_size = 100\n gamma = 0.5\n\n model = DummyModel().to(device)\n if distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank,], output_device=local_rank)\n optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)\n\n def update_fn(engine, batch):\n optimizer.zero_grad()\n x = torch.tensor([batch], requires_grad=True, device=device)\n y_pred = model(x)\n loss = y_pred.mean()\n loss.backward()\n optimizer.step()\n return loss\n\n train_sampler = MagicMock()\n train_sampler.set_epoch = MagicMock()\n\n trainer = Engine(update_fn)\n setup_common_training_handlers(\n trainer,\n train_sampler=train_sampler,\n to_save={\"model\": model, \"optimizer\": optimizer},\n save_every_iters=75,\n output_path=dirname,\n lr_scheduler=lr_scheduler,\n with_gpu_stats=False,\n output_names=[\"batch_loss\",],\n with_pbars=True,\n with_pbar_on_iters=True,\n log_every_iters=50,\n device=device,\n )\n\n num_iters = 100\n num_epochs = 10\n data = [i * 0.1 for i in range(num_iters)]\n trainer.run(data, max_epochs=num_epochs)\n\n # check handlers\n handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]\n for cls in [\n TerminateOnNan,\n ]:\n assert any([isinstance(h[0], cls) for h in handlers]), \"{}\".format(handlers)\n assert \"batch_loss\" in trainer.state.metrics\n\n # Check saved checkpoint\n if rank == 0:\n checkpoints = list(os.listdir(dirname))\n assert len(checkpoints) == 1\n for v in [\n \"training_checkpoint\",\n ]:\n assert any([v in c for c in checkpoints])\n\n # Check LR scheduling\n assert optimizer.param_groups[0][\"lr\"] <= lr * gamma ** (num_iters * num_epochs / step_size), \"{} vs {}\".format(\n optimizer.param_groups[0][\"lr\"], lr * gamma ** (num_iters * num_epochs / step_size)\n )\n\n\ndef test_asserts_setup_common_training_handlers():\n trainer = Engine(lambda e, b: None)\n\n with pytest.raises(\n ValueError, match=r\"If to_save argument is provided then output_path argument should be \" r\"also defined\"\n ):\n setup_common_training_handlers(trainer, to_save={})\n\n with pytest.warns(\n UserWarning, match=r\"Argument train_sampler distributed sampler used to call \" r\"`set_epoch` method on epoch\"\n ):\n train_sampler = MagicMock()\n setup_common_training_handlers(trainer, train_sampler=train_sampler, 
with_gpu_stats=False)\n\n\ndef test_setup_common_training_handlers(dirname, capsys):\n\n _test_setup_common_training_handlers(dirname, device=\"cpu\")\n\n # Check epoch-wise pbar\n captured = capsys.readouterr()\n out = captured.err.split(\"\\r\")\n out = list(map(lambda x: x.strip(), out))\n out = list(filter(None, out))\n assert \"Epoch:\" in out[-1], \"{}\".format(out[-1])\n\n\ndef test_save_best_model_by_val_score(dirname, capsys):\n\n trainer = Engine(lambda e, b: None)\n evaluator = Engine(lambda e, b: None)\n model = DummyModel()\n\n acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.5, 0.6, 0.61, 0.7, 0.5]\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def validate(engine):\n evaluator.run(\n [0,]\n )\n\n @evaluator.on(Events.EPOCH_COMPLETED)\n def set_eval_metric(engine):\n engine.state.metrics = {\"acc\": acc_scores[trainer.state.epoch - 1]}\n\n save_best_model_by_val_score(dirname, evaluator, model, metric_name=\"acc\", n_saved=2, trainer=trainer)\n\n data = [\n 0,\n ]\n trainer.run(data, max_epochs=len(acc_scores))\n\n assert set(os.listdir(dirname)) == set([\"best_model_8_val_acc=0.6100.pth\", \"best_model_9_val_acc=0.7000.pth\"])\n\n\ndef test_add_early_stopping_by_val_score():\n trainer = Engine(lambda e, b: None)\n evaluator = Engine(lambda e, b: None)\n\n acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def validate(engine):\n evaluator.run(\n [0,]\n )\n\n @evaluator.on(Events.EPOCH_COMPLETED)\n def set_eval_metric(engine):\n engine.state.metrics = {\"acc\": acc_scores[trainer.state.epoch - 1]}\n\n add_early_stopping_by_val_score(patience=3, evaluator=evaluator, trainer=trainer, metric_name=\"acc\")\n\n data = [\n 0,\n ]\n state = trainer.run(data, max_epochs=len(acc_scores))\n\n assert state.epoch == 7\n\n\ndef test_setup_tb_logging(dirname):\n def _test(with_eval, with_optim):\n trainer = Engine(lambda e, b: b)\n evaluators = None\n optimizers = None\n\n if with_eval:\n evaluator = Engine(lambda e, b: None)\n acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def validate(engine):\n evaluator.run(\n [0,]\n )\n\n @evaluator.on(Events.EPOCH_COMPLETED)\n def set_eval_metric(engine):\n engine.state.metrics = {\"acc\": acc_scores[trainer.state.epoch - 1]}\n\n evaluators = {\"validation\": evaluator}\n\n if with_optim:\n t = torch.tensor([0,])\n optimizers = {\"optimizer\": torch.optim.SGD([t,], lr=0.01)}\n\n setup_tb_logging(dirname, trainer, optimizers=optimizers, evaluators=evaluators, log_every_iters=1)\n\n handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]\n for cls in [\n tb_logger_module.OutputHandler,\n ]:\n assert any([isinstance(h[0], cls) for h in handlers]), \"{}\".format(handlers)\n\n if with_optim:\n handlers = trainer._event_handlers[Events.ITERATION_STARTED]\n for cls in [\n tb_logger_module.OptimizerParamsHandler,\n ]:\n assert any([isinstance(h[0], cls) for h in handlers]), \"{}\".format(handlers)\n\n if with_eval:\n handlers = evaluator._event_handlers[Events.COMPLETED]\n for cls in [\n tb_logger_module.OutputHandler,\n ]:\n assert any([isinstance(h[0], cls) for h in handlers]), \"{}\".format(handlers)\n\n data = [0, 1, 2]\n trainer.run(data, max_epochs=10)\n\n tb_files = list(os.listdir(dirname))\n assert len(tb_files) == 1\n for v in [\n \"events\",\n ]:\n assert any([v in c for c in tb_files]), \"{}\".format(tb_files)\n\n _test(with_eval=False, with_optim=False)\n _test(with_eval=True, with_optim=True)\n\n\ndef 
test_setup_visdom_logging(visdom_server):\n def _test(with_eval, with_optim):\n trainer = Engine(lambda e, b: b)\n evaluators = None\n optimizers = None\n\n if with_eval:\n evaluator = Engine(lambda e, b: None)\n acc_scores = [0.1, 0.2, 0.3, 0.4, 0.3, 0.3, 0.2, 0.1, 0.1, 0.0]\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def validate(engine):\n evaluator.run(\n [0,]\n )\n\n @evaluator.on(Events.EPOCH_COMPLETED)\n def set_eval_metric(engine):\n engine.state.metrics = {\"acc\": acc_scores[trainer.state.epoch - 1]}\n\n evaluators = {\"validation\": evaluator}\n\n if with_optim:\n t = torch.tensor([0,])\n optimizers = {\"optimizer\": torch.optim.SGD([t,], lr=0.01)}\n\n # import os\n # os.environ[\"VISDOM_SERVER_URL\"] = visdom_server[0]\n # os.environ[\"VISDOM_PORT\"] = str(visdom_server[1])\n\n vis_logger = setup_visdom_logging(\n trainer,\n optimizers=optimizers,\n evaluators=evaluators,\n log_every_iters=1,\n server=visdom_server[0],\n port=str(visdom_server[1]),\n )\n\n handlers = trainer._event_handlers[Events.ITERATION_COMPLETED]\n for cls in [\n visdom_logger_module.OutputHandler,\n ]:\n assert any([isinstance(h[0], cls) for h in handlers]), \"{}\".format(handlers)\n\n if with_optim:\n handlers = trainer._event_handlers[Events.ITERATION_STARTED]\n for cls in [\n visdom_logger_module.OptimizerParamsHandler,\n ]:\n assert any([isinstance(h[0], cls) for h in handlers]), \"{}\".format(handlers)\n\n if with_eval:\n handlers = evaluator._event_handlers[Events.COMPLETED]\n for cls in [\n visdom_logger_module.OutputHandler,\n ]:\n assert any([isinstance(h[0], cls) for h in handlers]), \"{}\".format(handlers)\n\n data = [0, 1, 2]\n trainer.run(data, max_epochs=10)\n return vis_logger\n\n vis_logger_optim = _test(with_eval=False, with_optim=False)\n vis_logger_all = _test(with_eval=True, with_optim=True)\n\n vis_logger_optim.close()\n vis_logger_all.close()\n\n\[email protected]\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_distrib_gpu(dirname, distributed_context_single_node_nccl):\n local_rank = distributed_context_single_node_nccl[\"local_rank\"]\n device = \"cuda:{}\".format(local_rank)\n _test_setup_common_training_handlers(dirname, device, rank=local_rank, local_rank=local_rank, distributed=True)\n test_add_early_stopping_by_val_score()\n\n\[email protected]\ndef test_distrib_cpu(dirname, distributed_context_single_node_gloo):\n device = \"cpu\"\n local_rank = distributed_context_single_node_gloo[\"local_rank\"]\n _test_setup_common_training_handlers(dirname, device, rank=local_rank)\n test_add_early_stopping_by_val_score()\n\n\[email protected]_distributed\[email protected](\"MULTINODE_DISTRIB\" not in os.environ, reason=\"Skip if not multi-node distributed\")\ndef test_multinode_distrib_cpu(dirname, distributed_context_multi_node_gloo):\n device = \"cpu\"\n rank = distributed_context_multi_node_gloo[\"rank\"]\n _test_setup_common_training_handlers(dirname, device, rank=rank)\n test_add_early_stopping_by_val_score()\n\n\[email protected]_distributed\[email protected](\"GPU_MULTINODE_DISTRIB\" not in os.environ, reason=\"Skip if not multi-node distributed\")\ndef test_multinode_distrib_gpu(dirname, distributed_context_multi_node_nccl):\n local_rank = distributed_context_multi_node_nccl[\"local_rank\"]\n rank = distributed_context_multi_node_nccl[\"rank\"]\n device = \"cuda:{}\".format(local_rank)\n _test_setup_common_training_handlers(dirname, device, rank=rank, local_rank=local_rank, distributed=True)\n test_add_early_stopping_by_val_score()\n",
"\"\"\"\n MNIST example with training and validation monitoring using Tensorboard.\n Requirements:\n TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`\n or PyTorch >= 1.2 which supports Tensorboard\n Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)\n Usage:\n Start tensorboard:\n ```bash\n tensorboard --logdir=/tmp/tensorboard_logs/\n ```\n Run the example:\n ```bash\n python mnist_with_tensorboard.py --log_dir=/tmp/tensorboard_logs\n ```\n\"\"\"\n\nfrom argparse import ArgumentParser\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.optim import SGD\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import Compose, ToTensor, Normalize\n\ntry:\n from tensorboardX import SummaryWriter\nexcept ImportError:\n try:\n from torch.utils.tensorboard import SummaryWriter\n except ImportError:\n raise RuntimeError(\n \"This module requires either tensorboardX or torch >= 1.2.0. \"\n \"You may install tensorboardX with command: \\n pip install tensorboardX \\n\"\n \"or upgrade PyTorch using your package manager of choice (pip or conda).\"\n )\n\nfrom ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\nfrom ignite.metrics import Accuracy, Loss\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=-1)\n\n\ndef get_data_loaders(train_batch_size, val_batch_size):\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n\n train_loader = DataLoader(\n MNIST(download=True, root=\".\", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True\n )\n\n val_loader = DataLoader(\n MNIST(download=False, root=\".\", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False\n )\n return train_loader, val_loader\n\n\ndef create_summary_writer(model, data_loader, log_dir):\n writer = SummaryWriter(logdir=log_dir)\n data_loader_iter = iter(data_loader)\n x, y = next(data_loader_iter)\n try:\n writer.add_graph(model, x)\n except Exception as e:\n print(\"Failed to save model graph: {}\".format(e))\n return writer\n\n\ndef run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):\n train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)\n model = Net()\n writer = create_summary_writer(model, train_loader, log_dir)\n device = \"cpu\"\n\n if torch.cuda.is_available():\n device = \"cuda\"\n\n optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)\n trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)\n evaluator = create_supervised_evaluator(\n model, metrics={\"accuracy\": Accuracy(), \"nll\": Loss(F.nll_loss)}, device=device\n )\n\n @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))\n def log_training_loss(engine):\n print(\n \"Epoch[{}] Iteration[{}/{}] Loss: {:.2f}\"\n \"\".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output)\n )\n 
writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n evaluator.run(train_loader)\n metrics = evaluator.state.metrics\n avg_accuracy = metrics[\"accuracy\"]\n avg_nll = metrics[\"nll\"]\n print(\n \"Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}\".format(\n engine.state.epoch, avg_accuracy, avg_nll\n )\n )\n writer.add_scalar(\"training/avg_loss\", avg_nll, engine.state.epoch)\n writer.add_scalar(\"training/avg_accuracy\", avg_accuracy, engine.state.epoch)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n evaluator.run(val_loader)\n metrics = evaluator.state.metrics\n avg_accuracy = metrics[\"accuracy\"]\n avg_nll = metrics[\"nll\"]\n print(\n \"Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}\".format(\n engine.state.epoch, avg_accuracy, avg_nll\n )\n )\n writer.add_scalar(\"valdation/avg_loss\", avg_nll, engine.state.epoch)\n writer.add_scalar(\"valdation/avg_accuracy\", avg_accuracy, engine.state.epoch)\n\n # kick everything off\n trainer.run(train_loader, max_epochs=epochs)\n\n writer.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--batch_size\", type=int, default=64, help=\"input batch size for training (default: 64)\")\n parser.add_argument(\n \"--val_batch_size\", type=int, default=1000, help=\"input batch size for validation (default: 1000)\"\n )\n parser.add_argument(\"--epochs\", type=int, default=10, help=\"number of epochs to train (default: 10)\")\n parser.add_argument(\"--lr\", type=float, default=0.01, help=\"learning rate (default: 0.01)\")\n parser.add_argument(\"--momentum\", type=float, default=0.5, help=\"SGD momentum (default: 0.5)\")\n parser.add_argument(\n \"--log_interval\", type=int, default=10, help=\"how many batches to wait before logging training status\"\n )\n parser.add_argument(\n \"--log_dir\", type=str, default=\"tensorboard_logs\", help=\"log directory for Tensorboard log output\"\n )\n\n args = parser.parse_args()\n\n run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval, args.log_dir)\n"
] | [
[
"torch.tensor",
"torch.nn.Linear",
"torch.optim.SGD",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.optim.lr_scheduler.StepLR"
],
[
"torch.nn.Dropout2d",
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stjordanis/catalyst-1 | [
"84bc7576c981278f389279d87dda85dd66a758b6",
"84bc7576c981278f389279d87dda85dd66a758b6",
"84bc7576c981278f389279d87dda85dd66a758b6"
] | [
"catalyst/contrib/datasets/mnist.py",
"tests/pipelines/test_mnist_custom.py",
"catalyst/utils/mixup.py"
] | [
"from typing import Any, Callable, Dict, List, Optional\nimport os\n\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom catalyst.contrib.datasets.functional import (\n download_and_extract_archive,\n read_sn3_pascalvincent_tensor,\n)\nfrom catalyst.data.dataset.metric_learning import MetricLearningTrainDataset, QueryGalleryDataset\n\n\ndef _read_label_file(path):\n with open(path, \"rb\") as f:\n x = read_sn3_pascalvincent_tensor(f, strict=False)\n assert x.dtype == torch.uint8\n assert x.ndimension() == 1\n return x.long()\n\n\ndef _read_image_file(path):\n with open(path, \"rb\") as f:\n x = read_sn3_pascalvincent_tensor(f, strict=False)\n assert x.dtype == torch.uint8\n assert x.ndimension() == 3\n return x\n\n\nclass MNIST(Dataset):\n \"\"\"`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.\"\"\"\n\n _repr_indent = 4\n\n # CVDF mirror of http://yann.lecun.com/exdb/mnist/\n resources = [\n (\n \"https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz\",\n \"f68b3c2dcbeaaa9fbdd348bbdeb94873\",\n ),\n (\n \"https://storage.googleapis.com/cvdf-datasets/mnist/train-labels-idx1-ubyte.gz\",\n \"d53e105ee54ea40749a09fcbcd1e9432\",\n ),\n (\n \"https://storage.googleapis.com/cvdf-datasets/mnist/t10k-images-idx3-ubyte.gz\",\n \"9fb629c4189551a2d022fa330f9573f3\",\n ),\n (\n \"https://storage.googleapis.com/cvdf-datasets/mnist/t10k-labels-idx1-ubyte.gz\",\n \"ec29112dd5afa0611ce80d1b7f02629c\",\n ),\n ]\n\n training_file = \"training.pt\"\n test_file = \"test.pt\"\n classes = [\n \"0 - zero\",\n \"1 - one\",\n \"2 - two\",\n \"3 - three\",\n \"4 - four\",\n \"5 - five\",\n \"6 - six\",\n \"7 - seven\",\n \"8 - eight\",\n \"9 - nine\",\n ]\n\n def __init__(self, root, train=True, transform=None, target_transform=None, download=False):\n \"\"\"\n Args:\n root: Root directory of dataset where\n ``MNIST/processed/training.pt``\n and ``MNIST/processed/test.pt`` exist.\n train (bool, optional): If True, creates dataset from\n ``training.pt``, otherwise from ``test.pt``.\n download (bool, optional): If true, downloads the dataset from\n the internet and puts it in root directory. If dataset\n is already downloaded, it is not downloaded again.\n transform (callable, optional): A function/transform that\n takes in an image and returns a transformed version.\n target_transform (callable, optional): A function/transform\n that takes in the target and transforms it.\n \"\"\"\n if isinstance(root, torch._six.string_classes):\n root = os.path.expanduser(root)\n self.root = root\n self.train = train # training set or test set\n self.transform = transform\n self.target_transform = target_transform\n\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError(\"Dataset not found. You can use download=True to download it\")\n\n if self.train:\n data_file = self.training_file\n else:\n data_file = self.test_file\n self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index: Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index].numpy(), int(self.targets[index])\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n return len(self.data)\n\n def __repr__(self):\n \"\"\"@TODO: Docs. 
Contribution is welcome.\"\"\"\n head = \"Dataset \" + self.__class__.__name__\n body = [\"Number of datapoints: {}\".format(self.__len__())]\n if self.root is not None:\n body.append(\"Root location: {}\".format(self.root))\n body += self.extra_repr().splitlines()\n if hasattr(self, \"transforms\") and self.transforms is not None:\n body += [repr(self.transforms)]\n lines = [head] + [\" \" * self._repr_indent + line for line in body]\n return \"\\n\".join(lines)\n\n @property\n def raw_folder(self):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n return os.path.join(self.root, self.__class__.__name__, \"raw\")\n\n @property\n def processed_folder(self):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n return os.path.join(self.root, self.__class__.__name__, \"processed\")\n\n @property\n def class_to_idx(self):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n return {_class: i for i, _class in enumerate(self.classes)}\n\n def _check_exists(self):\n return os.path.exists(\n os.path.join(self.processed_folder, self.training_file)\n ) and os.path.exists(os.path.join(self.processed_folder, self.test_file))\n\n def download(self):\n \"\"\"Download the MNIST data if it doesn't exist in processed_folder.\"\"\"\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url, md5 in self.resources:\n filename = url.rpartition(\"/\")[2]\n download_and_extract_archive(\n url, download_root=self.raw_folder, filename=filename, md5=md5\n )\n\n # process and save as torch files\n print(\"Processing...\")\n\n training_set = (\n _read_image_file(os.path.join(self.raw_folder, \"train-images-idx3-ubyte\")),\n _read_label_file(os.path.join(self.raw_folder, \"train-labels-idx1-ubyte\")),\n )\n test_set = (\n _read_image_file(os.path.join(self.raw_folder, \"t10k-images-idx3-ubyte\")),\n _read_label_file(os.path.join(self.raw_folder, \"t10k-labels-idx1-ubyte\")),\n )\n with open(os.path.join(self.processed_folder, self.training_file), \"wb\") as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), \"wb\") as f:\n torch.save(test_set, f)\n\n print(\"Done!\")\n\n def extra_repr(self):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n return \"Split: {}\".format(\"Train\" if self.train is True else \"Test\")\n\n\nclass MnistMLDataset(MetricLearningTrainDataset, MNIST):\n \"\"\"\n Simple wrapper for MNIST dataset for metric learning train stage.\n This dataset can be used only for training. 
For test stage\n use MnistQGDataset.\n\n For this dataset we use only training part of the MNIST and only\n those images that are labeled as 0, 1, 2, 3, 4.\n \"\"\"\n\n _split = 5\n classes = [\n \"0 - zero\",\n \"1 - one\",\n \"2 - two\",\n \"3 - three\",\n \"4 - four\",\n ]\n\n def __init__(self, **kwargs):\n \"\"\"\n Raises:\n ValueError: if train argument is False (MnistMLDataset\n should be used only for training)\n \"\"\"\n if \"train\" in kwargs:\n if kwargs[\"train\"] is False:\n raise ValueError(\"MnistMLDataset can be used only for training stage.\")\n else:\n kwargs[\"train\"] = True\n super(MnistMLDataset, self).__init__(**kwargs)\n self._filter()\n\n def get_labels(self) -> List[int]:\n \"\"\"\n Returns:\n labels of digits\n \"\"\"\n return self.targets.tolist()\n\n def _filter(self) -> None:\n \"\"\"Filter MNIST dataset: select images of 0, 1, 2, 3, 4 classes.\"\"\"\n mask = self.targets < self._split\n self.data = self.data[mask]\n self.targets = self.targets[mask]\n\n\nclass MnistQGDataset(QueryGalleryDataset):\n \"\"\"\n MNIST for metric learning with query and gallery split.\n MnistQGDataset should be used for test stage.\n\n For this dataset we used only test part of the MNIST and only\n those images that are labeled as 5, 6, 7, 8, 9.\n \"\"\"\n\n _split = 5\n classes = [\n \"5 - five\",\n \"6 - six\",\n \"7 - seven\",\n \"8 - eight\",\n \"9 - nine\",\n ]\n\n def __init__(\n self, root: str, transform: Optional[Callable] = None, gallery_fraq: Optional[float] = 0.2\n ) -> None:\n \"\"\"\n Args:\n root: root directory for storing dataset\n transform: transform\n gallery_fraq: gallery size\n \"\"\"\n self._mnist = MNIST(root, train=False, download=True, transform=transform)\n self._filter()\n\n self._gallery_size = int(gallery_fraq * len(self._mnist))\n self._query_size = len(self._mnist) - self._gallery_size\n\n self._is_query = torch.zeros(len(self._mnist)).type(torch.bool)\n self._is_query[: self._query_size] = True\n\n def _filter(self) -> None:\n \"\"\"Filter MNIST dataset: select images of 5, 6, 7, 8, 9 classes.\"\"\"\n mask = self._mnist.targets >= self._split\n self._mnist.data = self._mnist.data[mask]\n self._mnist.targets = self._mnist.targets[mask]\n\n def __getitem__(self, idx: int) -> Dict[str, Any]:\n \"\"\"\n Get item method for dataset\n\n\n Args:\n idx: index of the object\n\n Returns:\n Dict with features, targets and is_query flag\n \"\"\"\n image, label = self._mnist[idx]\n return {\n \"features\": image,\n \"targets\": label,\n \"is_query\": self._is_query[idx],\n }\n\n def __len__(self) -> int:\n \"\"\"Length\"\"\"\n return len(self._mnist)\n\n def __repr__(self) -> None:\n \"\"\"Print info about the dataset\"\"\"\n return self._mnist.__repr__()\n\n @property\n def gallery_size(self) -> int:\n \"\"\"Query Gallery dataset should have gallery_size property\"\"\"\n return self._gallery_size\n\n @property\n def query_size(self) -> int:\n \"\"\"Query Gallery dataset should have query_size property\"\"\"\n return self._query_size\n\n @property\n def data(self) -> torch.Tensor:\n \"\"\"Images from MNIST\"\"\"\n return self._mnist.data\n\n @property\n def targets(self) -> torch.Tensor:\n \"\"\"Labels of digits\"\"\"\n return self._mnist.targets\n\n\n__all__ = [\"MNIST\", \"MnistMLDataset\", \"MnistQGDataset\"]\n",
"# flake8: noqa\n\nimport os\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\n\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nfrom catalyst import dl, metrics\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.data import ToTensor\nfrom catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS\n\n\nclass CustomRunner(dl.Runner):\n def predict_batch(self, batch):\n # model inference step\n return self.model(batch[0].to(self.device))\n\n def on_loader_start(self, runner):\n super().on_loader_start(runner)\n self.meters = {\n key: metrics.AdditiveMetric(compute_on_call=False)\n for key in [\"loss\", \"accuracy01\", \"accuracy03\"]\n }\n\n def handle_batch(self, batch):\n # model train/valid step\n # unpack the batch\n x, y = batch\n # run model forward pass\n logits = self.model(x)\n # compute the loss\n loss = F.cross_entropy(logits, y)\n # compute other metrics of interest\n accuracy01, accuracy03 = metrics.accuracy(logits, y, topk=(1, 3))\n # log metrics\n self.batch_metrics.update(\n {\"loss\": loss, \"accuracy01\": accuracy01, \"accuracy03\": accuracy03}\n )\n for key in [\"loss\", \"accuracy01\", \"accuracy03\"]:\n self.meters[key].update(self.batch_metrics[key].item(), self.batch_size)\n # run model backward pass\n if self.is_train_loader:\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n def on_loader_end(self, runner):\n for key in [\"loss\", \"accuracy01\", \"accuracy03\"]:\n self.loader_metrics[key] = self.meters[key].compute()[0]\n super().on_loader_end(runner)\n\n\ndef train_experiment(device, engine=None):\n with TemporaryDirectory() as logdir:\n\n model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n optimizer = optim.Adam(model.parameters(), lr=0.02)\n\n loaders = {\n \"train\": DataLoader(\n MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32\n ),\n \"valid\": DataLoader(\n MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32\n ),\n }\n\n runner = CustomRunner()\n # model training\n runner.train(\n engine=engine or dl.DeviceEngine(device),\n model=model,\n optimizer=optimizer,\n loaders=loaders,\n logdir=logdir,\n num_epochs=1,\n verbose=False,\n valid_loader=\"valid\",\n valid_metric=\"loss\",\n minimize_valid_metric=True,\n )\n\n\n# Torch\ndef test_on_cpu():\n train_experiment(\"cpu\")\n\n\[email protected](not IS_CUDA_AVAILABLE, reason=\"CUDA device is not available\")\ndef test_on_torch_cuda0():\n train_experiment(\"cuda:0\")\n\n\[email protected](not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\")\ndef test_on_torch_cuda1():\n train_experiment(\"cuda:1\")\n\n\[email protected](not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\")\ndef test_on_torch_dp():\n train_experiment(None, dl.DataParallelEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >=2),\n# reason=\"No CUDA>=2 found\",\n# )\n# def test_on_ddp():\n# train_experiment(None, dl.DistributedDataParallelEngine())\n\n# AMP\[email protected](not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason=\"No CUDA or AMP found\")\ndef test_on_amp():\n train_experiment(None, dl.AMPEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_on_amp_dp():\n train_experiment(None, dl.DataParallelAMPEngine())\n\n\n# @mark.skipif(\n# not 
(IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n# reason=\"No CUDA>=2 or AMP found\",\n# )\n# def test_on_amp_ddp():\n# train_experiment(None, dl.DistributedDataParallelAMPEngine())\n\n# APEX\[email protected](not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason=\"No CUDA or Apex found\")\ndef test_on_apex():\n train_experiment(None, dl.APEXEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n reason=\"No CUDA>=2 or Apex found\",\n)\ndef test_on_apex_dp():\n train_experiment(None, dl.DataParallelAPEXEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n# reason=\"No CUDA>=2 or Apex found\",\n# )\n# def test_on_apex_ddp():\n# train_experiment(None, dl.DistributedDataParallelApexEngine())\n",
"from typing import List\n\nimport numpy as np\n\nimport torch\n\n\ndef mixup_batch(\n batch: List[torch.Tensor], alpha: float = 0.2, mode: str = \"replace\"\n) -> List[torch.Tensor]:\n \"\"\"\n\n Args:\n batch: batch to which you want to apply augmentation\n alpha: beta distribution a=b parameters. Must be >=0. The closer alpha to zero the\n less effect of the mixup.\n mode: algorithm used for muxup: ``\"replace\"`` | ``\"add\"``. If \"replace\"\n then replaces the batch with a mixed one, while the batch size is not changed\n If \"add\", concatenates mixed examples to the current ones, the batch size increases\n by 2 times.\n\n Returns:\n augmented batch\n\n \"\"\"\n assert alpha >= 0, \"alpha must be>=0\"\n assert mode in (\"add\", \"replace\"), f\"mode must be in 'add', 'replace', get: {mode}\"\n\n batch_size = batch[0].shape[0]\n beta = np.random.beta(alpha, alpha, batch_size).astype(np.float32)\n indexes = np.arange(batch_size)\n # index shift by 1\n indexes_2 = (indexes + 1) % batch_size\n for idx, targets in enumerate(batch):\n device = targets.device\n targets_shape = [batch_size] + [1] * len(targets.shape[1:])\n key_beta = torch.as_tensor(beta.reshape(targets_shape), device=device)\n targets = targets * key_beta + targets[indexes_2] * (1 - key_beta)\n\n if mode == \"replace\":\n batch[idx] = targets\n else:\n # mode == 'add'\n batch[idx] = torch.cat([batch[idx], targets])\n return batch\n"
] | [
[
"torch.save"
],
[
"torch.nn.Linear",
"torch.nn.functional.cross_entropy",
"torch.nn.Flatten"
],
[
"numpy.arange",
"numpy.random.beta",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BalderOdinson/Deep-Learning-Lab | [
"70786ff1be40fc829d64a644585c1d5683c76538"
] | [
"deep-learning-lab-01/tf_logreg.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 28 22:39:01 2019\n\n@author: Oshikuru\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport data\n\nclass TFLogreg:\n def __init__(self, D, C, param_delta=0.5, param_lambda=1e-3):\n \"\"\"Arguments:\n - D: dimensions of each datapoint \n - C: number of classes\n - param_delta: training step\n \"\"\"\n # definicija podataka i parametara:\n self.X = tf.placeholder(tf.float32, [None, D])\n self.Y_ = tf.placeholder(tf.float32, [None, C])\n self.W = tf.Variable(tf.random_normal([D, C], stddev=0.35), tf.float32)\n self.b = tf.Variable(tf.zeros([C]), tf.float32)\n self.param_lambda = tf.constant(param_lambda, tf.float32)\n\n # formulacija modela: izračunati self.probs\n # koristiti: tf.matmul, tf.nn.softmax\n self.probs = tf.nn.softmax(tf.matmul(self.X, self.W) + self.b)\n \n # formulacija gubitka: self.loss\n reg_loss = 0.5*self.param_lambda*tf.reduce_sum(self.W*self.W)\n self.loss = tf.reduce_mean(-tf.reduce_sum(self.Y_ * tf.log(self.probs), reduction_indices=1)) + reg_loss\n\n # formulacija operacije učenja: self.train_step\n self.train_step = tf.train.GradientDescentOptimizer(param_delta).minimize(self.loss)\n\n # instanciranje izvedbenog konteksta: self.session\n self.session = tf.Session()\n\n def train(self, X, Yoh_, param_niter):\n \"\"\"Arguments:\n - X: actual datapoints [NxD]\n - Yoh_: one-hot encoded labels [NxC]\n - param_niter: number of iterations\n \"\"\"\n # incijalizacija parametara\n # koristiti: tf.initializers.global_variables \n self.session.run(tf.initializers.global_variables())\n \n # optimizacijska petlja\n # koristiti: tf.Session.run\n for i in range(param_niter):\n loss,_ = self.session.run([self.loss, self.train_step], \n feed_dict={self.X: X, self.Y_: Yoh_})\n if i % 10 == 0:\n print(\"iteration {}: loss {}\".format(i, loss))\n\n def eval(self, X):\n \"\"\"Arguments:\n - X: actual datapoints [NxD]\n Returns: predicted class probabilites [NxC]\n \"\"\"\n return self.session.run(self.probs, \n feed_dict={self.X: X})\n \ndef calc_class(X):\n y = tflr.eval(X)\n return np.argmax(y, axis=1) * np.max(y, axis=1)\n \nif __name__ == \"__main__\":\n # inicijaliziraj generatore slučajnih brojeva\n np.random.seed(100)\n tf.set_random_seed(100)\n\n # instanciraj podatke X i labele Yoh_\n X,Y_ = data.sample_gmm_2d(6, 2, 10)\n Yoh_ = data.class_to_onehot(Y_)\n\n # izgradi graf:\n tflr = TFLogreg(X.shape[1], Yoh_.shape[1], 0.06,1)\n\n # nauči parametre:\n tflr.train(X, Yoh_, 1000)\n\n # dohvati vjerojatnosti na skupu za učenje\n probs = tflr.eval(X)\n Y = np.argmax(probs, axis=1)\n\n # ispiši performansu (preciznost i odziv po razredima)\n accuracy, recall, precision = data.eval_perf_multi(Y, Y_)\n AP = data.eval_AP(Y_)\n print (accuracy, recall, precision, AP)\n\n # iscrtaj rezultate, decizijsku plohu\n rect=(np.min(X, axis=0), np.max(X, axis=0))\n data.graph_surface(calc_class, rect, offset=0.5)\n data.graph_data(X, Y_, Y, special=[])\n plt.show()"
] | [
[
"tensorflow.matmul",
"tensorflow.constant",
"numpy.random.seed",
"numpy.min",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"numpy.max",
"numpy.argmax",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.set_random_seed",
"matplotlib.pyplot.show",
"tensorflow.initializers.global_variables",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
gdsa-upc/K-LERA | [
"3f4b5fb1a6c4b3df4fde05eb55fbf3dc3815cce4"
] | [
"Scripts21-12-17/get_params.py"
] | [
"import os,sys\nimport pandas as pd\nimport numpy as np\n\ndef get_params():\n\n '''\n Define dictionary with parameters\n '''\n params = {} \n\n params['src'] = '/home/dani/Escritorio/K-LERA-master/Semana4_ok'\n \n # Source data\n params['root'] = '/home/dani/Escritorio/K-LERA-master/Semana4_ok'\n params['database'] = 'TB2016'\n\n # To generate\n \n # 'root_save' directory goes under 'root':\n params['root_save'] = 'save'\n \n # All the following go under 'root_save':\n params['image_lists'] = 'image_lists'\n params['feats_dir'] = 'features'\n params['rankings_dir'] = 'rankings'\n params['classification_dir'] = 'classification'\n params['codebooks_dir'] = 'codebooks'\n params['classifiers_dir'] = 'classifiers'\n params['kaggle_dir'] = 'kaggle'\n \n\n # Parameters\n params['split'] = 'val'\n params['descriptor_size'] = 1024 # Number of clusters\n params['descriptor_type'] = 'SIFT'\n params['keypoint_type'] = 'SIFT'\n params['max_size'] = 500 # Widht size\n params['distance_type'] = 'euclidean'\n params['save_for_kaggle'] = True\n \n # Classification\n params['classifier'] = 'SVM'\n params['svm_tune'] =[{'kernel': ['rbf'], 'gamma': [1e-1, 1e-2, 1e-3, 1e-4, 1e-5],\n 'C': [0.1, 1, 10, 100, 1000]},\n {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]}] # Parameters to tune the SVM\n \n params['num_neighbors'] = 3 # For KNN\n params['manual_balance'] = False\n \n # Normalization of local descriptors\n params['whiten'] = False\n params['normalize_feats'] = False\n params['scale'] = False\n \n \n # We read the training annotations to know the set of possible labels\n data = pd.read_csv(os.path.join(params['root'],params['database'],'train','annotation.txt'), sep='\\t', header = 0)\n \n # Store them in the parameters dictionary for later use\n params['possible_labels'] = np.unique(data['ClassID'])\n\n create_dirs(params)\n\n return params\n\n\ndef make_dir(dir):\n '''\n Creates a directory if it does not exist\n dir: absolute path to directory to create\n '''\n if not os.path.isdir(dir):\n os.makedirs(dir)\n\ndef create_dirs(params):\n\n '''\n Create directories specified in params\n '''\n save_dir = os.path.join(params['root'], params['root_save'])\n\n make_dir(save_dir)\n make_dir(os.path.join(save_dir,params['image_lists']))\n make_dir(os.path.join(save_dir,params['feats_dir']))\n make_dir(os.path.join(save_dir,params['rankings_dir']))\n make_dir(os.path.join(save_dir,params['classification_dir']))\n make_dir(os.path.join(save_dir,params['codebooks_dir']))\n make_dir(os.path.join(save_dir,params['classifiers_dir']))\n make_dir(os.path.join(save_dir,params['kaggle_dir']))\n \n make_dir(os.path.join(save_dir,params['rankings_dir'],params['descriptor_type']))\n make_dir(os.path.join(save_dir,params['rankings_dir'],params['descriptor_type'],params['split']))\n make_dir(os.path.join(save_dir,params['classification_dir'],params['descriptor_type']))\n\nif __name__ == \"__main__\":\n\n\tparams = get_params()"
] | [
[
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
betaros/traffic_sign | [
"6f5ef4afb7093c929cc2e94c7f72daebbd149b7e"
] | [
"src/traffic_sign.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\n\nimport cv2\n\nimport roslib\nroslib.load_manifest('traffic_sign')\nimport rospy\n\nfrom sensor_msgs.msg import CompressedImage\nfrom std_msgs.msg import String\n\nclass image_feature:\n def __init__(self):\n self.counter = 1;\n self.raspi_subscriber = rospy.Subscriber(\"/raspicam_node/image/compressed\", CompressedImage, self.callback)\n rospy.loginfo(\"Subscribed to /raspicam_node/image/compressed\")\n\n self.detection_publisher = rospy.Publisher(\"/traffic_sign/detected\", String, queue_size=10)\n rospy.loginfo(\"Publishing /traffic_sign/detected\")\n\n self.image_publisher = rospy.Publisher(\"/traffic_sign/image/compressed\", CompressedImage, queue_size=10)\n rospy.loginfo(\"Publishing /traffic_sign/image/compressed\")\n\n def callback(self, ros_data):\n # rospy.loginfo(type(ros_data))\n \"\"\"\n Shows live images with marked detections\n \"\"\"\n if self.counter%10 != 0:\n self.counter = self.counter + 1\n else:\n self.counter = 1\n\n np_arr = np.fromstring(ros_data.data, np.uint8)\n # img = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)\n img = cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED) # OpenCV >= 3.0:\n center = (205, 154)\n\n #if not img is None:\n # rospy.logwarn(\"No image received\")\n # return\n\n # img = cv2.resize(img, (960, 540))\n M = cv2.getRotationMatrix2D(center, 180, 1.0)\n img = cv2.warpAffine(img, M, (410, 308))\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n biggest_value = 0\n found_msg = \"nothing\"\n\n #face_cascade = cv2.CascadeClassifier('/home/user/catkin_ws/src/traffic_sign/cascades/haarcascade_frontalface_default.xml')\n #faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n #for (x, y, w, h) in faces:\n # cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n # if biggest_value < x*y:\n # found_msg = \"face\"\n # biggest_value = x * y\n # y = y - 5\n # self.write_text_on_image(img, \"Faces\", x, y)\n\n # No parking\n no_parking_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/cascade_no_parking.xml')\n no_parking = no_parking_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in no_parking:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 128), 2)\n if biggest_value < x * y:\n found_msg = \"no_parking\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"no parking\", x, y)\n\n # Entry forbidden\n entry_forbidden_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/cascade_entry_forbidden.xml')\n entry_forbidden = entry_forbidden_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in entry_forbidden:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n if biggest_value < x * y:\n found_msg = \"entry_forbidden\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"entry forbidden\", x, y)\n\n # Bus stop\n bus_stop_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/cascade_bus_stop.xml')\n bus_stop = bus_stop_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in bus_stop:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)\n if biggest_value < x * y:\n found_msg = \"bus_stop\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"bus stop\", x, y)\n\n form_triangle_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/red_triangle/cascade.xml')\n form_triangle = form_triangle_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in form_triangle:\n # pedestrians\n 
pedestrians_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/pedestrians/cascade.xml')\n pedestrians = pedestrians_cascade.detectMultiScale(gray, 1.3, 5)\n for (xa, ya, wa, ha) in pedestrians:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 128, 0), 2)\n if biggest_value < x * y:\n found_msg = \"pedestrians\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"pedestrians\", x, y)\n\n # turn right\n turn_right_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/turn_right/cascade.xml')\n turn_right = turn_right_cascade.detectMultiScale(gray, 1.3, 5)\n for (xa, ya, wa, ha) in turn_right:\n cv2.rectangle(img, (x, y), (x + w, y + h), (128, 255, 0), 2)\n if biggest_value < x * y:\n found_msg = \"turn_right\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"turn right\", x, y)\n\n # turn left\n turn_left_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/turn_left/cascade.xml')\n turn_left = turn_left_cascade.detectMultiScale(gray, 1.3, 5)\n for (xa, ya, wa, ha) in turn_left:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 128, 0), 2)\n if biggest_value < x * y:\n found_msg = \"turn_left\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"turn left\", x, y)\n\n # warning\n warning_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/warning/cascade.xml')\n warning = warning_cascade.detectMultiScale(gray, 1.3, 5)\n for (xa, ya, wa, ha) in warning:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 128, 128), 2)\n if biggest_value < x * y:\n found_msg = \"warning\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"warning\", x, y)\n\n # crossing\n crossing_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/cross/cascade.xml')\n crossing = crossing_cascade.detectMultiScale(gray, 1.3, 5)\n for (xa, ya, wa, ha) in crossing:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)\n if biggest_value < x * y:\n found_msg = \"entry_crossing\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"crossing\", x, y)\n\n # slippery\n slippery_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/slippery/cascade.xml')\n slippery = slippery_cascade.detectMultiScale(gray, 1.3, 5)\n for (xa, ya, wa, ha) in slippery:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)\n if biggest_value < x * y:\n found_msg = \"entry_slippery\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"slippery\", x, y)\n\n # main road\n main_road_cascade = cv2.CascadeClassifier(\n '/home/user/catkin_ws/src/traffic_sign/cascades/cascade_main_road.xml')\n main_road = main_road_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in main_road:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)\n if biggest_value < x * y:\n found_msg = \"main_road\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"main road\", x, y)\n\n # road closed\n road_closed_cascade = cv2.CascadeClassifier('/home/user/catkin_ws/src/traffic_sign/cascades/cascade_road_closed.xml')\n road_closed = road_closed_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in road_closed:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)\n if biggest_value < x * y:\n found_msg = \"entry_road_closed\"\n biggest_value = x * y\n y = y - 5\n self.write_text_on_image(img, \"road closed\", x, y)\n\n 
rospy.loginfo(found_msg)\n \n #### Create CompressedIamge ####\n msg = CompressedImage()\n msg.header.stamp = rospy.Time.now()\n msg.format = \"jpeg\"\n msg.data = np.array(cv2.imencode('.jpg', img)[1]).tostring()\n\n # Publish new image\n self.image_publisher.publish(msg)\n self.detection_publisher.publish(found_msg)\n #random_number = str(random.randint(1,101))\n #self.detection_publisher.publish(random_number)\n\n def write_text_on_image(self, img, message, x, y):\n \"\"\"\n Writes text above the recognized field\n\n :param img:\n :param message:\n :return:\n \"\"\"\n\n bottom_left_corner_of_text=(x,y)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 1\n font_color = (0, 255, 0)\n line_type = 2\n\n cv2.putText(img, message,\n bottom_left_corner_of_text,\n font,\n font_scale,\n font_color,\n line_type)\n\nif __name__ == '__main__':\n image = image_feature()\n rospy.init_node('traffic_sign', log_level=rospy.DEBUG)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n rospy.loginfo(\"Shutting down traffic sign node\")\n cv2.destroyAllWindows()\n"
] | [
[
"numpy.fromstring"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yongpi-scu/TPRNet | [
"bc97169ebe4d123a64da6b0fdc787ecb89c7372f"
] | [
"utils/metrics.py"
] | [
"import numpy as np\r\ndef get_confusion_matrix(output,target):\r\n confusion_matrix = np.zeros((output[0].shape[0],output[0].shape[0]))\r\n for i in range(len(output)):\r\n true_idx = target[i]\r\n pred_idx = np.argmax(output[i])\r\n confusion_matrix[true_idx][pred_idx] += 1.0\r\n return confusion_matrix\r\n\r\ndef get_confusion_matrix_logits(output,target):\r\n confusion_matrix = np.zeros((2,2))\r\n for i in range(len(output)):\r\n true_idx = target[i]\r\n pred_idx = 1 if output[i]>0.5 else 0\r\n confusion_matrix[true_idx][pred_idx] += 1.0\r\n return confusion_matrix\r\n"
] | [
[
"numpy.argmax",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ninastijepovic/MasterThesis | [
"2579f1e74c0ce404f350a6d441e273b6aef4eadc"
] | [
"train_unet.py"
] | [
"# import the necessary packages\nimport os\nimport argparse\nimport random\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib import image, pyplot as plt\nfrom random import sample,randint\nmatplotlib.use(\"Agg\")\n\nfrom preprocessor import preprocessor\nfrom tqdm import tqdm_notebook, tnrange\nfrom itertools import chain\nfrom sklearn.metrics import roc_curve, auc \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import multilabel_confusion_matrix\nfrom unet import get_unet \n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.optimizers import Adam, SGD, RMSprop\nfrom tensorflow.python.keras.layers import Input, Activation, Reshape, Dropout, Flatten, Conv2D, MaxPooling2D, Dense, BatchNormalization, GlobalAveragePooling2D\nimport wandb\nfrom wandb.keras import WandbCallback\n\n# initialize the number of epochs to train for, initial learning rate,\n# batch size, and image dimensions\n\n# set parameters\ndefaults=dict(\n learn_rate = 0.001,\n batch_size = 256,\n epochs = 100,\n )\nwandb.init(project=\"master_thesis\", config=defaults, name=\"unet_mask4_100samples\")\nconfig = wandb.config\n\n#load data\nf = open(\"/var/scratch/nsc400/hera_data/HERA_masks29-07-2020.pkl\",\"rb\")\ndataset = pickle.load(f,encoding='latin1')\ndata = dataset[0]\nlabels = dataset[2]\nmask1 = dataset[4]\nmask2 = dataset[5]\nmask3 = dataset[6]\nmask4 = dataset[7]\n\n\nd_height = data.shape[1]\nd_width = data.shape[2]\n\n# partition the data into training and testing splits using 80% of\n# the data for training and the remaining 20% for testing\n\ntrainX, testX, trainY, testY = train_test_split(data,\n mask4, train_size=0.004, random_state=42)\n\n# initialize the model using a sigmoid activation as the final layer\n\nprint(\"[INFO] compiling model...\")\ninput_data = Input((d_height, d_width, 1), name='data')\nmodel = get_unet(input_data, n_filters=16, dropout=0.05, batchnorm=True)\n\n# initialize the optimizer\nopt = Adam(lr=config.learn_rate,decay = config.learn_rate/config.epochs)\n#decay = config.learn_rate/config.epochs\n#opt = SGD(lr=config.learn_rate)\n#opt = RMSprop(lr=config.learn_rate)\n\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\n#print(\"[INFO] summary of model...\")\n#print(model.summary())\n\ncallbacks = [\n WandbCallback(),\n EarlyStopping(patience=50, verbose=1, monitor='val_loss'),\n ReduceLROnPlateau(factor=0.1, patience=30, verbose=1),\n ModelCheckpoint('model-unet-mask1-100.h5', verbose=1, save_best_only=True,save_weights_only=False)\n]\n\n# train the network\nprint(\"[INFO] training network...\")\nH = model.fit(trainX, trainY, batch_size=config.batch_size,\n validation_data=(testX, testY), epochs=config.epochs, verbose=1, callbacks=callbacks)\n\n# log the number of total parameters\nconfig.total_params = model.count_params()\nprint(\"Total params: \", config.total_params)\n\n# save the model to disk\nprint(\"[INFO] serializing network...\")\nmodel.save(\"model_unet_mask4_100\")\n\n#save model\nwandb.save('model_unet_rfi_impulse.h5')\n\n# Predict on train, val and 
test\npreds_train = model.predict(trainX, verbose=1)\npreds_val = model.predict(testX, verbose=1)\n\npreds_train_t = (preds_train > 0.5).astype(np.uint8)\npreds_val_t = (preds_val > 0.5).astype(np.uint8)\n\n#cf = ClassificationReport(ix)\n#cf_mean = cf.generate(trainY, preds_train_t)\n#print(\"Classification report mean : {}\".format(cf_mean))\n#classification report\n#print(classification_report(testY, preds_val))\n\nprint('Classification report:\\n', classification_report(testY.flatten(), preds_val_t.flatten()))\n\ndef plot_io(model,data,mask):\n\n mask = mask \n output = model.predict(data)\n binaryp = (output >0.04).astype(np.uint8)\n print(model.evaluate(data, mask, verbose=1))\n it = 1\n if isinstance(data,list):\n it = 2\n shape = output[0].shape[0]\n else:\n shape = output.shape[0]\n\n for i in range(it):\n fig,axs = plt.subplots(3,2,figsize=(10,10))\n\n if isinstance(data,list):\n inp = data[i]\n msk = mask[i]\n outp = output[i]\n bp = binaryp[i]\n else:\n inp = data\n msk = mask\n outp = output\n bp = binaryp\n\n for j in range(2):\n r = randint(0,shape-1)\n has_mask = msk[r,...,0].max() > 0\n\n axs[0,j].imshow(inp[r,...,0]);\n #if has_mask:\n #axs[0,j].contour(msk[r,...,0].squeeze(), levels=[0.1])\n axs[0,j].set_title(f' {labels[r]}',fontsize=10)\n\n axs[1,j].imshow(msk[r,...,0].squeeze(), vmin=0, vmax=1);\n axs[1,j].title.set_text('Mask {}'.format(r))\n\n #axs[2,j].imshow(outp[r,...,0]);\n #if has_mask:\n #axs[2,j].contour(msk[r,...,0].squeeze(),levels=[0.1])\n #axs[2,j].title.set_text('Mask Predicted{}'.format(r))\n\n axs[2,j].imshow(bp[r,...,0].squeeze(), vmin=0, vmax=1);\n if has_mask:\n axs[2,j].contour(msk[r,...,0].squeeze(),levels=[0.09])\n axs[2,j].title.set_text('Mask Binary Predicted{}'.format(r))\n\n\n return plt\n\n\nwandb.log({'Analysis':plot_io(model,testX,testY)})\n\n\nrealm=testY.ravel()\npredicted=preds_val.ravel()\nfpr, tpr, _ = roc_curve(realm, predicted)\nroc_auc = auc(fpr,tpr)\n\nfig, ax = plt.subplots(1,1)\nax.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\nax.plot([0, 1], [0, 1], 'k--')\nax.set_xlim([0.0, 1.0])\nax.set_ylim([0.0, 1.05])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.legend(loc=\"lower right\")\nplt.grid()\nplt.savefig('rocunet_mask4_100.png')\nwandb.Image(plt)\nwandb.log({\"ROC\": plt})\n"
] | [
[
"tensorflow.python.keras.callbacks.EarlyStopping",
"matplotlib.use",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"tensorflow.python.keras.callbacks.ReduceLROnPlateau",
"matplotlib.pyplot.grid",
"tensorflow.python.keras.optimizers.Adam",
"sklearn.metrics.auc",
"tensorflow.python.keras.callbacks.ModelCheckpoint",
"tensorflow.python.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"2.3",
"1.5",
"1.7",
"2.2"
]
}
] |
philippgualdi/PyQMRI | [
"5de3a7da5feb2d01b746acd47d1dba91a8a1417e",
"5de3a7da5feb2d01b746acd47d1dba91a8a1417e"
] | [
"test/unittests/test_symmetrized_gradient_double.py",
"pyqmri/models/ImageReco.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 12 11:26:41 2019\n\n@author: omaier\n\"\"\"\n\nimport pyqmri\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\nfrom pyqmri._helper_fun import CLProgram as Program\nfrom pkg_resources import resource_filename\nimport pyopencl.array as clarray\nimport numpy as np\n\n\nDTYPE = np.complex128\nDTYPE_real = np.float64\nATOL=1e-14\nRTOL=1e-12\n\nclass tmpArgs():\n pass\n\n\ndef setupPar(par):\n par[\"NScan\"] = 10\n par[\"NC\"] = 15\n par[\"NSlice\"] = 10\n par[\"dimX\"] = 128\n par[\"dimY\"] = 128\n par[\"Nproj\"] = 21\n par[\"N\"] = 256\n par[\"unknowns_TGV\"] = 2\n par[\"unknowns_H1\"] = 0\n par[\"unknowns\"] = 2\n par[\"dz\"] = 1\n par[\"weights\"] = np.array([1, 0.1])\n\n\nclass SymmetrizedGradientTest(unittest.TestCase):\n def setUp(self):\n parser = tmpArgs()\n parser.streamed = False\n parser.devices = -1\n parser.use_GPU = True\n\n par = {}\n pyqmri.pyqmri._setupOCL(parser, par)\n setupPar(par)\n if DTYPE == np.complex128:\n file = open(\n resource_filename(\n 'pyqmri', 'kernels/OpenCL_Kernels_double.c'))\n else:\n file = open(\n resource_filename(\n 'pyqmri', 'kernels/OpenCL_Kernels.c'))\n prg = Program(\n par[\"ctx\"][0],\n file.read())\n file.close()\n\n self.weights = par[\"weights\"]\n\n self.symgrad = pyqmri.operator.OperatorFiniteSymGradient(\n par, prg,\n DTYPE=DTYPE,\n DTYPE_real=DTYPE_real)\n\n self.symgradin = np.random.randn(par[\"unknowns\"], par[\"NSlice\"],\n par[\"dimY\"], par[\"dimX\"], 4) +\\\n 1j * np.random.randn(par[\"unknowns\"], par[\"NSlice\"],\n par[\"dimY\"], par[\"dimX\"], 4)\n self.symdivin = np.random.randn(par[\"unknowns\"], par[\"NSlice\"],\n par[\"dimY\"], par[\"dimX\"], 8) +\\\n 1j * np.random.randn(par[\"unknowns\"], par[\"NSlice\"],\n par[\"dimY\"], par[\"dimX\"], 8)\n self.symgradin = self.symgradin.astype(DTYPE)\n self.symdivin = self.symdivin.astype(DTYPE)\n self.dz = par[\"dz\"]\n self.queue = par[\"queue\"][0]\n\n def test_sym_grad_outofplace(self):\n gradx = np.zeros_like(self.symgradin)\n grady = np.zeros_like(self.symgradin)\n gradz = np.zeros_like(self.symgradin)\n\n gradx[..., 1:, :] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-2), axis=-2), axis=-2)\n grady[..., 1:, :, :] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-3), axis=-3), axis=-3)\n gradz[:, 1:, ...] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-4), axis=-4), axis=-4)\n\n symgrad = np.stack((gradx[..., 0],\n grady[..., 1],\n gradz[..., 2]*self.dz,\n 1/2 * (gradx[..., 1] + grady[..., 0]),\n 1/2 * (gradx[..., 2] + gradz[..., 0]*self.dz),\n 1/2 * (grady[..., 2] + gradz[..., 1]*self.dz)),\n axis=-1)\n symgrad *= self.weights[:, None, None, None, None]\n\n inp = clarray.to_device(self.queue, self.symgradin)\n outp = self.symgrad.fwdoop(inp)\n outp = outp.get()\n\n np.testing.assert_allclose(outp[..., :6], symgrad, rtol=RTOL, atol=ATOL)\n\n def test_sym_grad_inplace(self):\n gradx = np.zeros_like(self.symgradin)\n grady = np.zeros_like(self.symgradin)\n gradz = np.zeros_like(self.symgradin)\n\n gradx[..., 1:, :] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-2), axis=-2), axis=-2)\n grady[..., 1:, :, :] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-3), axis=-3), axis=-3)\n gradz[:, 1:, ...] 
= -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-4), axis=-4), axis=-4)\n\n symgrad = np.stack((gradx[..., 0],\n grady[..., 1],\n gradz[..., 2]*self.dz,\n 1/2 * (gradx[..., 1] + grady[..., 0]),\n 1/2 * (gradx[..., 2] + gradz[..., 0]*self.dz),\n 1/2 * (grady[..., 2] + gradz[..., 1]*self.dz)),\n axis=-1)\n symgrad *= self.weights[:, None, None, None, None]\n inp = clarray.to_device(self.queue, self.symgradin)\n outp = clarray.to_device(self.queue, self.symdivin)\n outp.add_event(self.symgrad.fwd(outp, inp))\n outp = outp.get()\n\n np.testing.assert_allclose(outp[..., :6], symgrad, rtol=RTOL, atol=ATOL)\n\n def test_adj_outofplace(self):\n inpgrad = clarray.to_device(self.queue, self.symgradin)\n inpdiv = clarray.to_device(self.queue, self.symdivin)\n\n outgrad = self.symgrad.fwdoop(inpgrad)\n outdiv = self.symgrad.adjoop(inpdiv)\n\n outgrad = outgrad.get()\n outdiv = outdiv.get()\n a1 = np.vdot(outgrad[..., :3].flatten(),\n self.symdivin[..., :3].flatten())/self.symgradin.size*4\n a2 = 2*np.vdot(outgrad[..., 3:6].flatten(),\n self.symdivin[..., 3:6].flatten())/self.symgradin.size*4\n a = a1+a2\n b = np.vdot(self.symgradin[..., :3].flatten(),\n -outdiv[..., :3].flatten())/self.symgradin.size*4\n\n print(\"Adjointness: %.2e +1j %.2e\" % ((a - b).real, (a - b).imag))\n\n np.testing.assert_allclose(a, b, rtol=RTOL, atol=ATOL)\n\n def test_adj_inplace(self):\n inpgrad = clarray.to_device(self.queue, self.symgradin)\n inpdiv = clarray.to_device(self.queue, self.symdivin)\n\n outgrad = clarray.zeros_like(inpdiv)\n outdiv = clarray.zeros_like(inpgrad)\n\n outgrad.add_event(self.symgrad.fwd(outgrad, inpgrad))\n outdiv.add_event(self.symgrad.adj(outdiv, inpdiv))\n\n outgrad = outgrad.get()\n outdiv = outdiv.get()\n\n a1 = np.vdot(outgrad[..., :3].flatten(),\n self.symdivin[..., :3].flatten())/self.symgradin.size*4\n a2 = 2*np.vdot(outgrad[..., 3:6].flatten(),\n self.symdivin[..., 3:6].flatten())/self.symgradin.size*4\n a = a1+a2\n b = np.vdot(self.symgradin[..., :3].flatten(),\n -outdiv[..., :3].flatten())/self.symgradin.size*4\n\n print(\"Adjointness: %.2e +1j %.2e\" % ((a - b).real, (a - b).imag))\n\n np.testing.assert_allclose(a, b, rtol=RTOL, atol=ATOL)\n\n\nclass SymmetrizedGradientStreamedTest(unittest.TestCase):\n def setUp(self):\n parser = tmpArgs()\n parser.streamed = True\n parser.devices = -1\n parser.use_GPU = True\n\n par = {}\n pyqmri.pyqmri._setupOCL(parser, par)\n setupPar(par)\n if DTYPE == np.complex128:\n file = resource_filename(\n 'pyqmri', 'kernels/OpenCL_Kernels_double_streamed.c')\n else:\n file = resource_filename(\n 'pyqmri', 'kernels/OpenCL_Kernels_streamed.c')\n\n prg = []\n for j in range(len(par[\"ctx\"])):\n with open(file) as myfile:\n prg.append(Program(\n par[\"ctx\"][j],\n myfile.read()))\n\n par[\"par_slices\"] = 1\n\n self.weights = par[\"weights\"]\n\n self.symgrad = pyqmri.operator.OperatorFiniteSymGradientStreamed(\n par, prg,\n DTYPE=DTYPE,\n DTYPE_real=DTYPE_real)\n\n self.symgradin = np.random.randn(par[\"NSlice\"], par[\"unknowns\"],\n par[\"dimY\"], par[\"dimX\"], 4) +\\\n 1j * np.random.randn(par[\"NSlice\"], par[\"unknowns\"],\n par[\"dimY\"], par[\"dimX\"], 4)\n self.symdivin = np.random.randn(par[\"NSlice\"], par[\"unknowns\"],\n par[\"dimY\"], par[\"dimX\"], 8) +\\\n 1j * np.random.randn(par[\"NSlice\"], par[\"unknowns\"],\n par[\"dimY\"], par[\"dimX\"], 8)\n self.symgradin = self.symgradin.astype(DTYPE)\n self.symdivin = self.symdivin.astype(DTYPE)\n self.dz = par[\"dz\"]\n\n def test_grad_outofplace(self):\n gradx = 
np.zeros_like(self.symgradin)\n grady = np.zeros_like(self.symgradin)\n gradz = np.zeros_like(self.symgradin)\n\n gradx[..., 1:, :] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-2), axis=-2), axis=-2)\n grady[..., 1:, :, :] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-3), axis=-3), axis=-3)\n gradz[1:, ...] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=0), axis=0), axis=0)\n\n symgrad = np.stack((gradx[..., 0],\n grady[..., 1],\n gradz[..., 2]*self.dz,\n 1/2 * (gradx[..., 1] + grady[..., 0]),\n 1/2 * (gradx[..., 2] + gradz[..., 0]*self.dz),\n 1/2 * (grady[..., 2] + gradz[..., 1]*self.dz)),\n axis=-1)\n symgrad *= self.weights[None, :, None, None, None]\n outp = self.symgrad.fwdoop([[self.symgradin]])\n\n np.testing.assert_allclose(outp[..., :6], symgrad, rtol=RTOL, atol=ATOL)\n\n def test_grad_inplace(self):\n gradx = np.zeros_like(self.symgradin)\n grady = np.zeros_like(self.symgradin)\n gradz = np.zeros_like(self.symgradin)\n\n gradx[..., 1:, :] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-2), axis=-2), axis=-2)\n grady[..., 1:, :, :] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=-3), axis=-3), axis=-3)\n gradz[1:, ...] = -np.flip(\n np.diff(\n np.flip(self.symgradin, axis=0), axis=0), axis=0)\n\n symgrad = np.stack((gradx[..., 0],\n grady[..., 1],\n gradz[..., 2]*self.dz,\n 1/2 * (gradx[..., 1] + grady[..., 0]),\n 1/2 * (gradx[..., 2] + gradz[..., 0]*self.dz),\n 1/2 * (grady[..., 2] + gradz[..., 1]*self.dz)),\n axis=-1)\n symgrad *= self.weights[None, :, None, None, None]\n outp = np.zeros_like(self.symdivin)\n\n self.symgrad.fwd([outp], [[self.symgradin]])\n\n np.testing.assert_allclose(outp[..., :6], symgrad, rtol=RTOL, atol=ATOL)\n\n def test_adj_outofplace(self):\n\n outgrad = self.symgrad.fwdoop([[self.symgradin]])\n outdiv = self.symgrad.adjoop([[self.symdivin]])\n\n a1 = np.vdot(outgrad[..., :3].flatten(),\n self.symdivin[..., :3].flatten())/self.symgradin.size*4\n a2 = 2*np.vdot(outgrad[..., 3:6].flatten(),\n self.symdivin[..., 3:6].flatten())/self.symgradin.size*4\n a = a1+a2\n b = np.vdot(self.symgradin[..., :3].flatten(),\n -outdiv[..., :3].flatten())/self.symgradin.size*4\n\n print(\"Adjointness: %.2e +1j %.2e\" % ((a - b).real, (a - b).imag))\n\n np.testing.assert_allclose(a, b, rtol=RTOL, atol=ATOL)\n\n def test_adj_inplace(self):\n\n outgrad = np.zeros_like(self.symdivin)\n outdiv = np.zeros_like(self.symgradin)\n\n self.symgrad.fwd([outgrad], [[self.symgradin]])\n self.symgrad.adj([outdiv], [[self.symdivin]])\n\n a1 = np.vdot(outgrad[..., :3].flatten(),\n self.symdivin[..., :3].flatten())/self.symgradin.size*4\n a2 = 2*np.vdot(outgrad[..., 3:6].flatten(),\n self.symdivin[..., 3:6].flatten())/self.symgradin.size*4\n a = a1+a2\n b = np.vdot(self.symgradin[..., :3].flatten(),\n -outdiv[..., :3].flatten())/self.symgradin.size*4\n\n print(\"Adjointness: %.2e +1j %.2e\" % ((a - b).real, (a - b).imag))\n\n np.testing.assert_allclose(a, b, rtol=RTOL, atol=ATOL)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Module holding the simple image model for image reconstruction.\"\"\"\nimport numpy as np\nfrom pyqmri.models.template import BaseModel, constraints\n\n\nclass Model(BaseModel):\n \"\"\"Image reconstruction model for MRI.\n\n A simple linear image model to perform image reconstruction with\n joint regularization on all Scans.\n\n Parameters\n ----------\n par : dict\n A python dict containing the necessary information to\n setup the object. Needs to contain the sequence related parametrs,\n e.g. TR, TE, TI, to fully describe the acquisitio process\n Attributes\n ----------\n guess : numpy.array, None\n Initial guess for the images. Set after object creation using\n \"computeInitialGuess\"\n \"\"\"\n\n def __init__(self, par):\n super().__init__(par)\n\n par[\"unknowns_TGV\"] = self.NScan\n par[\"unknowns_H1\"] = 0\n par[\"unknowns\"] = par[\"unknowns_TGV\"]+par[\"unknowns_H1\"]\n\n for j in range(par[\"unknowns\"]):\n self.uk_scale.append(1)\n\n for j in range(par[\"unknowns\"]):\n self.constraints.append(\n constraints(-100 / self.uk_scale[j],\n 100 / self.uk_scale[j],\n False))\n \n self.guess = None\n\n def rescale(self, x):\n \"\"\"Rescale the unknowns with the scaling factors.\n\n Rescales each unknown with the corresponding scaling.\n\n Parameters\n ----------\n x : numpy.array\n The array of unknowns to be rescaled\n\n Returns\n -------\n numpy.array:\n The rescaled unknowns\n \"\"\"\n tmp_x = np.copy(x)\n uk_names = []\n for j in range(self.NScan):\n tmp_x[j] *= self.uk_scale[j]\n uk_names.append(\"Image_\"+str(j))\n\n const = []\n for constrained in self.constraints:\n const.append(constrained.real)\n\n return {\"data\": tmp_x,\n \"unknown_name\": uk_names,\n \"real_valued\": const}\n\n def _execute_forward_3D(self, x):\n S = np.zeros_like(x)\n for j in range(self.NScan):\n S[j, ...] = x[j, ...] * self.uk_scale[j]\n S[~np.isfinite(S)] = 1e-20\n return S\n\n def _execute_gradient_3D(self, x):\n grad_M0 = np.zeros(((self.NScan, )+x.shape), dtype=self._DTYPE)\n for j in range(self.NScan):\n grad_M0[j, j, ...] = self.uk_scale[j]*np.ones_like(x[j])\n grad_M0[~np.isfinite(grad_M0)] = 1e-20\n return grad_M0\n\n def computeInitialGuess(self, *args):\n \"\"\"Initialize unknown array for the fitting.\n\n This function provides an initial guess for the fitting.\n\n Parameters\n ----------\n args : list of objects\n Assumes the images series at position 0 and uses it as initial\n guess.\n \"\"\"\n self.guess = np.zeros_like((args[0]).astype(self._DTYPE))\n"
] | [
[
"numpy.stack",
"numpy.random.randn",
"numpy.zeros_like",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.flip"
],
[
"numpy.ones_like",
"numpy.isfinite",
"numpy.copy",
"numpy.zeros_like",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
g-mitu/timeseries | [
"3b2daf33f9af022d1aae7c4a9caf69b8abd58348",
"3b2daf33f9af022d1aae7c4a9caf69b8abd58348"
] | [
"WritingNovel/show_matplotlib.py",
"doubleYline.py"
] | [
"import matplotlib.pyplot as plt\r\nimport numpy as np # 导入包\r\n\r\nt1 = np.arange(0.0, 4.0, 0.1)\r\nt2 = np.arange(0.0, 4.0, 0.05) # 准备一些数据\r\n\r\nfig = plt.figure() # 准备好这张纸,并把句柄传给fig\r\nax1 = fig.add_subplot(211) # 使用句柄fig添加一个子图\r\nline1, = plt.plot(t1, np.sin(2 * np.pi * t1), '--*') # 绘图,将句柄返给line1\r\nplt.title('sine function demo')\r\nplt.xlabel('time(s)')\r\nplt.ylabel('votage(mV)')\r\nplt.xlim([0.0, 5.0])\r\nplt.ylim([-1.2, 1.2])\r\nplt.grid('on')\r\n\r\nplt.setp(line1, lw=2, c='g') # 通过setp函数,设置句柄为line1的线的属性,c是color的简写\r\nline1.set_antialiased(False) # 通过line1句柄的set_*属性设置line1的属性\r\nplt.text(4, 0, '$\\mu=100,\\\\sigma=15$') # 添加text,注意,它能接受LaTeX哟!\r\n\r\nax2 = fig.add_subplot(212)\r\nplt.plot(t2, np.exp(-t2), ':r')\r\n\r\nplt.plot(t2, np.cos(2 * np.pi * t2), '--b')\r\n\r\nplt.xlabel('time')\r\nplt.ylabel('amplitude')\r\nplt.show()\r\n\r\n## sample 2\r\n\"\"\"\r\n==================\r\nggplot style sheet\r\n==================\r\n\r\nThis example demonstrates the \"ggplot\" style, which adjusts the style to\r\nemulate ggplot_ (a popular plotting package for R_).\r\n\r\nThese settings were shamelessly stolen from [1]_ (with permission).\r\n\r\n.. [1] http://www.huyng.com/posts/sane-color-scheme-for-matplotlib/\r\n\r\n.. _ggplot: http://ggplot2.org/\r\n.. _R: https://www.r-project.org/\r\n\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.style.use('ggplot')\r\n\r\nfig, axes = plt.subplots(ncols=2, nrows=2)\r\nax1, ax2, ax3, ax4 = axes.ravel()\r\n\r\n# scatter plot (Note: `plt.scatter` doesn't use default colors)\r\nx, y = np.random.normal(size=(2, 200))\r\nax1.plot(x, y, 'o')\r\n\r\n# sinusoidal lines with colors from default color cycle\r\nL = 2*np.pi\r\nx = np.linspace(0, L)\r\nncolors = len(plt.rcParams['axes.prop_cycle'])\r\nshift = np.linspace(0, L, ncolors, endpoint=False)\r\nfor s in shift:\r\n ax2.plot(x, np.sin(x + s), '-')\r\nax2.margins(0)\r\n\r\n# bar graphs\r\nx = np.arange(5)\r\ny1, y2 = np.random.randint(1, 25, size=(2, 5))\r\nwidth = 0.25\r\nax3.bar(x, y1, width)\r\nax3.bar(x + width, y2, width,\r\n color=list(plt.rcParams['axes.prop_cycle'])[2]['color'])\r\nax3.set_xticks(x + width)\r\nax3.set_xticklabels(['a', 'b', 'c', 'd', 'e'])\r\n\r\n# circles with colors from default color cycle\r\nfor i, color in enumerate(plt.rcParams['axes.prop_cycle']):\r\n xy = np.random.normal(size=2)\r\n ax4.add_patch(plt.Circle(xy, radius=0.3, color=color['color']))\r\nax4.axis('equal')\r\nax4.margins(0)\r\n\r\nplt.show()",
"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nts = pd.Series(\r\n np.random.randn(1000), index=pd.date_range(\"3/1/2022\", periods=1000)\r\n) # 月/日/年\r\n# ts = ts.cumsum()\r\ndf = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list(\"ABCD\"))\r\ndf = df.cumsum()\r\nprint(df)\r\n# 图1:其中A图用左Y轴标注,B图用右Y轴标注,二者共用一个X轴\r\ndf.A.plot() # 对A列作图,同理可对行做图\r\ndf.B.plot(secondary_y=True, style=\"g\") # 设置第二个y轴(右y轴)\r\n# 图2\r\nax = df.plot(\r\n secondary_y=[\"A\", \"B\"]\r\n) # 定义column A B使用右Y轴。ax(axes)可以理解为子图,也可以理解成对黑板进行切分,每一个板块就是一个axes\r\n# ax = df.plot(secondary_y=['A', 'B'], mark_right=False)#上一行默认图列会显示(right), mark_right=False即关闭显示\r\nax.set_ylabel(\"CD scale\")\r\nax.right_ax.set_ylabel(\"AB scale\")\r\nax.legend(loc=\"upper left\") # 设置图例的位置\r\nax.right_ax.legend(loc=\"upper right\")\r\n# ax.legend(loc='1')\r\n# plt.legend(loc='2')zhem\r\n# 展示\r\nplt.show()\r\n"
] | [
[
"numpy.linspace",
"numpy.exp",
"numpy.random.randint",
"numpy.arange",
"numpy.sin",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.text",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplots",
"numpy.cos",
"matplotlib.pyplot.xlim",
"numpy.random.normal",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
],
[
"matplotlib.pyplot.show",
"numpy.random.randn",
"pandas.date_range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
muntashir/movie-review-sentiment-classifier | [
"f850f4902186b4ac6bc62126fa333d8e9975a759"
] | [
"data.py"
] | [
"import os\r\nimport random\r\nimport json\r\nimport torch\r\nimport numpy as np\r\n\r\nVOCAB_SIZE = 89528\r\nMAX_LENGTH = 150\r\n\r\nclass Dataset:\r\n\r\n def __init__(self, data_dir):\r\n test_percent = 0.1\r\n validation_percent = 0.1\r\n\r\n # Index for minibatches\r\n self.data_index = {'train': 0, 'validation': 0, 'test': 0}\r\n self.epoch_count = 0\r\n\r\n dataset_filepath = os.path.join(data_dir, 'dataset.json')\r\n if os.path.isfile(dataset_filepath):\r\n print('Loading data split from cache')\r\n\r\n with open(dataset_filepath) as dataset_file:\r\n self.dataset = json.load(dataset_file)\r\n else:\r\n print('Generating data split')\r\n data_and_labels = []\r\n\r\n for folder, _, filenames in os.walk(data_dir):\r\n for filename in filenames:\r\n if data_dir == folder:\r\n continue\r\n\r\n label = folder.split(os.sep)[-1]\r\n full_path = os.path.join(folder, filename)\r\n data_and_label = {}\r\n data_and_label['path'] = full_path\r\n data_and_label['label'] = label\r\n data_and_labels.append(data_and_label)\r\n\r\n random.shuffle(data_and_labels)\r\n\r\n test_slice = int(len(data_and_labels) * test_percent)\r\n validation_slice = -int(len(data_and_labels) * validation_percent)\r\n\r\n self.dataset = {}\r\n self.dataset['test'] = data_and_labels[:test_slice]\r\n self.dataset['train'] = data_and_labels[test_slice:validation_slice]\r\n self.dataset['validation'] = data_and_labels[validation_slice:]\r\n\r\n with open(dataset_filepath, 'w') as dataset_file:\r\n json.dump(self.dataset, dataset_file)\r\n\r\n vocab_filepath = os.path.join(data_dir, 'imdb.vocab')\r\n if not os.path.isfile(vocab_filepath):\r\n print('vocab.txt file missing in dataset/')\r\n else:\r\n with open(vocab_filepath, 'r') as vocab_file:\r\n self.word_to_index = [line.rstrip('\\n') for line in vocab_file]\r\n\r\n def __load_text_as_vectors(self, batch):\r\n batch_size = len(batch)\r\n vectors_and_labels = []\r\n time_steps = 0\r\n\r\n for data in batch:\r\n vectors_and_label = {}\r\n label = np.zeros(2)\r\n if (data['label'] == 'pos'):\r\n label[0] = 1\r\n elif (data['label'] == 'neg'):\r\n label[1] = 1\r\n vectors_and_label['label'] = label\r\n vectors_and_label['vectors'] = []\r\n\r\n filepath = data['path']\r\n with open(filepath, 'r') as f:\r\n words = f.read() \\\r\n .replace('<br />', ' ') \\\r\n .replace('(', '') \\\r\n .replace(')', '') \\\r\n .replace('--', '') \\\r\n .replace('.', ' ') \\\r\n .replace('\"', ' ') \\\r\n .replace('\\'', ' ') \\\r\n .replace('!', '') \\\r\n .replace('?', '') \\\r\n .replace('_', '') \\\r\n .replace('/', '') \\\r\n .replace(',', '') \\\r\n .replace(':', '') \\\r\n .replace(';', '') \\\r\n .replace('*', '') \\\r\n .replace('`', '') \\\r\n .replace('&', '') \\\r\n .replace('\\\\', '') \\\r\n .split(' ')\r\n words = list(filter(None, words))\r\n words = list(filter(lambda x: x != '-', words))\r\n\r\n for word in words:\r\n word = word.lower()\r\n try:\r\n index = self.word_to_index.index(word)\r\n except ValueError:\r\n if __name__ == '__main__':\r\n print('Unknown word: ' + word)\r\n index = self.word_to_index.index('UNKNOWN_WORD_TOKEN')\r\n word_vector = np.zeros(VOCAB_SIZE)\r\n word_vector[index] = 1\r\n vectors_and_label['vectors'].append(word_vector)\r\n\r\n time_steps = np.max([len(vectors_and_label['vectors']), time_steps])\r\n vectors_and_labels.append(vectors_and_label)\r\n\r\n batch_matrix = torch.zeros(batch_size, int(np.min([time_steps, MAX_LENGTH])), VOCAB_SIZE)\r\n label_matrix = torch.zeros(batch_size, 2).type(torch.LongTensor)\r\n\r\n for batch_number, 
vectors_and_label in enumerate(vectors_and_labels):\r\n vectors = vectors_and_label['vectors']\r\n # Pad vectors to max length in batch and limit to MAX_LENGTH\r\n vectors += [np.zeros(VOCAB_SIZE)] * (time_steps - len(vectors))\r\n if time_steps > MAX_LENGTH:\r\n vectors = vectors[:MAX_LENGTH]\r\n\r\n label = vectors_and_label['label']\r\n label_matrix[batch_number, :] = torch.from_numpy(label).type(torch.LongTensor)\r\n\r\n for time_step, vector in enumerate(vectors):\r\n batch_matrix[batch_number, time_step, :] = torch.from_numpy(vector)\r\n\r\n return batch_matrix, label_matrix\r\n\r\n def get_next_minibatch(self, dataset_split, batch_size):\r\n epoch_end = False\r\n\r\n if self.data_index[dataset_split] == 0:\r\n random.shuffle(self.dataset[dataset_split])\r\n if dataset_split == 'train':\r\n self.epoch_count += 1\r\n print('\\nEpoch %i' % self.epoch_count)\r\n\r\n start_pos = self.data_index[dataset_split]\r\n end_pos = start_pos + batch_size\r\n\r\n if end_pos >= len(self.dataset[dataset_split]):\r\n end_pos = len(self.dataset[dataset_split])\r\n self.data_index[dataset_split] = 0\r\n epoch_end = True\r\n else:\r\n self.data_index[dataset_split] += batch_size\r\n\r\n minibatch = self.dataset[dataset_split][start_pos:end_pos]\r\n return self.__load_text_as_vectors(minibatch), epoch_end\r\n\r\n\r\ndef test():\r\n dataset = Dataset('dataset')\r\n assert(len(dataset.word_to_index) == VOCAB_SIZE)\r\n\r\n minibatch = dataset.get_next_minibatch('train', 3)\r\n assert(minibatch[0][0].size()[0] == 3)\r\n assert(minibatch[0][0].size()[2] == VOCAB_SIZE)\r\n assert(minibatch[0][1].size()[0] == 3)\r\n assert(minibatch[0][1].size()[1] == 2)\r\n\r\nif __name__ == '__main__':\r\n test()\r\n"
] | [
[
"torch.zeros",
"numpy.zeros",
"torch.from_numpy",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rmothukuru/probability | [
"24352279e5e255e054bfe9c7bdc7080ecb280fba",
"24352279e5e255e054bfe9c7bdc7080ecb280fba",
"24352279e5e255e054bfe9c7bdc7080ecb280fba"
] | [
"tensorflow_probability/python/bijectors/reciprocal.py",
"tensorflow_probability/python/bijectors/cholesky_to_inv_cholesky.py",
"tensorflow_probability/python/internal/prefer_static.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"A `Bijector` that computes `b(x) = 1. / x`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import auto_composite_tensor\nfrom tensorflow_probability.python.internal import dtype_util\n\n__all__ = ['Reciprocal']\n\n\n@auto_composite_tensor.auto_composite_tensor(omit_kwargs=('name',))\nclass Reciprocal(bijector.AutoCompositeTensorBijector):\n \"\"\"A `Bijector` that computes the reciprocal `b(x) = 1. / x` entrywise.\n\n This bijector accepts any non-zero values for both `forward` and `inverse`.\n\n #### Examples\n\n ```python\n bijector.Reciprocal().forward(x=[[1., 2.], [4., 5.]])\n # Result: [[1., .5], [.25, .2]], i.e., 1 / x\n\n bijector.Reciprocal().forward(x=[[0., 2.], [4., 5.]])\n # Result: AssertionError, doesn't accept zero.\n\n bijector.Square().inverse(y=[[1., 2.], [4., 5.]])\n # Result: [[1., .5], [.25, .2]], i.e. 1 / x\n\n ```\n \"\"\"\n\n _type_spec_id = 366918664\n\n def __init__(self, validate_args=False, name='reciprocal'):\n \"\"\"Instantiates the `Reciprocal`.\n\n Args:\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n super(Reciprocal, self).__init__(\n forward_min_event_ndims=0,\n validate_args=validate_args,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _is_increasing(cls):\n return False\n\n @classmethod\n def _parameter_properties(cls, dtype):\n return dict()\n\n def _forward(self, x):\n with tf.control_dependencies(self._assertions(x)):\n return 1. / x\n\n _inverse = _forward\n\n def _forward_log_det_jacobian(self, x):\n with tf.control_dependencies(self._assertions(x)):\n return -2. * tf.math.log(tf.math.abs(x))\n\n _inverse_log_det_jacobian = _forward_log_det_jacobian\n\n def _assertions(self, t):\n if not self.validate_args:\n return []\n return [assert_util.assert_none_equal(\n t, dtype_util.as_numpy_dtype(t.dtype)(0.),\n message='All elements must be non-zero.')]\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"CholeskyToInvCholesky bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.bijectors.cholesky_outer_product import CholeskyOuterProduct\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import auto_composite_tensor\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\n\n\n__all__ = [\n 'CholeskyToInvCholesky',\n]\n\n\n@auto_composite_tensor.auto_composite_tensor(omit_kwargs=('name',))\nclass CholeskyToInvCholesky(bijector.AutoCompositeTensorBijector):\n \"\"\"Maps the Cholesky factor of `M` to the Cholesky factor of `M^{-1}`.\n\n The `forward` and `inverse` calculations are conceptually identical to:\n\n ```python\n def forward(x):\n return tf.linalg.cholesky(tf.linalg.inv(tf.matmul(x, x, adjoint_b=True)))\n\n inverse = forward\n ```\n\n or, similarly,\n\n ```python\n tfb = tfp.bijectors\n CholeskyToInvCholesky = tfb.Chain([\n tfb.Invert(tfb.CholeskyOuterProduct()),\n tfb.MatrixInverse(),\n tfb.CholeskyOuterProduct(),\n ])\n ```\n\n However, the actual calculations exploit the triangular structure of the\n matrices.\n \"\"\"\n\n _type_spec_id = 366918637\n\n def __init__(self, validate_args=False, name='cholesky_to_inv_cholesky'):\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n self._cholesky = CholeskyOuterProduct()\n super(CholeskyToInvCholesky, self).__init__(\n forward_min_event_ndims=2,\n validate_args=validate_args,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _parameter_properties(cls, dtype):\n return dict()\n\n def _forward(self, x):\n with tf.control_dependencies(self._assertions(x)):\n x_shape = ps.shape(x)\n identity_matrix = tf.eye(\n x_shape[-1],\n batch_shape=x_shape[:-2],\n dtype=dtype_util.base_dtype(x.dtype))\n # Note `matrix_triangular_solve` implicitly zeros upper triangular of `x`.\n y = tf.linalg.triangular_solve(x, identity_matrix)\n y = tf.matmul(y, y, adjoint_a=True)\n return tf.linalg.cholesky(y)\n\n _inverse = _forward\n\n def _forward_log_det_jacobian(self, x):\n # CholeskyToInvCholesky.forward(X) is equivalent to\n # 1) M = CholeskyOuterProduct.forward(X)\n # 2) N = invert(M)\n # 3) Y = CholeskyOuterProduct.inverse(N)\n #\n # For step 1,\n # |Jac(outerprod(X))| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.\n # For step 2,\n # |Jac(inverse(M))| = |M|^{-(p+1)} (because M is symmetric)\n # = |X|^{-2(p+1)} = (prod_{j=0}^{p-1} X[j,j])^{-2(p+1)}\n # (see http://web.mit.edu/18.325/www/handouts/handout2.pdf sect 3.0.2)\n # For step 3,\n # |Jac(Cholesky(N))| = -|Jac(outerprod(Y)|\n # = 2^p prod_{j=0}^{p-1} 
Y[j,j]^{p-j}\n n = ps.cast(ps.shape(x)[-1], x.dtype)\n y = self._forward(x)\n return (\n (self._cholesky.forward_log_det_jacobian(x, event_ndims=2) -\n (n + 1.) * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(x)), axis=-1))\n -\n (self._cholesky.forward_log_det_jacobian(y, event_ndims=2) -\n (n + 1.) * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(y)), axis=-1))\n )\n\n _inverse_log_det_jacobian = _forward_log_det_jacobian\n\n def _assertions(self, x):\n if not self.validate_args:\n return []\n x_shape = tf.shape(x)\n is_matrix = assert_util.assert_rank_at_least(\n x, 2, message='Input must have rank at least 2.')\n is_square = assert_util.assert_equal(\n x_shape[-2], x_shape[-1], message='Input must be a square matrix.')\n diag_part_x = tf.linalg.diag_part(x)\n is_lower_triangular = assert_util.assert_equal(\n tf.linalg.band_part(x, 0, -1), # Preserves triu, zeros rest.\n tf.linalg.diag(diag_part_x),\n message='Input must be lower triangular.')\n is_positive_diag = assert_util.assert_positive(\n diag_part_x, message='Input must have all positive diagonal entries.')\n return [is_matrix, is_square, is_lower_triangular, is_positive_diag]\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Operations that use static values when possible.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport decorator\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal.backend import numpy as nptf\n\n# Try catch required to avoid breaking Probability opensource presubmits.\n# TODO(amitpatankar): Remove this once tf-nightly has latest code.\n# pylint: disable=g-import-not-at-top\ntry:\n from tensorflow.python.client import pywrap_tf_session as c_api # pylint: disable=g-direct-tensorflow-import\nexcept ImportError:\n from tensorflow.python import pywrap_tensorflow as c_api # pylint: disable=g-direct-tensorflow-import\n\nfrom tensorflow.python.framework import ops # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.ops import control_flow_ops # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.util import tf_inspect # pylint: disable=g-direct-tensorflow-import\n\nJAX_MODE = False\n\n# Enable converting TF TensorShape and Dimension into np.array. This allows TF\n# code to pass TensorShapes into prefer_static functions. 
We can also re-use the\n# nptf methods.\nnptf.register_tensor_conversion_function(\n tf1.Dimension, nptf.ops._convert_dimension_to_tensor) # pylint: disable=protected-access\nnptf.register_tensor_conversion_function(\n tf.TensorShape, nptf.ops._convert_tensorshape_to_tensor) # pylint: disable=protected-access\n\n\ndef _prefer_static(original_fn, static_fn):\n \"\"\"Wraps original_fn, preferring to call static_fn when inputs are static.\"\"\"\n original_spec = tf_inspect.getfullargspec(original_fn)\n static_spec = tf_inspect.getfullargspec(static_fn)\n if original_spec != static_spec:\n raise ValueError(\n 'Arg specs do not match: original={}, static={}, fn={}'.format(\n original_spec, static_spec, original_fn))\n @decorator.decorator\n def wrap(wrapped_fn, *args, **kwargs):\n \"\"\"The actual wrapper.\"\"\"\n del wrapped_fn\n flat_args = tf.nest.flatten([args, kwargs])\n # N.B.: This `get_static_value` is nontrivial even in Eager mode, because\n # Keras's symbolic Tensors can exist when executing eagerly, and their\n # static values can be `None`.\n flat_args_ = [tf.get_static_value(a) for a in flat_args]\n all_static = all(arg is None or arg_ is not None\n for arg, arg_ in zip(flat_args, flat_args_))\n if all_static:\n [args_, kwargs_] = tf.nest.pack_sequence_as([args, kwargs], flat_args_)\n return static_fn(*args_, **kwargs_)\n return original_fn(*args, **kwargs)\n return wrap(original_fn)\n\n\ndef _copy_docstring(original_fn, new_fn):\n \"\"\"Wraps new_fn with the doc of original_fn.\"\"\"\n original_spec = tf_inspect.getfullargspec(original_fn)\n new_spec = tf_inspect.getfullargspec(new_fn)\n if original_spec != new_spec:\n raise ValueError(\n 'Arg specs do not match: original={}, new={}, fn={}'.format(\n original_spec, new_spec, original_fn))\n @decorator.decorator\n def wrap(wrapped_fn, *args, **kwargs):\n del wrapped_fn\n return new_fn(*args, **kwargs)\n return wrap(original_fn)\n\n\ndef _numpy_dtype(dtype):\n if dtype is None:\n return None\n return dtype_util.as_numpy_dtype(dtype)\n\n\ndef _get_static_value(pred):\n \"\"\"Helper function for getting static values from maybe-tensor objects.\"\"\"\n if JAX_MODE:\n try:\n return np.asarray(pred)\n except: # JAX sometimes raises raw Exception in __array__. # pylint: disable=bare-except\n return None\n if tf.is_tensor(pred):\n pred_value = tf.get_static_value(tf.convert_to_tensor(pred))\n\n # TODO(jamieas): remove the dependency on `pywrap_tensorflow`.\n # Explicitly check for ops.Tensor, to avoid an AttributeError\n # when requesting `KerasTensor.graph`.\n # pylint: disable=protected-access\n if pred_value is None and isinstance(pred, ops.Tensor):\n pred_value = c_api.TF_TryEvaluateConstant_wrapper(pred.graph._c_graph,\n pred._as_tf_output())\n # pylint: enable=protected-access\n return pred_value\n return pred\n\n\ndef _get_static_predicate(pred):\n \"\"\"Helper function for statically evaluating predicates in `cond`.\"\"\"\n pred_value = _get_static_value(pred)\n if pred_value in (0, 1, True, False): # Accept 1/0 as valid boolean values.\n # This branch also casts np.array(False), tf.EagerTensor(True), etc.\n pred_value = bool(pred_value)\n elif pred_value is not None:\n raise TypeError('`pred` must be a Tensor, or a Python bool, or 1 or 0. 
'\n 'Found instead: {}'.format(pred))\n return pred_value\n\n\ndef _convert_to_shape_tensor_jax(value, dtype=None, dtype_hint=None, name=None): # pylint: disable=unused-argument\n \"\"\"Converts vectors and scalars of `int`-like to `ndarray`.\"\"\"\n dtype = dtype_util.as_numpy_dtype(dtype or dtype_hint or np.int32)\n try:\n return np.array([_convert_to_shape_tensor_jax(v, dtype) for v in value],\n dtype=dtype)\n except: # JAX throws raw Exception in some cases. # pylint: disable=bare-except\n pass\n return np.array(int(value), dtype=dtype)\n\n\ndef smart_where(condition, x_fn, y_fn):\n \"\"\"As tf.where, but only calls x_fn/y_fn when condition not statically known.\n\n IMPORTANT: Since this avoids executing the inoperative branch when possible,\n it will not necessarily broadcast `x_fn()` with `y_fn()`, so it is imperative\n that they return `Tensor`s which broadcast with `condition` to the same final\n shape.\n\n Args:\n condition: A `bool` Tensor.\n x_fn: A callable returning a `Tensor`, for locations where `condition` is\n `True`.\n y_fn: A callable returning a `Tensor`, for locations where `condition` is\n `False`.\n\n Returns:\n A `Tensor` equivalent to `tf.where(condition, x_fn(), y_fn())`.\n \"\"\"\n cond_static = _get_static_value(condition)\n if cond_static is not None:\n if np.size(cond_static) == 1 and cond_static in (0, 1, False, True):\n return x_fn() if bool(cond_static) else y_fn()\n elif isinstance(cond_static, (np.ndarray, np.generic)):\n if np.all(cond_static):\n x = x_fn()\n return tf.broadcast_to(\n x, tf.broadcast_dynamic_shape(tf.shape(x), tf.shape(condition)))\n elif not np.any(cond_static):\n y = y_fn()\n return tf.broadcast_to(\n y, tf.broadcast_dynamic_shape(tf.shape(y), tf.shape(condition)))\n return tf.where(condition, x_fn(), y_fn())\n\n\ndef rank_from_shape(shape_tensor_fn, tensorshape=None):\n \"\"\"Computes `rank` given a `Tensor`'s `shape`.\"\"\"\n # Note: this function will implicitly interpret scalar \"shapes\" as length-1\n # vectors.\n if tensorshape is None:\n shape_tensor = (shape_tensor_fn() if callable(shape_tensor_fn)\n else shape_tensor_fn)\n shape_tensor_ = tf.get_static_value(shape_tensor)\n if shape_tensor_ is not None:\n shape_tensor = np.int32(shape_tensor_)\n elif not hasattr(shape_tensor, 'shape'):\n shape_tensor = tf.convert_to_tensor(shape_tensor)\n ndims_ = tensorshape_util.num_elements(shape_tensor.shape)\n ndims_fn = lambda: tf.size(shape_tensor)\n else:\n ndims_ = tensorshape_util.rank(tensorshape)\n ndims_fn = lambda: tf.size( # pylint: disable=g-long-lambda\n shape_tensor_fn() if callable(shape_tensor_fn) else shape_tensor_fn)\n return ndims_fn() if ndims_ is None else np.int32(ndims_)\n\n\ndef broadcast_shape(x_shape, y_shape):\n \"\"\"Computes the shape of a broadcast.\n\n When both arguments are statically-known, the broadcasted shape will be\n computed statically and returned as a `TensorShape`. Otherwise, a rank-1\n `Tensor` will be returned.\n\n Args:\n x_shape: A `TensorShape` or rank-1 integer `Tensor`. The input `Tensor` is\n broadcast against this shape.\n y_shape: A `TensorShape` or rank-1 integer `Tensor`. 
The input `Tensor` is\n broadcast against this shape.\n\n Returns:\n shape: A `TensorShape` or rank-1 integer `Tensor` representing the\n broadcasted shape.\n \"\"\"\n x_shape_static = tf.get_static_value(x_shape)\n y_shape_static = tf.get_static_value(y_shape)\n if (x_shape_static is None) or (y_shape_static is None):\n return tf.broadcast_dynamic_shape(x_shape, y_shape)\n\n return tf.broadcast_static_shape(\n tf.TensorShape(x_shape_static), tf.TensorShape(y_shape_static))\n\n\ndef cond(pred, true_fn=None, false_fn=None, name=None):\n \"\"\"Return either `true_fn()` if predicate `pred` is true else `false_fn()`.\n\n If `pred` is a bool or has a constant value, we return either `true_fn()`\n or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.\n\n Args:\n pred: A scalar determining whether to return the result of `true_fn` or\n `false_fn`.\n true_fn: The callable to be performed if pred is true.\n false_fn: The callable to be performed if pred is false.\n name: Optional name prefix when using `tf.cond`.\n\n Returns:\n Tensors returned by the call to either `true_fn` or `false_fn`.\n\n Raises:\n TypeError: If `true_fn` or `false_fn` is not callable.\n \"\"\"\n if not callable(true_fn):\n raise TypeError('`true_fn` must be callable.')\n if not callable(false_fn):\n raise TypeError('`false_fn` must be callable.')\n\n pred_value = _get_static_predicate(pred)\n if pred_value is not None:\n if pred_value:\n return true_fn()\n else:\n return false_fn()\n else:\n return tf.cond(pred=pred, true_fn=true_fn, false_fn=false_fn, name=name)\n\n\ndef case(pred_fn_pairs, default=None, exclusive=False, name='smart_case'):\n \"\"\"Like tf.case, except attempts to statically evaluate predicates.\n\n If any predicate in `pred_fn_pairs` is a bool or has a constant value, the\n associated callable will be called or omitted depending on its value.\n Otherwise this functions like tf.case.\n\n Args:\n pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a\n callable which returns a list of tensors.\n default: Optional callable that returns a list of tensors.\n exclusive: True iff at most one predicate is allowed to evaluate to `True`.\n name: A name for this operation (optional).\n\n Returns:\n The tensors returned by the first pair whose predicate evaluated to True, or\n those returned by `default` if none does.\n\n Raises:\n TypeError: If `pred_fn_pairs` is not a list/dictionary.\n TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n \"\"\"\n if isinstance(pred_fn_pairs, (list, tuple)):\n # We don't expect much usage of the `dict` option, esp. 
with unhashable\n # Tensors, but could always add another branch for that if it comes up.\n def maybe_static(pred):\n p = _get_static_predicate(pred)\n if p is None:\n return pred\n return p\n pred_fn_pairs = [(maybe_static(pred), fn) for pred, fn in pred_fn_pairs]\n return control_flow_ops._case_helper( # pylint: disable=protected-access\n cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=True)\n\n\ndef size0(x, name=None):\n \"\"\"Returns the size of the first dimension (0 if scalar).\"\"\"\n with tf.name_scope(name or 'size0'):\n # First, ensure hasattr(x, 'shape').\n x_ = tf.get_static_value(x)\n if x_ is not None:\n x = np.array(x_)\n if not hasattr(x, 'shape'):\n x = tf.convert_to_tensor(x)\n # Next, try to read shape[0].\n ndims = tensorshape_util.rank(x.shape)\n if ndims is None or ndims == 0:\n n = ndims\n else:\n n = tf.compat.dimension_value(x.shape[0])\n if n is not None:\n return np.int32(n)\n return pad(shape(x)[:1], paddings=[[0, 1]], constant_values=0)[0]\n\n\ndef _ones_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin\n s = _shape(input)\n s_ = tf.get_static_value(s)\n if s_ is not None:\n return np.ones(s_, dtype_util.as_numpy_dtype(dtype or input.dtype))\n return tf.ones(s, dtype or input.dtype, name)\nones_like = _copy_docstring(tf.ones_like, _ones_like)\n\n\ndef _rank(input, name=None): # pylint: disable=redefined-builtin,unused-argument\n if not hasattr(input, 'shape'):\n input = (tf.convert_to_tensor(input) if tf.get_static_value(input) is None\n else np.array(input))\n ndims_ = tensorshape_util.rank(getattr(input, 'shape', None))\n return tf.rank(input) if ndims_ is None else np.int32(ndims_)\nrank = _copy_docstring(\n tf.rank,\n _rank)\n\n\ndef _setdiff1d(a, b, aminusb=True, validate_indices=True):\n \"\"\"Compute set difference of elements in last dimension of `a` and `b`.\"\"\"\n if not aminusb:\n raise NotImplementedError(\n 'Argument `aminusb != True` is currently unimplemented.')\n if not validate_indices:\n raise NotImplementedError(\n 'Argument `validate_indices != True` is currently unimplemented.')\n with tf.name_scope('setdiff1d'):\n dtype = dtype_util.as_numpy_dtype(\n dtype_util.common_dtype([a, b], dtype_hint=tf.int32))\n a_ = tf.get_static_value(a)\n b_ = tf.get_static_value(b)\n if a_ is None or b_ is None:\n a = tf.convert_to_tensor(a, dtype=dtype, name='a')\n b = tf.convert_to_tensor(b, dtype=dtype, name='b')\n return tf.sparse.to_dense(tf.sets.difference(\n a[tf.newaxis], b[tf.newaxis]))[0]\n a_ = np.array(a_, dtype=dtype)\n b_ = np.array(b_, dtype=dtype)\n # TODO(https://github.com/google/jax/issues/70): Jax lacks setdiff1d\n return np.setdiff1d(a_, b_)\nsetdiff1d = _copy_docstring(\n tf.sets.difference,\n _setdiff1d)\n\n\ndef _size(input, out_type=tf.int32, name=None): # pylint: disable=redefined-builtin\n if not hasattr(input, 'shape'):\n x = np.array(input)\n input = tf.convert_to_tensor(input) if x.dtype is np.object else x\n n = tensorshape_util.num_elements(tf.TensorShape(input.shape))\n if n is None:\n return tf.size(input, out_type=out_type, name=name)\n return np.array(n).astype(_numpy_dtype(out_type))\nsize = _copy_docstring(tf.size, _size)\n\n\ndef _shape(input, out_type=tf.int32, name=None): # pylint: disable=redefined-builtin,missing-docstring\n if not hasattr(input, 'shape'):\n x = np.array(input)\n input = tf.convert_to_tensor(input) if x.dtype is np.object else x\n if tensorshape_util.is_fully_defined(input.shape):\n return np.array(tensorshape_util.as_list(input.shape)).astype(\n 
_numpy_dtype(out_type))\n # NOTE: tf.shape(x) can call `tf.convert_to_tensor(x)` **twice**, so we\n # pre-emptively convert-to-tensor.\n return tf.shape(tf.convert_to_tensor(input), out_type=out_type, name=name)\nshape = _copy_docstring(tf.shape, _shape)\n\n\ndef _zeros_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin\n s = _shape(input)\n s_ = tf.get_static_value(s)\n if s_ is not None:\n return np.zeros(s, _numpy_dtype(dtype or input.dtype))\n return tf.zeros(s, dtype or s.dtype, name)\nzeros_like = _copy_docstring(tf.zeros_like, _zeros_like)\n\n\ndef non_negative_axis(axis, rank, name=None): # pylint:disable=redefined-outer-name\n \"\"\"Make (possibly negatively indexed) `axis` argument non-negative.\"\"\"\n with tf.name_scope(name or 'non_negative_axis'):\n if axis is None:\n return None\n if rank is None:\n raise ValueError('Argument `rank` cannot be `None`.')\n dtype = dtype_util.as_numpy_dtype(\n dtype_util.common_dtype([axis, rank], dtype_hint=tf.int32))\n rank_ = tf.get_static_value(rank)\n axis_ = tf.get_static_value(axis)\n if rank_ is None or axis_ is None:\n axis = tf.convert_to_tensor(axis, dtype=dtype, name='axis')\n rank = tf.convert_to_tensor(rank, dtype=dtype, name='rank')\n return tf.where(axis < 0, rank + axis, axis)\n axis_ = np.array(axis_, dtype=dtype)\n rank_ = np.array(rank_, dtype=dtype)\n return np.where(axis_ < 0, axis_ + rank_, axis_)\n\n\ndef is_numpy(x):\n \"\"\"Returns true if `x` is a numpy object.\"\"\"\n return isinstance(x, (np.ndarray, np.generic))\n\n\n# The following functions only work in numpy if the inputs' *values are known\n# statically*. Often (e.g., above) we dont need static values, just static\n# properties.\nabs = _prefer_static(tf.abs, nptf.abs) # pylint: disable=redefined-builtin\nadd = _prefer_static(tf.add, nptf.add)\nargmax = _prefer_static(tf.math.argmax, nptf.math.argmax)\nargmin = _prefer_static(tf.math.argmin, nptf.math.argmin)\nargsort = _prefer_static(tf.argsort, nptf.argsort)\nbroadcast_to = _prefer_static(tf.broadcast_to, nptf.broadcast_to)\ncast = _prefer_static(tf.cast, nptf.cast)\nceil = _prefer_static(tf.math.ceil, nptf.math.ceil)\nconcat = _prefer_static(tf.concat, nptf.concat)\nconvert_to_shape_tensor = (\n _prefer_static(tf.convert_to_tensor, _convert_to_shape_tensor_jax)\n if JAX_MODE else tf.convert_to_tensor)\ncumprod = _prefer_static(tf.math.cumprod, nptf.math.cumprod)\ncumsum = _prefer_static(tf.math.cumsum, nptf.math.cumsum)\nequal = _prefer_static(tf.equal, nptf.equal)\nnot_equal = _prefer_static(tf.not_equal, nptf.not_equal)\nexpm1 = _prefer_static(tf.math.expm1, nptf.math.expm1)\nfloor = _prefer_static(tf.math.floor, nptf.math.floor)\nfill = _prefer_static(tf.fill, nptf.fill)\ngather = _prefer_static(tf.gather, nptf.gather)\ngreater = _prefer_static(tf.greater, nptf.greater)\nidentity = _prefer_static(tf.identity, nptf.identity)\ninvert_permutation = _prefer_static(\n tf.math.invert_permutation, nptf.invert_permutation)\nis_finite = _prefer_static(tf.math.is_finite, nptf.math.is_finite)\nis_inf = _prefer_static(tf.math.is_inf, nptf.math.is_inf)\nis_nan = _prefer_static(tf.math.is_nan, nptf.math.is_nan)\nless = _prefer_static(tf.less, nptf.less)\nlinspace = _prefer_static(tf.linspace, nptf.linspace)\nlog = _prefer_static(tf.math.log, nptf.math.log)\nlog1p = _prefer_static(tf.math.log1p, nptf.math.log1p)\nlogical_and = _prefer_static(tf.logical_and, nptf.logical_and)\nlogical_not = _prefer_static(tf.logical_not, nptf.logical_not)\nlogical_or = _prefer_static(tf.logical_or, 
nptf.logical_or)\nmaximum = _prefer_static(tf.maximum, nptf.maximum)\nminimum = _prefer_static(tf.minimum, nptf.minimum)\nnextafter = _prefer_static(tf.math.nextafter, nptf.math.nextafter)\none_hot = _prefer_static(tf.one_hot, nptf.one_hot)\nones = _prefer_static(tf.ones, nptf.ones)\npad = _prefer_static(tf.pad, nptf.pad)\npow = _prefer_static(tf.math.pow, nptf.pow) # pylint: disable=redefined-builtin\nrange = _prefer_static(tf.range, nptf.range) # pylint: disable=redefined-builtin\nreduce_all = _prefer_static(tf.reduce_all, nptf.reduce_all)\nreduce_any = _prefer_static(tf.reduce_any, nptf.reduce_any)\nreduce_max = _prefer_static(tf.reduce_max, nptf.reduce_max)\nreduce_min = _prefer_static(tf.reduce_min, nptf.reduce_min)\nreduce_prod = _prefer_static(tf.reduce_prod, nptf.reduce_prod)\nreduce_sum = _prefer_static(tf.reduce_sum, nptf.reduce_sum)\nreshape = _prefer_static(tf.reshape, nptf.reshape)\nreverse = _prefer_static(tf.reverse, nptf.reverse)\nround = _prefer_static(tf.math.round, nptf.math.round) # pylint: disable=redefined-builtin\nrsqrt = _prefer_static(tf.math.rsqrt, nptf.math.rsqrt)\nslice = _prefer_static(tf.slice, nptf.slice) # pylint: disable=redefined-builtin\nsort = _prefer_static(tf.sort, nptf.sort)\nsplit = _prefer_static(tf.split, nptf.split)\nsqrt = _prefer_static(tf.sqrt, nptf.sqrt)\nstack = _prefer_static(tf.stack, nptf.stack)\ntensor_scatter_nd_add = _prefer_static(\n tf.tensor_scatter_nd_add, nptf.tensor_scatter_nd_add)\ntensor_scatter_nd_sub = _prefer_static(\n tf.tensor_scatter_nd_sub, nptf.tensor_scatter_nd_sub)\ntensor_scatter_nd_update = _prefer_static(\n tf.tensor_scatter_nd_update, nptf.tensor_scatter_nd_update)\ntop_k = _prefer_static(tf.math.top_k, nptf.math.top_k)\nunique = _prefer_static(tf.unique, nptf.unique)\nunstack = _prefer_static(tf.unstack, nptf.unstack)\nwhere = _prefer_static(tf.where, nptf.where)\nzeros = _prefer_static(tf.zeros, nptf.zeros)\n"
] | [
[
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.math.abs"
],
[
"tensorflow.compat.v2.linalg.diag_part",
"tensorflow.compat.v2.linalg.diag",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.linalg.triangular_solve",
"tensorflow.compat.v2.linalg.cholesky",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.linalg.band_part"
],
[
"tensorflow.compat.v2.cond",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.compat.v2.rank",
"numpy.asarray",
"numpy.all",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.ones",
"numpy.any",
"tensorflow.compat.v2.TensorShape",
"numpy.where",
"tensorflow.compat.v2.is_tensor",
"tensorflow.compat.v2.sets.difference",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.where",
"numpy.size",
"tensorflow.compat.v2.zeros",
"tensorflow.python.ops.control_flow_ops._case_helper",
"tensorflow.compat.v2.size",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.nest.flatten",
"numpy.array",
"tensorflow.compat.v2.get_static_value",
"tensorflow.compat.v2.broadcast_dynamic_shape",
"numpy.int32",
"numpy.setdiff1d",
"tensorflow.compat.v2.compat.dimension_value"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
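The _prefer_static wrapper in the prefer_static module above flattens the arguments, calls tf.get_static_value on each one, and dispatches to the NumPy backend only when every value is statically known; otherwise it falls back to the regular TensorFlow op. Below is a minimal, self-contained sketch of that dispatch pattern; it is not the library code itself, and the helper name prefer_static_add is invented for illustration.

# Sketch of the static-vs-dynamic dispatch used by _prefer_static (illustrative only).
import numpy as np
import tensorflow.compat.v2 as tf

def prefer_static_add(x, y):
    """Add two values, taking the NumPy path when both are statically known."""
    x_ = tf.get_static_value(x)
    y_ = tf.get_static_value(y)
    if x_ is not None and y_ is not None:
        return np.add(x_, y_)  # static path: returns a plain ndarray
    return tf.add(x, y)        # dynamic path: returns a Tensor

print(prefer_static_add(tf.constant(2), tf.constant(3)))  # ndarray 5

@tf.function
def traced(x):
    # A traced argument has no static value, so the TensorFlow branch runs.
    return prefer_static_add(x, tf.constant(3))

print(traced(tf.constant(2)))  # tf.Tensor(5, shape=(), dtype=int32)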
SchetininVitaliy/AeroPy | [
"65a4c68fafcbd0bd04ee70ffa9d4a98302a45c6b",
"65a4c68fafcbd0bd04ee70ffa9d4a98302a45c6b"
] | [
"aeropy/filehandling/vtk.py",
"aeropy/CST_3D/meshing.py"
] | [
"import numpy as np\n\ndef generate_surface(data, filename='panair') :\n '''\n Function to generate vtk files from a panair input mesh \n INPUT :\n - data is a list of networks which are 3D arrays with the dimensions being \n columns, rows, and coordinates)\n - 'filename' is a string to use in filenames. \n For example 'panair' will result in files called 'panair_network_1', etc.\n \n OUTPUT : \n The function will produce one or several files, one for each network, \n in the folder it's run from.\n '''\n from evtk.hl import gridToVTK\n # TODO: Currently not working when meshes seeds are not the same\n def _write_network(points_array, multiple_networks = False):\n n_columns = int(points_array.shape[0])\n n_rows = int(points_array.shape[1])\n \n X = np.zeros((n_rows, n_columns, 1))\n Y = np.zeros((n_rows, n_columns, 1))\n Z = np.zeros((n_rows, n_columns, 1))\n \n for i in range(n_columns) :\n for j in range(n_rows) :\n X[j, i, 0] = points_array[i, j, 0]\n Y[j, i, 0] = points_array[i, j, 1]\n Z[j, i, 0] = points_array[i, j, 2]\n\n if multiple_networks:\n gridToVTK(filename+'_network'+str(n+1), X, Y, Z)\n else:\n gridToVTK(filename+'_network', X, Y, Z)\n \n if type(data) == dict:\n networks = len(data.keys())\n else:\n networks = len(data)\n\n if type(data) != dict:\n try:\n #check to see if list of networks or just a single one\n check = data[0][0][0][0]\n for n in range(networks) :\n points_array = data[n]\n _write_network(points_array, multiple_networks = True)\n except:\n _write_network(data, multiple_networks = False)\n else:\n for n in range(networks) :\n points_array = data[list(data.keys())[n]]\n _write_network(points_array, multiple_networks = True)\n\ndef generate_points(data, filename):\n #TODO: Still cannot visualize points well\n from evtk.hl import pointsToVTK\n x,y,z = data.T\n # Sometimes np.arrays could have manipulated to no longer\n # be c-continuous os we have to impose it\n x = np.ascontiguousarray(x)\n y = np.ascontiguousarray(y)\n z = np.ascontiguousarray(z)\n pointsToVTK(filename, x, y, z)",
"import numpy as np\n\ndef uniform_mesh_generator(mesh):\n '''Default genrator for uniform meshes. If \n len(mesh)==2, upper and lower have same\n mesh'''\n\n # Defining dimensions matrix\n if len(mesh) == 2:\n dimensions = {'upper': list(mesh),\n 'lower': list(mesh)}\n elif len(mesh) == 4:\n dimensions = {'upper': list(mesh[:2]),\n 'lower': list(mesh[2:])}\n # Calculating grid\n upper_x, upper_y = np.meshgrid(np.linspace(0,1,dimensions['upper'][0]), \n np.linspace(0,1,dimensions['upper'][1]))\n lower_x, lower_y = np.meshgrid(np.linspace(0,1,dimensions['lower'][0]), \n np.linspace(0,1,dimensions['lower'][1]))\n\n mesh = {'upper': np.concatenate([upper_x.reshape(1,np.size(upper_x)),\n upper_y.reshape(1,np.size(upper_y))]).T,\n 'lower': np.concatenate([lower_x.reshape(1,np.size(lower_x)),\n lower_y.reshape(1,np.size(lower_y))]).T}\n\n return mesh, dimensions"
] | [
[
"numpy.ascontiguousarray",
"numpy.zeros"
],
[
"numpy.size",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
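The uniform_mesh_generator above flattens each meshgrid output into a single row, stacks the two rows, and transposes, so every row of the result is one (x, y) parametric point. A small standalone check of that flattening with a tiny grid; this is illustrative only and not part of the AeroPy sources.

# Flattening check for the meshgrid -> point-list step (illustrative only).
import numpy as np

n_x, n_y = 3, 2
x, y = np.meshgrid(np.linspace(0, 1, n_x), np.linspace(0, 1, n_y))
points = np.concatenate([x.reshape(1, np.size(x)),
                         y.reshape(1, np.size(y))]).T
print(points.shape)  # (6, 2): n_x * n_y points, each an (x, y) pair
print(points)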
zamanashiq3/code-DNN | [
"c6133740fa272f9cac005b9ee754642b5bb20975"
] | [
"time_dis_cnn.py"
] | [
"\"\"\"\nMultiple stacked lstm implemeation on the lip movement data.\n\nAkm Ashiquzzaman\[email protected]\nFall 2016\n\n\"\"\"\nfrom __future__ import print_function\nimport numpy as np\nnp.random.seed(1337)\n#random seed fixing for reproducibility\n\n#data load & preprocessing \nX_train = np.load('../data/videopart43.npy').astype('float32')\nY_train = np.load('../data/audiopart43.npy').astype('float32')\n\n#normalizing data\nX_train = X_train/255\nY_train = Y_train/32767\n\nX_train = X_train.reshape((826,13,1,53,53)).astype('float32')\nY_train = Y_train.reshape((826,13*4702)).astype('float32')\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Activation,Dropout,TimeDistributed,LSTM,Bidirectional\nfrom keras.layers import Convolution2D,Flatten,MaxPooling2D\nimport time\n\nprint(\"Building Model.....\")\nmodel_time = time.time()\n\nmodel = Sequential()\n\nmodel.add(TimeDistributed(Convolution2D(64, 3, 3,border_mode='valid'),batch_input_shape=(14,13,1,53,53),input_shape=(13,1,53,53)))\nmodel.add(Activation('tanh'))\nmodel.add(Dropout(0.25))\n\nmodel.add(TimeDistributed(Convolution2D(32, 2, 2, border_mode='valid')))\nmodel.add(Activation('tanh'))\n\n\nmodel.add(TimeDistributed(Flatten()))\n\nmodel.add(Bidirectional(LSTM(256,return_sequences=True,stateful=True)))\nmodel.add(Dropout(0.20))\nmodel.add(Bidirectional(LSTM(128,return_sequences=True,stateful=True)))\nmodel.add(Dropout(0.20))\nmodel.add((LSTM(64,stateful=True)))\nmodel.add(Dropout(0.20))\n\nmodel.add((Dense(512)))\nmodel.add(Activation('tanh'))\nmodel.add(Dropout(0.5))\n\nmodel.add((Dense(13*4702)))\nmodel.add(Activation('tanh'))\n\nmodel.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])\n\n#checkpoint import\nfrom keras.callbacks import ModelCheckpoint\nfrom os.path import isfile, join\n#weight file name\nweight_file = '../weights/time-dis-cnn_weight.h5'\n\n#loading previous weight file for resuming training \nif isfile(weight_file):\n\tmodel.load_weights(weight_file)\n\n#weight-checkmark\ncheckpoint = ModelCheckpoint(weight_file, monitor='acc', verbose=1, save_best_only=True, mode='max')\n\ncallbacks_list = [checkpoint]\n\nprint(\"model compile time: \"+str(time.time()-model_time)+'s')\n\n# fit the model\nmodel.fit(X_train,Y_train, nb_epoch=1, batch_size=14,callbacks=callbacks_list)\n\npred = model.predict(X_train,batch_size=14,verbose=1)\n\npred = pred*32767\npred = pred.reshape(826*13,4702)\nprint('pred shape',pred.shape)\nprint('pred dtype',pred.dtype)\nnp.save('../predictions/pred-time-cnn.npy',pred)\n"
] | [
[
"numpy.load",
"numpy.save",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
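The reshapes in the script above imply 826 sequences of 13 single-channel 53x53 frames paired with 13*4702 audio samples per sequence, and the batch size of 14 divides 826 exactly, which the stateful LSTMs need because batch_input_shape fixes the batch dimension. A quick shape check with zero-filled stand-in arrays; the true contents of videopart43.npy and audiopart43.npy are not available here, so their layout is an assumption.

# Shape bookkeeping only; the stand-in array layout is assumed, not read from the data files.
import numpy as np

X = np.zeros((826, 13, 53, 53), dtype='float32')  # stand-in video frames
Y = np.zeros((826, 13, 4702), dtype='float32')    # stand-in audio targets

X = X.reshape((826, 13, 1, 53, 53))
Y = Y.reshape((826, 13 * 4702))
print(X.shape, Y.shape, 826 % 14)  # (826, 13, 1, 53, 53) (826, 61126) 0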
skat00sh/Handcrafted-DP | [
"d1f8bc004adc240d5c424a10bdcc30fc266c8218"
] | [
"log.py"
] | [
"import numpy as np\nimport os\nimport shutil\nimport sys\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch\n\n\ndef model_input(data, device):\n datum = data.data[0:1]\n if isinstance(datum, np.ndarray):\n return torch.from_numpy(datum).float().to(device)\n else:\n return datum.float().to(device)\n\n\ndef get_script():\n py_script = os.path.basename(sys.argv[0])\n return os.path.splitext(py_script)[0]\n\n\ndef get_specified_params(hparams):\n keys = [k.split(\"=\")[0][2:] for k in sys.argv[1:]]\n specified = {k: hparams[k] for k in keys}\n return specified\n\n\ndef make_hparam_str(hparams, exclude):\n return \",\".join([f\"{key}_{value}\"\n for key, value in sorted(hparams.items())\n if key not in exclude])\n\n\nclass Logger(object):\n def __init__(self, logdir):\n\n if logdir is None:\n self.writer = None\n else:\n if os.path.exists(logdir) and os.path.isdir(logdir):\n shutil.rmtree(logdir)\n\n self.writer = SummaryWriter(log_dir=logdir)\n\n def log_model(self, model, input_to_model):\n if self.writer is None:\n return\n self.writer.add_graph(model, input_to_model)\n\n def log_epoch(self, epoch, train_loss, train_acc, test_loss, test_acc, epsilon=None):\n if self.writer is None:\n return\n self.writer.add_scalar(\"Loss/train\", train_loss, epoch)\n self.writer.add_scalar(\"Loss/test\", test_loss, epoch)\n self.writer.add_scalar(\"Accuracy/train\", train_acc, epoch)\n self.writer.add_scalar(\"Accuracy/test\", test_acc, epoch)\n\n if epsilon is not None:\n self.writer.add_scalar(\"Acc@Eps/train\", train_acc, 100*epsilon)\n self.writer.add_scalar(\"Acc@Eps/test\", test_acc, 100*epsilon)\n\n def log_scalar(self, tag, scalar_value, global_step):\n if self.writer is None or scalar_value is None:\n return\n self.writer.add_scalar(tag, scalar_value, global_step)\n"
] | [
[
"torch.from_numpy",
"torch.utils.tensorboard.SummaryWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
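The Logger above wraps a torch SummaryWriter: a None logdir disables logging, log_epoch writes paired train/test loss and accuracy scalars, and a non-None epsilon additionally records accuracy at step 100*epsilon. A hypothetical usage sketch follows; the log directory and metric values are made up, and it assumes the log.py shown in this row is importable as log.

# Hypothetical driver for the Logger class (values invented for illustration).
from log import Logger  # assumes log.py from this row is on the import path

logger = Logger(logdir="runs/example")  # pass logdir=None to disable logging
for epoch in range(3):
    logger.log_epoch(epoch,
                     train_loss=1.0 / (epoch + 1), train_acc=0.50 + 0.1 * epoch,
                     test_loss=1.2 / (epoch + 1), test_acc=0.45 + 0.1 * epoch,
                     epsilon=0.5 * (epoch + 1))
logger.log_scalar("lr", 0.1, global_step=0)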
njcuk9999/jwst-mtl | [
"81d3e7ec6adc5dae180cd9d3bff8e4a2a7292596",
"81d3e7ec6adc5dae180cd9d3bff8e4a2a7292596"
] | [
"SOSS/dms/soss_engine.py",
"SOSS/extract/engine_legacy.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# TODO remove use of args and kwargs as much as possible for clearer code.\n\n# General imports.\nimport numpy as np\nfrom scipy.sparse import issparse, csr_matrix, diags\nfrom scipy.sparse.linalg import spsolve\nfrom scipy.interpolate import interp1d, Akima1DInterpolator\nfrom scipy.optimize import minimize_scalar\n\n# Local imports.\nfrom SOSS.dms import engine_utils\n\n# Plotting.\nimport matplotlib.pyplot as plt\n\n\nclass _BaseOverlap: # TODO Merge with TrpzOverlap?\n \"\"\"\n Base class for overlaping extraction of the form:\n (B_T * B) * f = (data/sig)_T * B\n where B is a matrix and f is an array.\n The matrix multiplication B * f is the 2d model of the detector.\n We want to solve for the array f.\n The elements of f are labelled by 'k'.\n The pixels are labeled by 'i'.\n Every pixel 'i' is covered by a set of 'k' for each order\n of diffraction.\n The classes inheriting from this class should specify the\n methods get_w which computes the 'k' associated to each pixel 'i'.\n These depends of the type of interpolation used.\n \"\"\"\n def __init__(self, wave_map, aperture, throughput, kernels, # TODO rename aperture\n orders=None, global_mask=None,\n wave_grid=None, wave_bounds=None, n_os=2,\n threshold=1e-5, c_kwargs=None,\n verbose=False):\n \"\"\"\n Parameters\n ----------\n wave_map : (N_ord, N, M) list or array of 2-D arrays\n A list or array of the central wavelength position for each\n order on the detector.\n It has to have the same (N, M) as `data`.\n aperture : (N_ord, N, M) list or array of 2-D arrays\n A list or array of the spatial profile for each order\n on the detector. It has to have the same (N, M) as `data`.\n throughput : (N_ord [, N_k]) list of array or callable\n A list of functions or array of the throughput at each order.\n If callable, the functions depend on the wavelength.\n If array, projected on `wave_grid`.\n kernels : array, callable or sparse matrix\n Convolution kernel to be applied on the spectrum (f_k) for each orders.\n Can be array of the shape (N_ker, N_k_c).\n Can be a callable with the form f(x, x0) where x0 is\n the position of the center of the kernel. In this case, it must\n return a 1D array (len(x)), so a kernel value\n for each pairs of (x, x0). If array or callable,\n it will be passed to `convolution.get_c_matrix` function\n and the `c_kwargs` can be passed to this function.\n If sparse, the shape has to be (N_k_c, N_k) and it will\n be used directly. N_ker is the length of the effective kernel\n and N_k_c is the length of the spectrum (f_k) convolved.\n global_mask : (N, M) array_like boolean, optional\n Boolean Mask of the detector pixels to mask for every extraction.\n orders: list, optional:\n List of orders considered. Default is orders = [1, 2]\n wave_grid : (N_k) array_like, optional\n The grid on which f(lambda) will be projected.\n Default is a grid from `utils.get_soss_grid`.\n `n_os` will be passed to this function.\n wave_bounds : list or array-like (N_ord, 2), optional\n Boundary wavelengths covered by each orders.\n Default is the wavelength covered by `wave_map`.\n n_os : int, optional\n if `wave_grid`is None, it will be used to generate\n a grid. Default is 2.\n threshold : float, optional:\n The pixels where the estimated spatial profile is less than\n this value will be masked. 
Default is 1e-5.\n c_kwargs : list of N_ord dictionnaries or dictionnary, optional\n Inputs keywords arguments to pass to\n `convolution.get_c_matrix` function for each orders.\n If dictionnary, the same c_kwargs will be used for each orders.\n verbose : bool, optional\n Print steps. Default is False.\n \"\"\"\n\n # If no orders specified extract on orders 1 and 2.\n if orders is None:\n orders = [1, 2]\n\n ###########################\n # Save basic parameters\n ###########################\n\n # Spectral orders and number of orders.\n self.data_shape = wave_map[0].shape\n self.orders = orders\n self.n_orders = len(orders)\n self.threshold = threshold\n self.verbose = verbose\n\n # Raise error if the number of orders is not consistent.\n if self.n_orders != len(wave_map):\n msg = (\"The number of orders specified {} and the number of \"\n \"wavelength maps provided {} do not match.\")\n raise ValueError(msg.format(self.n_orders, len(wave_map)))\n\n # Detector image.\n self.data = np.full(self.data_shape, fill_value=np.nan)\n\n # Error map of each pixels.\n self.error = np.ones(self.data_shape)\n\n # Set all reference file quantities to None.\n self.wave_map = None\n self.aperture = None\n self.throughput = None\n self.kernels = None\n\n # Set the wavelength map and aperture for each order.\n self.update_wave_map(wave_map)\n self.update_aperture(aperture)\n\n # Generate a wavelength grid if none was provided. TODO Requires self.aperture self.wave_map\n if wave_grid is None:\n\n if self.n_orders == 2: # TODO should this be mandatory input.\n wave_grid = engine_utils.get_soss_grid(wave_map, aperture, n_os=n_os) # TODO check difference between get_soss_grid and grid_from_map\n else:\n wave_grid, _ = self.grid_from_map()\n\n # Set the wavelength grid and its size.\n self.wave_grid = wave_grid.copy()\n self.n_wavepoints = len(wave_grid)\n\n # Set the throughput for each order.\n self.update_throughput(throughput) # TODO requires self.wave_grid\n\n ###################################\n # Build detector mask\n ###################################\n\n # Assign a first estimate of i_bounds to be able to compute mask.\n self.i_bounds = [[0, len(wave_grid)] for _ in range(self.n_orders)] # TODO double check how the i_bounds and mask interact.\n\n # First estimate of a global mask and masks for each orders\n self.mask, self.mask_ord = self._get_masks(global_mask)\n\n # Correct i_bounds if it was not specified\n self.i_bounds = self._get_i_bnds(wave_bounds)\n\n # Re-build global mask and masks for each orders\n self.mask, self.mask_ord = self._get_masks(global_mask)\n\n # Save mask here as the general mask,\n # since `mask` attribute can be changed.\n self.general_mask = self.mask.copy()\n\n ####################################\n # Build convolution matrix\n ####################################\n self.update_kernels(kernels, c_kwargs) # TODO requires self.wave_grid self.i_bounds\n\n #############################\n # Compute integration weights\n #############################\n # The weights depend on the integration method used solve\n # the integral of the flux over a pixel and are encoded\n # in the class method `get_w()`.\n self.weights, self.weights_k_idx = self.compute_weights() # TODO put shapes in commments, name of indices.\n\n #########################\n # Save remaining inputs\n #########################\n\n # Set masked values to zero. TODO may not be necessary.\n self.data[self.mask] = 0\n\n # Init the pixel mapping (b_n) matrices. 
Matrices that transforms the 1D spectrum to a the image pixels.\n self.pixel_mapping = [None for _ in range(self.n_orders)]\n self.i_grid = None\n self.tikho = None\n self.tikho_mat = None\n self.w_t_wave_c = None\n\n return\n\n def verbose_print(self, *args, **kwargs):\n \"\"\"Print if verbose is True. Same as `print` function.\"\"\"\n\n if self.verbose:\n print(*args, **kwargs)\n\n return\n\n def get_attributes(self, *args, i_order=None):\n \"\"\"Return list of attributes\n\n Parameters\n ----------\n args: str\n All attributes to return.\n i_order: None or int, optionoal\n Index of order to extract. If specified, it will\n be applied to all attributes in args, so it cannot\n be mixed with non-order dependent attributes).\n \"\"\"\n\n if i_order is None:\n out = [getattr(self, arg) for arg in args]\n else:\n out = [getattr(self, arg)[i_order] for arg in args]\n\n if len(out) == 1:\n out = out[0]\n\n return out\n\n def update_wave_map(self, wave_map):\n\n self.wave_map = [wave_n.copy() for wave_n in wave_map] # TODO make dict with order number as key.\n\n return\n\n def update_aperture(self, aperture):\n \"\"\"Update the aperture maps.\"\"\"\n\n # Update the aperture profile.\n self.aperture = [aperture_n.copy() for aperture_n in aperture] # TODO make dict with order number as key.\n\n return\n\n def update_throughput(self, throughput):\n \"\"\"Update the throughput values.\"\"\"\n\n # Update the throughput values.\n throughput_new = [] # TODO make dict with order number as key.\n for throughput_n in throughput: # Loop over orders.\n\n if callable(throughput_n):\n\n # Througput was given as a callable function.\n throughput_new.append(throughput_n(self.wave_grid))\n\n elif throughput_n.shape == self.wave_grid.shape:\n\n # Throughput was given as an array.\n throughput_new.append(throughput_n)\n\n else:\n msg = 'Throughputs must be given as callable or arrays matching the extraction grid.'\n raise ValueError(msg)\n\n # Set the attribute to the new values.\n self.throughput = throughput_new\n\n return\n\n def update_kernels(self, kernels, c_kwargs):\n\n # Verify the c_kwargs. TODO Be explict here?\n if c_kwargs is None:\n c_kwargs = [{} for _ in range(self.n_orders)]\n\n elif isinstance(c_kwargs, dict):\n c_kwargs = [c_kwargs for _ in range(self.n_orders)]\n\n # Define convolution sparse matrix. TODO make dict with order number as key.\n kernels_new = []\n for i_order, kernel_n in enumerate(kernels):\n\n if not issparse(kernel_n):\n kernel_n = engine_utils.get_c_matrix(kernel_n, self.wave_grid,\n i_bounds=self.i_bounds[i_order],\n **c_kwargs[i_order])\n\n kernels_new.append(kernel_n)\n\n self.kernels = kernels_new\n\n return\n\n def get_mask_wave(self, i_order):\n \"\"\"Mask according to wavelength grid \"\"\"\n\n wave = self.wave_map[i_order]\n imin, imax = self.i_bounds[i_order]\n wave_min = self.wave_grid[imin]\n wave_max = self.wave_grid[imax - 1] # TODO change so -1 not needed?\n\n mask = (wave <= wave_min) | (wave >= wave_max)\n\n return mask\n\n def _get_masks(self, global_mask):\n \"\"\"\n Compute a general mask on the detector and for each orders.\n Depends on the spatial profile, the wavelength grid\n and the user defined mask (optional). 
These are all specified\n when initiating the object.\n \"\"\"\n\n # Get needed attributes\n threshold, n_orders = self.get_attributes('threshold', 'n_orders')\n throughput, aperture, wave_map = self.get_attributes('throughput', 'aperture', 'wave_map')\n\n # Mask according to the spatial profile.\n mask_aperture = np.array([aperture_n < threshold for aperture_n in aperture])\n\n # Mask pixels not covered by the wavelength grid.\n mask_wave = np.array([self.get_mask_wave(i_order) for i_order in range(n_orders)])\n\n # Apply user defined mask.\n if global_mask is None:\n mask_ord = np.any([mask_aperture, mask_wave], axis=0)\n else:\n mask = [global_mask for _ in range(n_orders)] # For each orders\n mask_ord = np.any([mask_aperture, mask_wave, mask], axis=0)\n\n # Find pixels that are masked in each order.\n general_mask = np.all(mask_ord, axis=0)\n\n # Mask pixels if mask_aperture not masked but mask_wave is.\n # This means that an order is contaminated by another\n # order, but the wavelength range does not cover this part\n # of the spectrum. Thus, it cannot be treated correctly.\n general_mask |= (np.any(mask_wave, axis=0)\n & np.all(~mask_aperture, axis=0))\n\n # Apply this new general mask to each orders.\n mask_ord = (mask_wave | general_mask[None, :, :])\n\n return general_mask, mask_ord\n\n def update_mask(self, mask):\n \"\"\"\n Update `mask` attribute by completing the\n `general_mask` attribute with the input `mask`.\n Everytime the mask is changed, the integration weights\n need to be recomputed since the pixels change.\n \"\"\"\n\n # Get general mask\n general_mask = self.general_mask\n\n # Complete with the input mask\n new_mask = (general_mask | mask)\n\n # Update attribute\n self.mask = new_mask\n\n # Correct i_bounds if it was not specified\n # self.update_i_bnds()\n\n # Re-compute weights\n self.weights, self.weights_k_idx = self.compute_weights()\n\n return\n\n def _get_i_bnds(self, wave_bounds=None):\n \"\"\"\n Define wavelength boundaries for each orders using the order's mask.\n \"\"\"\n\n wave_grid = self.wave_grid\n i_bounds = self.i_bounds\n\n # Check if wave_bounds given\n if wave_bounds is None:\n wave_bounds = []\n for i in range(self.n_orders):\n wave = self.wave_map[i][~self.mask_ord[i]]\n wave_bounds.append([wave.min(), wave.max()])\n\n # What we need is the boundary position\n # on the wavelength grid.\n i_bnds_new = []\n for bounds, i_bnds in zip(wave_bounds, i_bounds):\n\n a = np.min(np.where(wave_grid >= bounds[0])[0])\n b = np.max(np.where(wave_grid <= bounds[1])[0]) + 1\n\n # Take the most restrictive bound\n a = np.maximum(a, i_bnds[0])\n b = np.minimum(b, i_bnds[1])\n\n # Keep value\n i_bnds_new.append([a, b])\n\n return i_bnds_new\n\n def update_i_bnds(self):\n \"\"\"Update the grid limits for the extraction.\n Needs to be done after modification of the mask\n \"\"\"\n\n # Get old and new boundaries.\n i_bnds_old = self.i_bounds\n i_bnds_new = self._get_i_bnds()\n\n for i_order in range(self.n_orders):\n\n # Take most restrictive lower bound.\n low_bnds = [i_bnds_new[i_order][0], i_bnds_old[i_order][0]]\n i_bnds_new[i_order][0] = np.max(low_bnds)\n\n # Take most restrictive upper bound.\n up_bnds = [i_bnds_new[i_order][1], i_bnds_old[i_order][1]]\n i_bnds_new[i_order][1] = np.min(up_bnds)\n\n # Update attribute.\n self.i_bounds = i_bnds_new\n\n return\n\n def wave_grid_c(self, i_order):\n \"\"\"\n Return wave_grid for the convolved flux at a given order.\n \"\"\"\n\n index = slice(*self.i_bounds[i_order])\n\n return self.wave_grid[index]\n\n def 
get_w(self, i_order):\n \"\"\"Dummy method to be able to init this class\"\"\"\n\n return np.array([]), np.array([])\n\n def compute_weights(self):\n \"\"\"\n Compute integration weights\n\n The weights depend on the integration method used solve\n the integral of the flux over a pixel and are encoded\n in the class method `get_w()`.\n\n Returns the lists of weights and corresponding grid indices\n \"\"\"\n\n # Init lists\n weights, weights_k_idx = [], []\n for i_order in range(self.n_orders): # For each orders\n\n weights_n, k_idx_n = self.get_w(i_order) # Compute weigths\n\n # Convert to sparse matrix\n # First get the dimension of the convolved grid\n n_kc = np.diff(self.i_bounds[i_order]).astype(int)[0]\n\n # Then convert to sparse\n weights_n = engine_utils.sparse_k(weights_n, k_idx_n, n_kc)\n weights.append(weights_n), weights_k_idx.append(k_idx_n)\n\n return weights, weights_k_idx\n\n def _set_w_t_wave_c(self, i_order, product): # TODO better name? prod_mat tmp_mat, intermediate_mat?\n \"\"\"\n Save the matrix product of the weighs (w), the throughput (t),\n the wavelength (lam) and the convolution matrix for faster computation.\n \"\"\"\n\n if self.w_t_wave_c is None:\n self.w_t_wave_c = [[] for _ in range(self.n_orders)] # TODO make dict with order number as key.\n\n # Assign value\n self.w_t_wave_c[i_order] = product.copy()\n\n return\n\n def grid_from_map(self, i_order=0):\n \"\"\"\n Return the wavelength grid and the columns associated\n to a given order index (i_order)\n \"\"\"\n\n attrs = ['wave_map', 'aperture']\n wave_map, aperture = self.get_attributes(*attrs, i_order=i_order)\n\n wave_grid, icol = engine_utils._grid_from_map(wave_map, aperture, out_col=True)\n\n return wave_grid, icol\n\n def get_adapt_grid(self, spectrum=None, n_max=3, **kwargs):\n \"\"\"\n Return an irregular grid needed to reach a\n given precision when integrating over each pixels.\n\n Parameters (all optional)\n ----------\n spectrum (f_k): 1D array-like\n Input flux in the integral to be optimized.\n f_k is the projection of the flux on self.wave_grid\n n_max: int (n_max > 0)\n Maximum number of nodes in each intervals of self.wave_grid.\n Needs to be greater then zero.\n\n kwargs (arguments passed to the function get_n_nodes)\n ------\n tol, rtol : float, optional\n The desired absolute and relative tolerances. Defaults are 1.48e-4.\n divmax : int, optional\n Maximum order of extrapolation. 
Default is 10.\n\n Returns\n -------\n os_grid : 1D array\n Oversampled grid which minimizes the integration error based on\n Romberg's method\n See Also\n --------\n utils.get_n_nodes\n scipy.integrate.quadrature.romberg\n References\n ----------\n [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method\n\n \"\"\"\n # Generate the spectrum (f_k) if not given.\n if spectrum is None:\n spectrum = self.extract()\n\n # Init output oversampled grid\n os_grid = []\n\n # Iterate starting with the last order\n for i_order in range(self.n_orders - 1, -1, -1): # TODO easier way of inverse loop?\n\n # Grid covered by this order\n grid_ord = self.wave_grid_c(i_order)\n\n # Estimate the flux at this order\n convolved_spectrum = self.kernels[i_order].dot(spectrum)\n # Interpolate with a cubic spline\n fct = interp1d(grid_ord, convolved_spectrum, kind='cubic')\n\n # Find number of nodes to reach the precision\n n_oversample, _ = engine_utils.get_n_nodes(grid_ord, fct, **kwargs)\n\n # Make sure n_oversample is not greater than\n # user's define `n_max`\n n_oversample = np.clip(n_oversample, 0, n_max)\n\n # Generate oversampled grid\n grid_ord = engine_utils.oversample_grid(grid_ord, n_os=n_oversample)\n\n # Keep only wavelength that are not already\n # covered by os_grid.\n if os_grid:\n # Under or above os_grid\n index = (grid_ord < np.min(os_grid))\n index |= (grid_ord > np.max(os_grid))\n else:\n index = slice(None)\n\n # Keep these values\n os_grid.append(grid_ord[index])\n\n # Convert os_grid to 1D array\n os_grid = np.concatenate(os_grid)\n\n # Return sorted and unique.\n wave_grid = np.unique(os_grid)\n\n return wave_grid\n\n def estimate_noise(self, i_order=0, data=None, error=None, mask=None):\n \"\"\"\n Relative noise estimate over columns.\n\n Parameters\n ----------\n i_order: int, optional\n index of diffraction order. 
Default is 0\n data: 2d array, optional\n map of the detector image\n Default is `self.data`.\n error: 2d array, optional\n map of the estimate of the detector noise.\n Default is `self.sig`\n mask: 2d array, optional\n Bool map of the masked pixels for order `i_order`.\n Default is `self.mask_ord[i_order]`\n\n Returns\n ------\n wave_grid, noise\n \"\"\"\n\n # Use object attributes if not given\n if data is None:\n data = self.data\n\n if error is None:\n error = self.error\n\n if mask is None:\n mask = self.mask_ord[i_order]\n\n # Compute noise estimate only on the trace (mask the rest)\n noise = np.ma.array(error, mask=mask)\n\n # RMS over columns\n noise = np.sqrt((noise**2).sum(axis=0))\n\n # Relative\n noise /= np.ma.array(data, mask=mask).sum(axis=0)\n\n # Convert to array with nans\n noise = noise.filled(fill_value=np.nan)\n\n # Get associated wavelengths\n wave_grid, i_col = self.grid_from_map(i_order)\n\n # Return sorted according to wavelenghts\n return wave_grid, noise[i_col]\n\n def get_pixel_mapping(self, i_order, same=False, error=True, quick=False):\n \"\"\"\n Compute the matrix `b_n = (P/sig).w.T.lambda.c_n` ,\n where `P` is the spatial profile matrix (diag),\n `w` is the integrations weights matrix,\n `T` is the throughput matrix (diag),\n `lambda` is the convolved wavelength grid matrix (diag),\n `c_n` is the convolution kernel.\n The model of the detector at order n (`model_n`)\n is given by the system:\n model_n = b_n.c_n.f ,\n where f is the incoming flux projected on the wavelenght grid.\n This methods updates the `b_n_list` attribute.\n Parameters\n ----------\n i_order: integer\n Label of the order (depending on the initiation of the object).\n same: bool, optional\n Do not recompute, b_n. Take the last b_n computed.\n Useful to speed up code. Default is False.\n error: bool or (N, M) array_like, optional\n If 2-d array, `sig` is the new error estimation map.\n It is the same shape as `sig` initiation input. If bool,\n wheter to apply sigma or not. The method will return\n b_n/sigma if True or array_like and b_n if False. 
If True,\n the default object attribute `sig` will be use.\n quick: bool, optional\n If True, only perform one matrix multiplication\n instead of the whole system: (P/sig).(w.T.lambda.c_n)\n\n Returns\n ------\n sparse matrix of b_n coefficients\n \"\"\"\n\n # Force to compute if b_n never computed.\n if self.pixel_mapping[i_order] is None:\n same = False\n\n # Take the last b_n computed if nothing changes\n if same:\n pixel_mapping = self.pixel_mapping[i_order]\n\n else:\n pixel_mapping = self._get_pixel_mapping(i_order, error=error, quick=quick)\n\n # Save new pixel mapping matrix.\n self.pixel_mapping[i_order] = pixel_mapping\n\n return pixel_mapping\n\n def _get_pixel_mapping(self, i_order, error=True, quick=False): # TODO merge with get_pixel_mapping?\n \"\"\"\n Compute the matrix `b_n = (P/sig).w.T.lambda.c_n` ,\n where `P` is the spatial profile matrix (diag),\n `w` is the integrations weights matrix,\n `T` is the throughput matrix (diag),\n `lambda` is the convolved wavelength grid matrix (diag),\n `c_n` is the convolution kernel.\n The model of the detector at order n (`model_n`)\n is given by the system:\n model_n = b_n.c_n.f ,\n where f is the incoming flux projected on the wavelenght grid.\n Parameters\n ----------\n i_order : integer\n Label of the order (depending on the initiation of the object).\n error: bool or (N, M) array_like, optional\n If 2-d array, `sig` is the new error estimation map.\n It is the same shape as `sig` initiation input. If bool,\n wheter to apply sigma or not. The method will return\n b_n/sigma if True or array_like and b_n if False. If True,\n the default object attribute `sig` will be use.\n quick: bool, optional\n If True, only perform one matrix multiplication\n instead of the whole system: (P/sig).(w.T.lambda.c_n)\n\n Returns\n ------\n sparse matrix of b_n coefficients\n \"\"\"\n\n # Special treatment for error map\n # Can be bool or array.\n if error is False:\n # Sigma will have no effect\n error = np.ones(self.data_shape)\n else:\n if error is not True:\n # Sigma must be an array so\n # update object attribute\n self.error = error.copy()\n\n # Take sigma from object\n error = self.error\n\n # Get needed attributes ...\n attrs = ['wave_grid', 'mask']\n wave_grid, mask = self.get_attributes(*attrs)\n\n # ... 
order dependent attributes\n attrs = ['aperture', 'throughput', 'kernels', 'weights', 'i_bounds']\n aperture_n, throughput_n, kernel_n, weights_n, i_bnds = self.get_attributes(*attrs, i_order=i_order)\n\n # Keep only valid pixels (P and sig are still 2-D)\n # And apply direcly 1/sig here (quicker)\n aperture_n = aperture_n[~mask] / error[~mask]\n\n # Compute b_n\n # Quick mode if only `p_n` or `sig` has changed\n if quick:\n # Get pre-computed (right) part of the equation\n right = self.w_t_wave_c[i_order]\n\n # Apply new p_n\n pixel_mapping = diags(aperture_n).dot(right)\n\n else:\n # First (T * lam) for the convolve axis (n_k_c)\n product = (throughput_n * wave_grid)[slice(*i_bnds)]\n\n # then convolution\n product = diags(product).dot(kernel_n)\n\n # then weights\n product = weights_n.dot(product)\n\n # Save this product for quick mode\n self._set_w_t_wave_c(i_order, product)\n\n # Then spatial profile\n pixel_mapping = diags(aperture_n).dot(product)\n\n return pixel_mapping\n\n def get_i_grid(self, d):\n \"\"\" Return the index of the grid that are well defined, so d != 0 \"\"\"\n\n if self.i_grid is None: # TODO Shouldn't this update even if the attribute is already set?\n self.i_grid = np.nonzero(d)[0]\n\n return self.i_grid\n\n def build_sys(self, data=None, error=True, mask=None, aperture=None, throughput=None):\n \"\"\"\n Build linear system arising from the logL maximisation.\n TIPS: To be quicker, only specify the psf (`p_list`) in kwargs.\n There will be only one matrix multiplication:\n (P/sig).(w.T.lambda.c_n).\n Parameters\n ----------\n data : (N, M) array_like, optional\n A 2-D array of real values representing the detector image.\n Default is the object attribute `data`.\n error: bool or (N, M) array_like, optional\n Estimate of the error on each pixel.\n If 2-d array, `sig` is the new error estimation map.\n It is the same shape as `sig` initiation input. If bool,\n wheter to apply sigma or not. The method will return\n b_n/sigma if True or array_like and b_n if False. If True,\n the default object attribute `sig` will be use.\n mask : (N, M) array_like boolean, optional\n Additionnal mask for a given exposure. Will be added\n to the object general mask.\n aperture : (N_ord, N, M) list or array of 2-D arrays, optional\n A list or array of the spatial profile for each order\n on the detector. 
It has to have the same (N, M) as `data`.\n Default is the object attribute `p_list`\n throughput : (N_ord [, N_k]) list or array of functions, optional\n A list or array of the throughput at each order.\n The functions depend on the wavelength\n Default is the object attribute `t_list`\n\n Returns\n ------\n A and b from Ax = b beeing the system to solve.\n \"\"\"\n\n # Check if inputs are suited for quick mode;\n # Quick mode if `t_list` is not specified.\n quick = (throughput is None)\n\n # and if mask doesn't change\n quick &= (mask is None)\n quick &= (self.w_t_wave_c is not None) # Pre-computed\n if quick:\n self.verbose_print('Quick mode is on!')\n\n # Use data from object as default\n if data is None:\n data = self.data\n else:\n # Update data\n self.data = data\n\n # Update mask if given\n if mask is not None:\n self.update_mask(mask)\n\n # Take (updated) mask from object\n mask = self.mask\n\n # Get some dimensions infos\n n_wavepoints, n_orders = self.n_wavepoints, self.n_orders\n\n # Update aperture maps and throughput values.\n if aperture is not None:\n self.update_aperture(aperture)\n\n if throughput is not None:\n self.update_throughput(throughput)\n\n # Calculations\n\n # Build matrix B\n # Initiate with empty matrix\n n_i = (~mask).sum() # n good pixels\n b_matrix = csr_matrix((n_i, n_wavepoints))\n\n # Sum over orders\n for i_order in range(n_orders):\n\n # Get sparse pixel mapping matrix.\n b_matrix += self.get_pixel_mapping(i_order, error=error, quick=quick)\n\n # Build system\n # Fisrt get `sig` which have been update`\n # when calling `get_b_n`\n error = self.error\n\n # Take only valid pixels and apply `error` on data\n data = data[~mask]/error[~mask]\n\n # (B_T * B) * f = (data/sig)_T * B\n # (matrix ) * f = result\n matrix = b_matrix.T.dot(b_matrix)\n result = csr_matrix(data.T).dot(b_matrix)\n\n return matrix, result.toarray().squeeze()\n\n def set_tikho_matrix(self, t_mat=None, t_mat_func=None,\n fargs=None, fkwargs=None):\n \"\"\"\n Set the tikhonov matrix attribute.\n The matrix can be directly specified as an input, or\n it can be built using `t_mat_func`\n\n Parameters\n ----------\n t_mat: matrix-like, optional\n TIkhonov regularisation matrix. scipy.sparse matrix\n are recommended.\n t_mat_func: callable, optional\n Function use to generate `t_mat`is not specified.\n Will take `fargs` and `fkwargs`as imput.\n fargs: tuple, optional\n Arguments passed to `t_mat_func`\n fkwargs: dict, optional\n Keywords arguments passed to `t_mat_func`\n \"\"\"\n\n # Generate the matrix with the function\n if t_mat is None:\n\n # Default function if not specified\n if t_mat_func is None:\n\n # Use the nyquist sampled gaussian kernel\n t_mat_func = engine_utils.get_nyquist_matrix\n\n # Default args\n if fargs is None:\n fargs = (self.wave_grid, )\n if fkwargs is None:\n fkwargs = {\"integrate\": True}\n\n # Call function\n t_mat = t_mat_func(*fargs, **fkwargs)\n\n # Set attribute\n self.tikho_mat = t_mat\n\n return\n\n def get_tikho_matrix(self, **kwargs):\n \"\"\"\n Return the tikhonov matrix.\n Generate it with `set_tikho_matrix` method\n if not define yet. If so, all arguments are passed\n to `set_tikho_matrix`. 
The result is saved as an attribute.\n \"\"\"\n\n if self.tikho_mat is None:\n self.set_tikho_matrix(**kwargs)\n\n return self.tikho_mat\n\n def get_tikho_tests(self, factors, tikho=None, estimate=None,\n tikho_kwargs=None, **kwargs):\n \"\"\"\n Test different factors for Tikhonov regularisation.\n\n Parameters\n ----------\n factors: 1D list or array-like\n Factors to be tested.\n tikho: Tikhonov object, optional\n Tikhonov regularisation object (see regularisation.Tikhonov).\n If not given, an object will be initiated using the linear system\n from `build_sys` method and kwargs will be passed.\n estimate: 1D array-like, optional\n Estimate of the flux projected on the wavelength grid.\n tikho_kwargs:\n passed to init Tikhonov object. Possible options\n are `t_mat`, `grid` and `verbose`\n data : (N, M) array_like, optional\n A 2-D array of real values representing the detector image.\n Default is the object attribute `data`.\n error : (N, M) array_like, optional\n Estimate of the error on each pixel`\n Same shape as `data`.\n Default is the object attribute `sig`.\n aperture : (N_ord, N, M) list or array of 2-D arrays, optional\n A list or array of the spatial profile for each order\n on the detector. It has to have the same (N, M) as `data`.\n Default is the object attribute `p_list`\n throughput : (N_ord [, N_k]) list or array of functions, optional\n A list or array of the throughput at each order.\n The functions depend on the wavelength\n Default is the object attribute `t_list`\n\n Returns\n ------\n dictonary of the tests results\n \"\"\"\n\n # Build the system to solve\n matrix, result = self.build_sys(**kwargs)\n\n # Get valid grid index\n i_grid = self.get_i_grid(result)\n\n if tikho is None:\n t_mat = self.get_tikho_matrix()\n default_kwargs = {'grid': self.wave_grid,\n 'index': i_grid,\n 't_mat': t_mat}\n if tikho_kwargs is None:\n tikho_kwargs = {}\n tikho_kwargs = {**default_kwargs, **tikho_kwargs}\n tikho = engine_utils.Tikhonov(matrix, result, **tikho_kwargs)\n self.tikho = tikho\n\n # Test all factors\n tests = tikho.test_factors(factors, estimate)\n\n # Generate logl using solutions for each factors\n logl_list = []\n\n # Compute b_n only the first iteration. Then\n # use the same value to rebuild the detector.\n same = False\n for sln in tests['solution']:\n\n # Init the spectrum (f_k) with nan, so it has the adequate shape\n spectrum = np.ones(result.shape[-1]) * np.nan\n spectrum[i_grid] = sln # Assign valid values\n logl_list.append(self.compute_likelihood(spectrum, same=same)) # log_l\n same = True\n\n # Save in tikho's tests\n tikho.test['-logl'] = -1 * np.array(logl_list)\n\n # Save also grid\n tikho.test[\"grid\"] = self.wave_grid[i_grid]\n tikho.test[\"i_grid\"] = i_grid\n\n return tikho.test\n\n def best_tikho_factor(self, tests=None, interpolate=True,\n interp_index=None, i_plot=False):\n \"\"\"Compute the best scale factor for Tikhonov regularisation.\n It is determine by taking the factor giving the highest logL on\n the detector.\n\n Parameters\n ----------\n tests: dictionnary, optional\n Results of tikhonov extraction tests\n for different factors.\n Must have the keys \"factors\" and \"-logl\".\n If not specified, the tests from self.tikho.tests\n are used.\n interpolate: bool, optional\n If True, use akima spline interpolation\n to find a finer minimum. 
Default is true.\n interp_index: 2 element list, optional\n Index around the minimum value on the tested factors.\n Will be used for the interpolation.\n For example, if i_min is the position of\n the minimum logL value and [i1, i2] = interp_index,\n then the interpolation will be perform between\n i_min + i1 and i_min + i2 - 1\n i_plot: bool, optional\n Plot the result of the minimization\n\n Returns\n -------\n Best scale factor (float)\n \"\"\"\n\n if interp_index is None:\n interp_index = [-2, 4]\n\n # Use pre-run tests if not specified\n if tests is None:\n tests = self.tikho.tests\n\n # Get relevant quantities from tests\n factors = tests[\"factors\"]\n logl = tests[\"-logl\"]\n\n # Get position of the minimum value\n i_min = np.argmin(logl)\n\n # Interpolate to get a finer value\n if interpolate:\n\n # Only around the best value\n i_range = [i_min + d_i for d_i in interp_index]\n\n # Make sure it's still a valid index\n i_range[0] = np.max([i_range[0], 0])\n i_range[-1] = np.min([i_range[-1], len(logl) - 1])\n\n # Which index to use\n index = np.arange(*i_range, 1)\n\n # Akima spline in log space\n x_val, y_val = np.log10(factors[index]), np.log10(logl[index])\n i_sort = np.argsort(x_val)\n x_val, y_val = x_val[i_sort], y_val[i_sort]\n fct = Akima1DInterpolator(x_val, y_val)\n\n # Find min\n bounds = (x_val.min(), x_val.max())\n opt_args = {\"bounds\": bounds,\n \"method\": \"bounded\"}\n min_fac = minimize_scalar(fct, **opt_args).x\n\n # Plot the fit if required\n if i_plot:\n\n # Original grid\n plt.plot(np.log10(factors), np.log10(logl), \":\")\n\n # Fit sub-grid\n plt.plot(x_val, y_val, \".\")\n\n # Show akima spline\n x_new = np.linspace(*bounds, 100)\n plt.plot(x_new, fct(x_new))\n\n # Show minimum found\n plt.plot(min_fac, fct(min_fac), \"x\")\n\n # Labels\n plt.xlabel(r\"$\\log_{10}$(factor)\")\n plt.ylabel(r\"$\\log_{10}( - \\log L)$\")\n plt.tight_layout()\n\n # Return to linear scale\n min_fac = 10.**min_fac\n\n # Simply return the minimum value if no interpolation required\n else:\n min_fac = factors[i_min]\n\n # Return scale factor minimizing the logL\n return min_fac\n\n def rebuild(self, spectrum=None, i_orders=None, same=False):\n \"\"\"Build current model image of the detector.\n\n :param spectrum: flux as a function of wavelength if callable\n or array of flux values corresponding to self.wave_grid.\n :param i_orders: Indices of orders to model. 
Default is\n all available orders.\n :param same: If True, do not recompute the pixel_mapping matrix (b_n)\n and instead use the most recent pixel_mapping to speed up the computation.\n Default is False.\n\n :type spectrum: callable or array-like\n :type i_orders: List[int]\n :type same: bool\n\n :returns: model - the modelled detector image.\n :rtype: array[float]\n \"\"\"\n\n # If no spectrum given compute it.\n if spectrum is None:\n spectrum = self.extract()\n\n # If flux is callable, evaluate on the wavelength grid.\n if callable(spectrum):\n spectrum = spectrum(self.wave_grid)\n\n # Iterate over all orders by default.\n if i_orders is None:\n i_orders = range(self.n_orders)\n\n # Get required class attribute.\n mask = self.mask\n\n # Evaluate the detector model.\n model = np.zeros(self.data_shape)\n for i_order in i_orders:\n\n # Compute the pixel mapping matrix (b_n) for the current order.\n pixel_mapping = self.get_pixel_mapping(i_order, error=False, same=same)\n\n # Evaluate the model of the current order.\n model[~mask] += pixel_mapping.dot(spectrum)\n\n # Set masked values to NaN.\n model[mask] = np.nan\n\n return model\n\n def compute_likelihood(self, spectrum=None, same=False):\n \"\"\"Return the log likelihood asssociated with a particular spectrum.\n\n :param spectrum: flux as a function of wavelength if callable\n or array of flux values corresponding to self.wave_grid.\n If not given it will be computed by calling self.extract().\n :param same: If True, do not recompute the pixel_mapping matrix (b_n)\n and instead use the most recent pixel_mapping to speed up the computation.\n Default is False.\n\n :type spectrum: array-like\n :type same: bool\n\n :return: logl - The log-likelihood of the spectrum.\n :rtype: array[float]\n\n \"\"\"\n\n # If no spectrum given compute it.\n if spectrum is None:\n spectrum = self.extract()\n\n # Evaluate the model image for the spectrum.\n model = self.rebuild(spectrum, same=same)\n\n # Get data and error attributes.\n data = self.data\n error = self.error\n\n # Compute the log-likelihood for the spectrum.\n logl = -np.nansum((model - data)**2/error**2)\n\n return logl\n\n @staticmethod\n def _solve(matrix, result, index=slice(None)):\n \"\"\"\n Simply pass `matrix` and `result`\n to `scipy.spsolve` and apply index.\n \"\"\"\n\n return spsolve(matrix[index, :][:, index], result[index])\n\n @staticmethod\n def _solve_tikho(matrix, result, index=slice(None), **kwargs):\n \"\"\"Solve system using Tikhonov regularisation\"\"\"\n\n # Note that the indexing is applied inside the function\n return engine_utils.tikho_solve(matrix, result, index=index, **kwargs)\n\n def extract(self, tikhonov=False, tikho_kwargs=None, # TODO merge with __call__.\n factor=None, **kwargs):\n \"\"\"\n Extract underlying flux on the detector.\n All parameters are passed to `build_sys` method.\n TIPS: To be quicker, only specify the psf (`p_list`) in kwargs.\n There will be only one matrix multiplication:\n (P/sig).(w.T.lambda.c_n).\n\n Parameters\n ----------\n tikhonov : bool, optional\n Wheter to use tikhonov extraction\n (see regularisation.tikho_solve function).\n Default is False.\n tikho_kwargs : dictionnary or None, optional\n Arguments passed to `tikho_solve`.\n factor : the tikhonov factor to use of tikhonov is True\n data : (N, M) array_like, optional\n A 2-D array of real values representing the detector image.\n Default is the object attribute `data`.\n error : (N, M) array_like, optional\n Estimate of the error on each pixel`\n Same shape as `data`.\n 
Default is the object attribute `sig`.\n mask : (N, M) array_like boolean, optional\n Additionnal mask for a given exposure. Will be added\n to the object general mask.\n aperture : (N_ord, N, M) list or array of 2-D arrays, optional\n A list or array of the spatial profile for each order\n on the detector. It has to have the same (N, M) as `data`.\n Default is the object attribute `p_list`\n throughput : (N_ord [, N_k]) list or array of functions, optional\n A list or array of the throughput at each order.\n The functions depend on the wavelength\n Default is the object attribute `t_list`\n\n Returns\n -----\n spectrum (f_k): solution of the linear system\n \"\"\"\n\n # Build the system to solve\n matrix, result = self.build_sys(**kwargs)\n\n # Get index of `wave_grid` convered by the pixel.\n # `wave_grid` may cover more then the pixels.\n i_grid = self.get_i_grid(result)\n\n # Init spectrum with NaNs.\n spectrum = np.ones(result.shape[-1]) * np.nan\n\n # Solve with the specified solver.\n # Only solve for valid range `i_grid` (on the detector).\n # It will be a singular matrix otherwise.\n if tikhonov:\n\n if factor is None:\n raise ValueError(\"Please specify tikhonov `factor`.\")\n\n t_mat = self.get_tikho_matrix()\n default_kwargs = {'grid': self.wave_grid,\n 'index': i_grid,\n 't_mat': t_mat,\n 'factor': factor}\n\n if tikho_kwargs is None:\n tikho_kwargs = {}\n\n tikho_kwargs = {**default_kwargs, **tikho_kwargs}\n spectrum[i_grid] = self._solve_tikho(matrix, result, **tikho_kwargs)\n\n else:\n spectrum[i_grid] = self._solve(matrix, result, index=i_grid)\n\n return spectrum\n\n def __call__(self, **kwargs):\n \"\"\"\n Extract underlying flux on the detector by calling\n the `extract` method.\n All parameters are passed to `build_sys` method.\n TIPS: To be quicker, only specify the psf (`p_list`) in kwargs.\n There will be only one matrix multiplication:\n (P/sig).(w.T.lambda.c_n).\n Parameters\n ----------\n tikhonov : bool, optional\n Wheter to use tikhonov extraction\n (see regularisation.tikho_solve function).\n Default is False.\n tikho_kwargs : dictionnary or None, optional\n Arguments passed to `tikho_solve`.\n data : (N, M) array_like, optional\n A 2-D array of real values representing the detector image.\n Default is the object attribute `data`.\n error : (N, M) array_like, optional\n Estimate of the error on each pixel`\n Same shape as `data`.\n Default is the object attribute `sig`.\n mask : (N, M) array_like boolean, optional\n Additionnal mask for a given exposure. Will be added\n to the object general mask.\n throughput : (N_ord [, N_k]) list or array of functions, optional\n A list or array of the throughput at each order.\n The functions depend on the wavelength\n Default is the object attribute `t_list`\n aperture : (N_ord, N, M) list or array of 2-D arrays, optional\n A list or array of the spatial profile for each order\n on the detector. 
It has to have the same (N, M) as `data`.\n Default is the object attribute `p_list`\n\n Returns\n -----\n spectrum (f_k): solution of the linear system\n \"\"\"\n\n return self.extract(**kwargs)\n\n def bin_to_pixel(self, i_order=0, grid_pix=None, grid_f_k=None, convolved_spectrum=None,\n spectrum=None, bounds_error=False, throughput=None, **kwargs):\n \"\"\"\n Integrate the convolved_spectrum (f_k_c) over a pixel grid using the trapezoidal rule.\n The concoled spectrum (f_k_c) is interpolated using scipy.interpolate.interp1d and the\n kwargs and bounds_error are passed to interp1d.\n i_order: int, optional\n index of the order to be integrated, default is 0, so\n the first order specified.\n grid_pix: tuple of two 1d-arrays or 1d-array\n If a tuple of 2 arrays is given, assume it is the lower and upper\n integration ranges. If 1d-array, assume it is the center\n of the pixels. If not given, the wavelength map and the psf map\n of `i_order` will be used to compute a pixel grid.\n grid_f_k: 1d array, optional\n grid on which the convolved flux is projected.\n Default is the wavelength grid for `i_order`.\n convolved_spectrum (f_k_c): 1d array, optional\n Convolved flux to be integrated. If not given, `spectrum`\n will be used (and convolved to `i_order` resolution)\n spectrum (f_k): 1d array, optional\n non-convolved flux (result of the `extract` method).\n Not used if `convolved_spectrum` is specified.\n bounds_error and kwargs:\n passed to interp1d function to interpolate the convolved_spectrum.\n throughput: callable, optional\n Spectral throughput for a given order (ì_ord).\n Default is given by the list of throughput saved as\n the attribute `t_list`.\n \"\"\"\n # Take the value from the order if not given...\n\n # ... for the flux grid ...\n if grid_f_k is None:\n grid_f_k = self.wave_grid_c(i_order)\n\n # ... for the convolved flux ...\n if convolved_spectrum is None:\n # Use the spectrum (f_k) if the convolved_spectrum (f_k_c) not given.\n if spectrum is None:\n raise ValueError(\"`spectrum` or `convolved_spectrum` must be specified.\")\n else:\n # Convolve the spectrum (f_k).\n convolved_spectrum = self.kernels[i_order].dot(spectrum)\n\n # ... 
and for the pixel bins\n if grid_pix is None:\n pix_center, _ = self.grid_from_map(i_order)\n\n # Get pixels borders (plus and minus)\n pix_p, pix_m = engine_utils.get_wave_p_or_m(pix_center)\n\n else: # Else, unpack grid_pix\n\n # Could be a scalar or a 2-elements object)\n if len(grid_pix) == 2:\n\n # 2-elements object, so we have the borders\n pix_m, pix_p = grid_pix\n\n # Need to compute pixel center\n d_pix = (pix_p - pix_m)\n pix_center = grid_pix[0] + d_pix\n else:\n\n # 1-element object, so we have the pix centers\n pix_center = grid_pix\n\n # Need to compute the borders\n pix_p, pix_m = engine_utils.get_wave_p_or_m(pix_center)\n\n # Set the throughput to object attribute\n # if not given\n if throughput is None:\n\n # Need to interpolate\n x, y = self.wave_grid, self.throughput[i_order]\n throughput = interp1d(x, y)\n\n # Apply throughput on flux\n convolved_spectrum = convolved_spectrum * throughput(grid_f_k)\n\n # Interpolate\n kwargs['bounds_error'] = bounds_error\n fct_f_k = interp1d(grid_f_k, convolved_spectrum, **kwargs)\n\n # Intergrate over each bins\n bin_val = []\n for x1, x2 in zip(pix_m, pix_p):\n\n # Grid points that fall inside the pixel range\n i_grid = (x1 < grid_f_k) & (grid_f_k < x2)\n x_grid = grid_f_k[i_grid]\n\n # Add boundaries values to the integration grid\n x_grid = np.concatenate([[x1], x_grid, [x2]])\n\n # Integrate\n integrand = fct_f_k(x_grid) * x_grid\n bin_val.append(np.trapz(integrand, x_grid))\n\n # Convert to array and return with the pixel centers.\n return pix_center, np.array(bin_val)\n\n @staticmethod\n def _check_plot_inputs(fig, ax):\n \"\"\"Method to manage inputs for plots methods.\"\"\"\n\n # Use ax or fig if given. Else, init the figure\n if (fig is None) and (ax is None):\n fig, ax = plt.subplots(1, 1, sharex=True)\n elif ax is None:\n ax = fig.subplots(1, 1, sharex=True)\n\n return fig, ax\n\n def plot_tikho_factors(self):\n \"\"\"Plot results of tikhonov tests.\n\n Returns\n ------\n figure and axes (for plot)\n \"\"\"\n\n # Use tikhonov extraction from object\n tikho = self.tikho\n\n # Init figure\n fig, ax = plt.subplots(2, 1, sharex=True, figsize=(8, 6))\n\n # logl plot\n tikho.error_plot(ax=ax[0], test_key='-logl')\n\n # Error plot\n tikho.error_plot(ax=ax[1])\n\n # Labels\n ax[0].set_ylabel(r'$\\log{L}$ on detector')\n\n # Other details\n fig.tight_layout()\n\n return fig, ax\n\n def plot_sln(self, spectrum, fig=None, ax=None, i_order=0,\n ylabel='Flux', xlabel=r'Wavelength [$\\mu$m]', **kwargs):\n \"\"\"Plot extracted spectrum\n\n Parameters\n ----------\n spectrum (f_k): array-like\n Flux projected on the wavelength grid\n fig: matplotlib figure, optional\n Figure to use for plot\n If not given and ax is None, new figure is initiated\n ax: matplotlib axis, optional\n axis to use for plot. 
If not given, a new axis is initiated.\n i_order: int, optional\n index of the order to plot.\n Default is 0 (so the first order given).\n ylabel: str, optional\n Label of y axis\n xlabel: str, optional\n Label of x axis\n kwargs:\n other kwargs to be passed to plt.plot\n\n Returns\n ------\n fig, ax\n \"\"\"\n\n # Manage method's inputs\n fig, ax = self._check_plot_inputs(fig, ax)\n\n # Set values to plot\n x = self.wave_grid_c(i_order)\n y = self.kernels[i_order].dot(spectrum)\n\n # Plot\n ax.plot(x, y, **kwargs)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n return fig, ax\n\n def plot_err(self, spectrum, f_th_ord, fig=None, ax=None,\n i_order=0, error='relative', ylabel='Error',\n xlabel=r'Wavelength [$\\mu$m]', **kwargs):\n \"\"\"Plot error on extracted spectrum\n\n Parameters\n ----------\n spectrum (f_k): array-like\n Flux projected on the wavelength grid\n f_th_ord: array-like\n Injected flux projected on the wavelength grid\n and convolved at `i_order` resolution\n fig: matplotlib figure, optional\n Figure to use for plot\n If not given and ax is None, new figure is initiated\n ax: matplotlib axis, optional\n axis to use for plot. If not given, a new axis is initiated.\n i_order: int, optional\n index of the order to plot. Default is 0 (so the first order given)\n error: str, optional\n Which type of error to plot.\n Possibilities: 'relative', 'absolute', 'to_noise'\n Default is 'relative'. To noise is the error relative\n to the expected Poisson noise error\n ylabel: str, optional\n Label of y axis\n xlabel: str, optional\n Label of x axis\n kwargs:\n other kwargs to be passed to plt.plot\n\n Returns\n ------\n fig, ax\n \"\"\"\n\n # Manage method's inputs\n fig, ax = self._check_plot_inputs(fig, ax)\n\n # Set values to plot\n x = self.wave_grid_c(i_order)\n convolved_spectrum = self.kernels[i_order].dot(spectrum)\n\n if error == 'relative':\n y = (convolved_spectrum - f_th_ord) / f_th_ord\n elif error == 'absolute':\n y = convolved_spectrum - f_th_ord\n elif error == 'to_noise':\n y = (convolved_spectrum - f_th_ord) / np.sqrt(f_th_ord)\n else:\n raise ValueError('`error` argument is not valid.')\n\n # Add info to ylabel\n ylabel += ' ({})'.format(error)\n\n # Plot\n ax.plot(x, y, **kwargs)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n return fig, ax\n\n\nclass ExtractionEngine(_BaseOverlap): # TODO Merge with _BaseOverlap?\n \"\"\"\n Version of overlaping extraction with oversampled trapezoidal integration\n overlaping extraction solve the equation of the form:\n (B_T * B) * f = (data/sig)_T * B\n where B is a matrix and f is an array.\n The matrix multiplication B * f is the 2d model of the detector.\n We want to solve for the array f.\n The elements of f are labelled by 'k'.\n The pixels are labeled by 'i'.\n Every pixel 'i' is covered by a set of 'k' for each order\n of diffraction.\n \"\"\"\n\n def __init__(self, wave_map, aperture, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n aperture : (N_ord, N, M) list or array of 2-D arrays\n A list or array of the spatial profile for each order\n on the detector. 
It has to have the same (N, M) as `data`.\n wave_map : (N_ord, N, M) list or array of 2-D arrays\n A list or array of the central wavelength position for each\n order on the detector.\n It has to have the same (N, M) as `data`.\n throughput : (N_ord [, N_k]) list of array or callable\n A list of functions or array of the throughput at each order.\n If callable, the functions depend on the wavelength.\n If array, projected on `wave_grid`.\n kernels : array, callable or sparse matrix\n Convolution kernel to be applied on spectrum (f_k) for each orders.\n Can be array of the shape (N_ker, N_k_c).\n Can be a callable with the form f(x, x0) where x0 is\n the position of the center of the kernel. In this case, it must\n return a 1D array (len(x)), so a kernel value\n for each pairs of (x, x0). If array or callable,\n it will be passed to `convolution.get_c_matrix` function\n and the `c_kwargs` can be passed to this function.\n If sparse, the shape has to be (N_k_c, N_k) and it will\n be used directly. N_ker is the length of the effective kernel\n and N_k_c is the length of the spectrum (f_k) convolved.\n data : (N, M) array_like, optional\n A 2-D array of real values representing the detector image.\n error : (N, M) array_like, optional\n Estimate of the error on each pixel. Default is one everywhere.\n mask : (N, M) array_like boolean, optional\n Boolean Mask of the bad pixels on the detector.\n orders: list, optional:\n List of orders considered. Default is orders = [1, 2]\n wave_grid : (N_k) array_like, optional\n The grid on which f(lambda) will be projected.\n Default still has to be improved.\n wave_bounds : list or array-like (N_ord, 2), optional\n Boundary wavelengths covered by each orders.\n Default is the wavelength covered by `wave_map`.\n tresh : float, optional:\n The pixels where the estimated spatial profile is less than\n this value will be masked. Default is 1e-5.\n c_kwargs : list of N_ord dictionnaries or dictionnary, optional\n Inputs keywords arguments to pass to\n `convolution.get_c_matrix` function for each orders.\n If dictionnary, the same c_kwargs will be used for each orders.\n verbose : bool, optional\n Print steps. Default is False.\n \"\"\"\n\n # Get wavelength at the boundary of each pixel\n # TODO Could also be an input??\n wave_p, wave_m = [], []\n for wave in wave_map: # For each order\n lp, lm = engine_utils.get_wave_p_or_m(wave) # Lambda plus or minus\n wave_p.append(lp), wave_m.append(lm)\n\n self.wave_p, self.wave_m = wave_p, wave_m # Save values\n\n # Init upper class\n super().__init__(wave_map, aperture, *args, **kwargs)\n\n def _get_lo_hi(self, grid, i_order):\n \"\"\"\n Find the lowest (lo) and highest (hi) index\n of wave_grid for each pixels and orders.\n\n Returns:\n -------\n 1d array of the lowest and 1d array of the highest index.\n the length is the number of non-masked pixels\n \"\"\"\n\n self.verbose_print('Compute low high')\n\n # Get needed attributes\n mask = self.mask\n\n # ... 
order dependent attributes\n attrs = ['wave_p', 'wave_m', 'mask_ord']\n wave_p, wave_m, mask_ord = self.get_attributes(*attrs, i_order=i_order)\n\n # Compute only for valid pixels\n wave_p = wave_p[~mask]\n wave_m = wave_m[~mask]\n\n # Find lower (lo) index in the pixel\n lo = np.searchsorted(grid, wave_m, side='right')\n\n # Find higher (hi) index in the pixel\n hi = np.searchsorted(grid, wave_p) - 1\n\n # Set invalid pixels for this order to lo=-1 and hi=-2\n ma = mask_ord[~mask]\n lo[ma], hi[ma] = -1, -2\n\n self.verbose_print('Done')\n\n return lo, hi\n\n def get_mask_wave(self, i_order):\n \"\"\" Mask according to wavelength grid \"\"\"\n\n attrs = ['wave_p', 'wave_m', 'i_bounds']\n wave_p, wave_m, i_bnds = self.get_attributes(*attrs, i_order=i_order)\n wave_min = self.wave_grid[i_bnds[0]]\n wave_max = self.wave_grid[i_bnds[1]-1]\n\n mask = (wave_m < wave_min) | (wave_p > wave_max)\n\n return mask\n\n def get_w(self, i_order):\n \"\"\"\n Compute integration weights for each grid points and each pixels.\n Depends on the order `n`.\n\n Returns\n ------\n w_n: 2d array\n weights at this specific order `n`. The shape is given by:\n (number of pixels, max number of wavelenghts covered by a pixel)\n k_n: 2d array\n index of the wavelength grid corresponding to the weights.\n Same shape as w_n\n \"\"\"\n\n self.verbose_print('Compute weigths and k')\n\n # Get needed attributes\n wave_grid, mask = self.get_attributes('wave_grid', 'mask')\n\n # ... order dependent attributes\n attrs = ['wave_p', 'wave_m', 'mask_ord', 'i_bounds']\n wave_p, wave_m, mask_ord, i_bnds = self.get_attributes(*attrs, i_order=i_order)\n\n # Use the convolved grid (depends on the order)\n wave_grid = wave_grid[i_bnds[0]:i_bnds[1]]\n\n # Compute the wavelength coverage of the grid\n d_grid = np.diff(wave_grid)\n\n # Get lo hi\n lo, hi = self._get_lo_hi(wave_grid, i_order) # Get indexes\n\n # Compute only valid pixels\n wave_p, wave_m = wave_p[~mask], wave_m[~mask]\n ma = mask_ord[~mask]\n\n # Number of used pixels\n n_i = len(lo)\n i = np.arange(n_i)\n\n self.verbose_print('Compute k')\n\n # Define fisrt and last index of wave_grid\n # for each pixel\n k_first, k_last = -1*np.ones(n_i), -1*np.ones(n_i)\n\n # If lowest value close enough to the exact grid value,\n # NOTE: Could be approximately equal to the exact grid\n # value. It would look like that.\n # >>> lo_dgrid = lo\n # >>> lo_dgrid[lo_dgrid==len(d_grid)] = len(d_grid) - 1\n # >>> cond = (grid[lo]-wave_m)/d_grid[lo_dgrid] <= 1.0e-8\n # But let's stick with the exactly equal\n cond = (wave_grid[lo] == wave_m)\n\n # special case (no need for lo_i - 1)\n k_first[cond & ~ma] = lo[cond & ~ma]\n wave_m[cond & ~ma] = wave_grid[lo[cond & ~ma]]\n\n # else, need lo_i - 1\n k_first[~cond & ~ma] = lo[~cond & ~ma] - 1\n\n # Same situation for highest value. If we follow the note\n # above (~=), the code could look like\n # >>> cond = (wave_p-grid[hi])/d_grid[hi-1] <= 1.0e-8\n # But let's stick with the exactly equal\n cond = (wave_p == wave_grid[hi])\n\n # special case (no need for hi_i - 1)\n k_last[cond & ~ma] = hi[cond & ~ma]\n wave_p[cond & ~ma] = wave_grid[hi[cond & ~ma]]\n\n # else, need hi_i + 1\n k_last[~cond & ~ma] = hi[~cond & ~ma] + 1\n\n # Generate array of all k_i. Set to -1 if not valid\n k_n, bad = engine_utils.arange_2d(k_first, k_last + 1, dtype=int)\n k_n[bad] = -1\n\n # Number of valid k per pixel\n n_k = np.sum(~bad, axis=-1)\n\n # Compute array of all w_i. 
Set to np.nan if not valid\n # Initialize\n w_n = np.zeros(k_n.shape, dtype=float)\n ####################\n ####################\n # 4 different cases\n ####################\n ####################\n\n self.verbose_print('compute w')\n\n # Valid for every cases\n w_n[:, 0] = wave_grid[k_n[:, 1]] - wave_m\n w_n[i, n_k-1] = wave_p - wave_grid[k_n[i, n_k-2]]\n\n ##################\n # Case 1, n_k == 2\n ##################\n case = (n_k == 2) & ~ma\n if case.any():\n\n self.verbose_print('n_k = 2')\n\n # if k_i[0] != lo_i\n cond = case & (k_n[:, 0] != lo)\n w_n[cond, 1] += wave_m[cond] - wave_grid[k_n[cond, 0]]\n\n # if k_i[-1] != hi_i\n cond = case & (k_n[:, 1] != hi)\n w_n[cond, 0] += wave_grid[k_n[cond, 1]] - wave_p[cond]\n\n # Finally\n part1 = (wave_p[case] - wave_m[case])\n part2 = d_grid[k_n[case, 0]]\n w_n[case, :] *= (part1 / part2)[:, None]\n\n ##################\n # Case 2, n_k >= 3\n ##################\n case = (n_k >= 3) & ~ma\n if case.any():\n\n self.verbose_print('n_k = 3')\n n_ki = n_k[case]\n w_n[case, 1] = wave_grid[k_n[case, 1]] - wave_m[case]\n w_n[case, n_ki-2] += wave_p[case] - wave_grid[k_n[case, n_ki-2]]\n\n # if k_i[0] != lo_i\n cond = case & (k_n[:, 0] != lo)\n nume1 = wave_grid[k_n[cond, 1]] - wave_m[cond]\n nume2 = wave_m[cond] - wave_grid[k_n[cond, 0]]\n deno = d_grid[k_n[cond, 0]]\n w_n[cond, 0] *= (nume1 / deno)\n w_n[cond, 1] += (nume1 * nume2 / deno)\n\n # if k_i[-1] != hi_i\n cond = case & (k_n[i, n_k-1] != hi)\n n_ki = n_k[cond]\n nume1 = wave_p[cond] - wave_grid[k_n[cond, n_ki-2]]\n nume2 = wave_grid[k_n[cond, n_ki-1]] - wave_p[cond]\n deno = d_grid[k_n[cond, n_ki-2]]\n w_n[cond, n_ki-1] *= (nume1 / deno)\n w_n[cond, n_ki-2] += (nume1 * nume2 / deno)\n\n ##################\n # Case 3, n_k >= 4\n ##################\n case = (n_k >= 4) & ~ma\n if case.any():\n self.verbose_print('n_k = 4')\n n_ki = n_k[case]\n w_n[case, 1] += wave_grid[k_n[case, 2]] - wave_grid[k_n[case, 1]]\n w_n[case, n_ki-2] += (wave_grid[k_n[case, n_ki-2]]\n - wave_grid[k_n[case, n_ki-3]])\n\n ##################\n # Case 4, n_k > 4\n ##################\n case = (n_k > 4) & ~ma\n if case.any():\n self.verbose_print('n_k > 4')\n i_k = np.indices(k_n.shape)[-1]\n cond = case[:, None] & (2 <= i_k) & (i_k < n_k[:, None]-2)\n ind1, ind2 = np.where(cond)\n w_n[ind1, ind2] = (d_grid[k_n[ind1, ind2]-1]\n + d_grid[k_n[ind1, ind2]])\n\n # Finally, divide w_n by 2\n w_n /= 2.\n\n # Make sure invalid values are masked\n w_n[k_n < 0] = np.nan\n\n self.verbose_print('Done')\n\n return w_n, k_n\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# General imports.\nimport numpy as np\nfrom scipy.interpolate import interp1d, RectBivariateSpline\n\n# Astronoomy imports.\nfrom astropy.io import fits\n\n# Plotting.\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\n\n###############################################\n# Hack to get the path of module. To be changed.\nfrom os.path import abspath, dirname\n\n\ndef get_module_path(file):\n\n dir_path = abspath(file)\n dir_path = dirname(dir_path) + '/'\n\n return dir_path\n###############################################\n\n\n# Default file parameters\nDEF_PATH = get_module_path(__file__) + \"Ref_files/\"\n\nFILE_SOSS = \"NIRISS_Throughput_STScI.fits\"\nDEF_FILE_FRAME = \"spectral_kernel_matrix/spectral_kernel_matrix_os_{}_width_{}pixels.fits\"\n\n\nclass ThroughputSOSS(interp1d):\n \"\"\"\n Callable Throughput of SOSS mode for a given order.\n Function oof wavelength in microns.\n \"\"\"\n path = DEF_PATH\n filename = FILE_SOSS\n\n def __init__(self, order=1):\n \"\"\"\n Parameter:\n order: int\n which order do you want? Default is the first order (1)\n \"\"\"\n # Open file\n hdu = fits.open(self.path + self.filename)\n\n # Get transmission\n key = 'SOSS_order{}'.format(order)\n tr = hdu[1].data[key].squeeze()\n\n # Get wavelength\n wv = hdu[1].data['LAMBDA'].squeeze()\n # nm to microns\n wv /= 1000.\n\n # Interpolate\n super().__init__(wv, tr, kind='cubic',\n fill_value=0, bounds_error=False)\n\n\nclass WebbKer:\n \"\"\"\n Class to load Webb convolution kernel. Once instanciated,\n the object act as a callable (function)\n of wavelength and center wavelength.\n It is also possible to have a look at the kernels with\n the `show` method.\n \"\"\"\n path = DEF_PATH\n file_frame = DEF_FILE_FRAME\n\n def __init__(self, wave_map, n_os=10, n_pix=21,\n bounds_error=False, fill_value=\"extrapolate\"):\n \"\"\"\n Parameters\n ----------\n wave_map: 2d array\n Wavelength map of the detector. Since WebbPSF returns\n kernels in the pixel space, we need a wv_map to convert\n to wavelength space.\n n_os: int, optional\n oversampling of the kernel. Default is 10\n n_pix: int, optional\n Length of the kernel in pixels. Default is 21.\n bounds_error: bool, optional\n If True, raise an error when trying to call the\n function out of the interpolation range. If False,\n the values will be extrapolated. Default is False\n fill_value: str, opotional\n How to extrapolate when needed. Default is \"extrapolate\"\n and it is the oonly option so far. 
There is the\n possibility to implement other ways like in\n scipy.interp1d, but it is not done yet.\n \"\"\"\n\n # Mask where wv_map is equal to 0\n wave_map = np.ma.array(wave_map, mask=(wave_map == 0))\n\n # Force wv_map to have the red wavelengths\n # at the end of the detector\n if np.diff(wave_map, axis=-1).mean() < 0:\n wave_map = np.flip(wave_map, axis=-1)\n\n # Number of columns\n ncols = wave_map.shape[-1]\n\n # Create filename # TODO change to CRDS/manual input.\n file = self.file_frame.format(n_os, n_pix)\n\n # Read file\n hdu = fits.open(self.path + file)\n header = hdu[0].header\n kernel, wave_kernel = hdu[0].data\n\n # Where is the blue and red end of the kernel\n i_blue, i_red = header[\"BLUINDEX\"], header[\"REDINDEX\"]\n\n # Flip `ker` to put the red part of the kernel at the end.\n if i_blue > i_red:\n kernel = np.flip(kernel, axis=0)\n\n # Create oversampled pixel position array # TODO easier to read form?\n pixels = np.arange(-(n_pix//2), n_pix//2 + 1/n_os, 1/n_os)\n\n # `wave_kernel` has only the value of the central wavelength\n # of the kernel at each points because it's a function\n # of the pixels (so depends on wv solution).\n wave_center = wave_kernel[0, :]\n\n # Use the wavelength solution to create a mapping.\n # First find the kernels that fall on the detector.\n wave_min = np.amin(wave_map[wave_map > 0])\n wave_max = np.amax(wave_map[wave_map > 0])\n i_min = np.searchsorted(wave_center, wave_min) # TODO searchsorted has offsets?\n i_max = np.searchsorted(wave_center, wave_max) - 1\n\n # SAVE FOR LATER ###########\n # Use the next kernels at each extremities to define the\n # boundaries of the interpolation to use in the class\n # RectBivariateSpline (at the end)\n # bbox = [min pixel, max pixel, min wv_center, max wv_center]\n bbox = [None, None,\n wave_center[np.maximum(i_min-1, 0)],\n wave_center[np.minimum(i_max+1, len(wave_center)-1)]]\n #######################\n\n # Keep only kernels that fall on the detector.\n kernel, wave_kernel = kernel[:, i_min:i_max+1], wave_kernel[:, i_min:i_max+1]\n wave_center = np.array(wave_kernel[0, :])\n\n # Then find the pixel closest to each kernel center\n # and use the surrounding pixels (columns)\n # to get the wavelength. At the boundaries,\n # wavelenght might not be defined or falls out of\n # the detector, so fit a 1-order polynomial to\n # extrapolate. 
The polynomial is also used to interpolate\n # for oversampling.\n i_surround = np.arange(-(n_pix//2), n_pix//2 + 1)\n poly = []\n for i_cen, wv_c in enumerate(wave_center):\n wv = np.ma.masked_all(i_surround.shape)\n\n # Closest pixel wv\n i_row, i_col = np.unravel_index(\n np.argmin(np.abs(wave_map - wv_c)), wave_map.shape\n )\n # Update wavelength center value\n # (take the nearest pixel center value)\n wave_center[i_cen] = wave_map[i_row, i_col]\n\n # Surrounding columns\n index = i_col + i_surround\n\n # Make sure it's on the detector\n i_good = (index >= 0) & (index < ncols)\n\n # Assign wv values\n wv[i_good] = wave_map[i_row, index[i_good]]\n\n # Fit n=1 polynomial\n poly_i = np.polyfit(i_surround[~wv.mask], wv[~wv.mask], 1)\n\n # Project on os pixel grid\n wave_kernel[:, i_cen] = np.poly1d(poly_i)(pixels)\n\n # Save coeffs\n poly.append(poly_i)\n\n # Save attributes\n self.n_pix = n_pix\n self.n_os = n_os\n self.wave_kernel = wave_kernel\n self.kernel = kernel\n self.pixels = pixels\n self.wave_center = wave_center\n self.poly = np.array(poly)\n self.fill_value = fill_value\n self.bounds_error = bounds_error\n\n # 2d Interpolate\n self.f_ker = RectBivariateSpline(pixels, wave_center, kernel, bbox=bbox)\n\n def __call__(self, wave, wave_c):\n \"\"\"\n Returns the kernel value, given the wavelength\n and the kernel center wavelength.\n\n Parameters\n ----------\n wave: 1d array\n wavelenght where the kernel is projected.\n wave_c: 1d array (same shape as `wv`)\n center wavelength of the kernel\n \"\"\"\n\n wave_center = self.wave_center\n poly = self.poly\n fill_value = self.fill_value\n bounds_error = self.bounds_error\n n_wv_c = len(wave_center)\n f_ker = self.f_ker\n n_pix = self.n_pix\n\n # #################################\n # First, convert wv value in pixels\n # using a linear interpolation\n # #################################\n\n # Find corresponding interval\n i_wv_c = np.searchsorted(wave_center, wave_c) - 1\n\n # Deal with values out of bounds\n if bounds_error:\n message = \"Value of wv center out of interpolation range\"\n raise ValueError(message)\n elif fill_value == \"extrapolate\":\n i_wv_c[i_wv_c < 0] = 0\n i_wv_c[i_wv_c >= (n_wv_c - 1)] = n_wv_c - 2\n else:\n message = \"`fill_value`={} is not an valid option.\"\n raise ValueError(message.format(fill_value))\n\n # Compute coefficients that interpolate along wv_centers\n d_wv_c = wave_center[i_wv_c + 1] - wave_center[i_wv_c]\n a_c = (wave_center[i_wv_c + 1] - wave_c) / d_wv_c\n b_c = (wave_c - wave_center[i_wv_c]) / d_wv_c\n\n # Compute a_pix and b_pix from the equation:\n # pix = a_pix * lambda + b_pix\n a_pix = 1 / (a_c * poly[i_wv_c, 0] + b_c * poly[i_wv_c+1, 0])\n b_pix = -(a_c * poly[i_wv_c, 1] + b_c * poly[i_wv_c+1, 1])\n b_pix /= (a_c * poly[i_wv_c, 0] + b_c * poly[i_wv_c+1, 0])\n\n # Compute pixel values\n pix = a_pix * wave + b_pix\n\n # ######################################\n # Second, compute kernel value on the\n # interpolation grid (pixel x wv_center)\n # ######################################\n\n out = f_ker(pix, wave_c, grid=False)\n # Make sure it's not negative\n out[out < 0] = 0\n\n # and put values out of pixel range\n # to zero\n out[pix > n_pix//2] = 0\n out[pix < -(n_pix//2)] = 0\n\n return out\n\n def show(self):\n \"\"\"\n Plot kernels.\n The first figure is a 2d image of the kernels.\n The second figure is a 1d image of the kernels\n in the wavelength space.\n \"\"\"\n\n # 2D figure of the kernels\n fig1 = plt.figure()\n\n # Log plot, so clip values <= 0\n image = np.clip(self.kernel, 
np.min(self.kernel[self.kernel > 0]), np.inf)\n\n # plot\n plt.pcolormesh(self.wave_center, self.pixels, image, norm=LogNorm())\n\n # Labels and others\n plt.colorbar(label=\"Kernel\")\n plt.ylabel(\"Position relative to center [pixel]\")\n plt.xlabel(r\"Center wavelength [$\\mu m$]\")\n plt.tight_layout()\n\n # 1D figure of all kernels\n fig2 = plt.figure()\n plt.plot(self.wave_kernel, self.kernel)\n\n # Labels and others\n plt.ylabel(\"Kernel\")\n plt.xlabel(r\"Wavelength [$\\mu m$]\")\n plt.tight_layout()\n\n return fig1, fig2\n"
] | [
[
"numpy.minimum",
"numpy.sqrt",
"numpy.linspace",
"numpy.all",
"numpy.concatenate",
"numpy.max",
"matplotlib.pyplot.plot",
"numpy.argmin",
"numpy.any",
"numpy.searchsorted",
"numpy.ma.array",
"numpy.where",
"numpy.trapz",
"matplotlib.pyplot.tight_layout",
"scipy.sparse.issparse",
"numpy.unique",
"scipy.sparse.linalg.spsolve",
"numpy.arange",
"numpy.clip",
"scipy.interpolate.Akima1DInterpolator",
"scipy.optimize.minimize_scalar",
"scipy.sparse.diags",
"numpy.full",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.nansum",
"numpy.zeros",
"numpy.nonzero",
"numpy.min",
"scipy.sparse.csr_matrix",
"numpy.log10",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"numpy.maximum",
"matplotlib.pyplot.subplots",
"numpy.indices",
"numpy.ones",
"matplotlib.pyplot.xlabel"
],
[
"numpy.amax",
"numpy.polyfit",
"numpy.poly1d",
"matplotlib.pyplot.plot",
"numpy.searchsorted",
"numpy.ma.array",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.diff",
"matplotlib.pyplot.figure",
"scipy.interpolate.RectBivariateSpline",
"numpy.min",
"numpy.amin",
"numpy.ma.masked_all",
"numpy.array",
"numpy.flip",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.LogNorm",
"numpy.maximum",
"numpy.abs",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
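A minimal, hypothetical sketch of the Tikhonov-factor search described by `get_tikho_tests` and `best_tikho_factor` in the extraction-engine record above: solve the regularised least-squares system for a grid of factors and keep the factor giving the smallest -logL. Identity regularisation stands in for the module's own `t_mat`, the Akima-spline refinement is omitted, and names such as `solve_tikho` and `neg_logl` are illustrative only, not the module's API.

# Hypothetical illustration only; not part of the dataset record above.
import numpy as np

def solve_tikho(a_mat, b_vec, factor):
    # Regularised normal equations: (A^T A + factor^2 I) x = A^T b.
    # The module above builds its own regularisation matrix `t_mat` instead of I.
    n_unknowns = a_mat.shape[1]
    lhs = a_mat.T @ a_mat + factor**2 * np.eye(n_unknowns)
    return np.linalg.solve(lhs, a_mat.T @ b_vec)

def neg_logl(a_mat, b_vec, x, sig=1.0):
    # Same merit as `compute_likelihood` above: -logl = sum((model - data)^2 / sig^2).
    resid = a_mat @ x - b_vec
    return np.sum((resid / sig) ** 2)

rng = np.random.default_rng(0)
a_mat = rng.normal(size=(50, 20))
b_vec = a_mat @ rng.normal(size=20) + 0.01 * rng.normal(size=50)

factors = np.logspace(-6, 2, 17)
tests = [neg_logl(a_mat, b_vec, solve_tikho(a_mat, b_vec, f)) for f in factors]
best_factor = factors[int(np.argmin(tests))]  # coarse analogue of `best_tikho_factor`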
nevinkjohn/luminol | [
"42e4ab969b774ff98f902d064cb041556017f635"
] | [
"src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py"
] | [
"# coding=utf-8\n\"\"\"\n© 2015 LinkedIn Corp. All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\"\"\"\nimport numpy\n\nfrom luminol import utils\nfrom luminol.algorithms.anomaly_detector_algorithms import AnomalyDetectorAlgorithm\nfrom luminol.modules.time_series import TimeSeries\nfrom luminol.constants import (DEFAULT_EMA_SMOOTHING_FACTOR,\n DEFAULT_EMA_WINDOW_SIZE_PCT)\n\n\nclass ExpAvgDetector(AnomalyDetectorAlgorithm):\n\n \"\"\"\n Exponential Moving Average.\n This method uses a data point's deviation from the exponential moving average of a lagging window\n to determine its anomaly score.\n \"\"\"\n def __init__(self, time_series, baseline_time_series=None, smoothing_factor=0, use_lag_window=False, lag_window_size=None):\n \"\"\"\n Initializer\n :param TimeSeries time_series: a TimeSeries object.\n :param TimeSeries baseline_time_series: baseline TimeSeries.\n :param float smoothing_factor: smoothing factor for computing exponential moving average.\n :param int lag_window_size: lagging window size.\n \"\"\"\n super(ExpAvgDetector, self).__init__(self.__class__.__name__, time_series, baseline_time_series)\n self.use_lag_window = use_lag_window\n self.smoothing_factor = smoothing_factor if smoothing_factor > 0 else DEFAULT_EMA_SMOOTHING_FACTOR\n self.lag_window_size = lag_window_size if lag_window_size else int(self.time_series_length * DEFAULT_EMA_WINDOW_SIZE_PCT)\n self.time_series_items = self.time_series.items()\n\n def _compute_anom_score(self, lag_window_points, point):\n \"\"\"\n Compute anomaly score for a single data point.\n Anomaly score for a single data point(t,v) equals: abs(v - ema(lagging window)).\n :param list lag_window_points: values in the lagging window.\n :param float point: data point value.\n :return float: the anomaly score.\n \"\"\"\n ema = utils.compute_ema(self.smoothing_factor, lag_window_points)[-1]\n return abs(point - ema)\n\n def _compute_anom_data_using_window(self):\n \"\"\"\n Compute anomaly scores using a lagging window.\n \"\"\"\n anom_scores = {}\n values = self.time_series.values\n stdev = numpy.std(values)\n for i, (timestamp, value) in enumerate(self.time_series_items):\n if i < self.lag_window_size:\n anom_score = self._compute_anom_score(values[:i + 1], value)\n else:\n anom_score = self._compute_anom_score(values[i - self.lag_window_size: i + 1], value)\n if stdev:\n anom_scores[timestamp] = anom_score / stdev\n else:\n anom_scores[timestamp] = anom_score\n self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))\n\n def _compute_anom_data_decay_all(self):\n \"\"\"\n Compute anomaly scores using a lagging window covering all the data points before.\n \"\"\"\n anom_scores = {}\n values = self.time_series.values\n ema = utils.compute_ema(self.smoothing_factor, values)\n stdev = numpy.std(values)\n for i, (timestamp, value) in enumerate(self.time_series_items):\n anom_score = abs((value - ema[i]) / stdev) if stdev else value - ema[i]\n anom_scores[timestamp] = anom_score\n self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))\n\n def _set_scores(self):\n \"\"\"\n Compute anomaly scores for the time series.\n Currently uses a 
lagging window covering all the data points before.\n \"\"\"\n if self.use_lag_window:\n self._compute_anom_data_using_window()\n self._compute_anom_data_decay_all()\n"
] | [
[
"numpy.std"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
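A small, self-contained sketch of the scoring rule implemented by `ExpAvgDetector` in the record above: a point's anomaly score is its absolute deviation from the exponential moving average, scaled by the series standard deviation. The `compute_ema` below is a hypothetical stand-in for `luminol.utils.compute_ema`, not its actual implementation.

import numpy as np

def compute_ema(smoothing_factor, points):
    # Stand-in EMA; the real helper lives in luminol.utils and may differ in detail.
    ema, prev = [], points[0]
    for p in points:
        prev = smoothing_factor * p + (1 - smoothing_factor) * prev
        ema.append(prev)
    return ema

values = [1.0, 1.1, 0.9, 1.0, 5.0, 1.0, 1.05]
ema = compute_ema(0.2, values)
stdev = np.std(values)
# score_t = |v_t - ema_t| / std, as in _compute_anom_data_decay_all above.
scores = [abs(v - e) / stdev if stdev else abs(v - e) for v, e in zip(values, ema)]
# The spike at index 4 receives by far the largest score.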
akilasadhish/Remote-sensing-scene-classification | [
"18c27648553f0db8c67c7df58b851aa27a4b4942"
] | [
"classification.py"
] | [
"import pandas as pd \r\nimport seaborn as sn\r\nimport numpy as np \r\nimport os\r\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\r\nfrom matplotlib import pyplot as plt\r\n#csv_name:using predict.py and the csv file will be generated.\r\n#labels:A list. The list includes all the category names of the dataset being tested.\r\n#csv_name2:using train.py and the csv file will be gernerated.\r\ndef acc_score(csv_name):\r\n r_c = pd.read_csv('./result_show/' + csv_name)\r\n true_labels = r_c['true_labels']\r\n pred_labels = r_c['pred_labels']\r\n acc = accuracy_score(true_labels, pred_labels)\r\n return acc\r\n\r\ndef report(csv_name, labels):\r\n r_c = pd.read_csv('./result_show/' + csv_name)\r\n true_labels = r_c['true_labels']\r\n pred_labels = r_c['pred_labels']\r\n r = classification_report(true_labels, pred_labels, digits=4, target_names=labels)\r\n return r\r\n\r\ndef matrix(csv_name, labels):\r\n r_c = pd.read_csv('./result_show/' + csv_name)\r\n true_labels = r_c['true_labels']\r\n pred_labels = r_c['pred_labels']\r\n mat = confusion_matrix(true_labels, pred_labels)\r\n mat_2 = np.ndarray((len(labels), len(labels)))\r\n names = []\r\n for n in range(1, len(labels)+1):\r\n name = str(n) + '#'\r\n names.append(name)\r\n\r\n for i in range(len(labels)):\r\n for k in range(len(labels)):\r\n mat_2[i][k] = mat[i][k] / np.sum(mat[i])\r\n\r\n mat_2 = np.round(mat_2, decimals=2)\r\n sn.heatmap(mat_2, annot=True, fmt='.2f', cmap='gray_r', xticklabels=names, yticklabels=labels,\r\n mask=mat_2<0.001, annot_kws={'size':8})\r\n plt.yticks(rotation=360)\r\n plt.show()\r\n\r\ndef plt_acc(csv_name2):\r\n r_c = pd.read_csv(csv_name2)\r\n acc = r_c['acc']\r\n val_acc = r_c['val_acc']\r\n epochs = range(1, len(acc) + 1)\r\n plt.plot(epochs, acc, 'blue', label='train_acc', marker='', linestyle='-')\r\n plt.plot(epochs, val_acc, 'red', label='test_acc', marker='.', linestyle='-')\r\n plt.title('Train and Test Accuracy')\r\n plt.legend()\r\n plt.grid()\r\n plt.show()\r\n\r\ndef plt_loss(csv_name2):\r\n r_c = pd.read_csv(csv_name2)\r\n loss = r_c['loss']\r\n val_loss = r_c['val_loss']\r\n epochs = range(1, len(loss) + 1)\r\n plt.plot(epochs, loss, 'blue', label='train_loss', marker='', linestyle='-')\r\n plt.plot(epochs, val_loss, 'red', label='test_loss', marker='.', linestyle='-')\r\n plt.title('Train and Test Loss')\r\n plt.legend()\r\n plt.grid()\r\n plt.show()\r\n\r\n\r\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"sklearn.metrics.confusion_matrix",
"numpy.round",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"sklearn.metrics.classification_report",
"numpy.sum",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
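A brief sketch of the row normalisation behind the `matrix()` helper in the classification.py record above, shown without the seaborn heatmap so it runs headlessly; the label values here are made up for illustration.

import numpy as np
from sklearn.metrics import confusion_matrix

true_labels = ["bird", "bird", "cat", "cat", "dog", "dog", "dog"]
pred_labels = ["bird", "dog", "cat", "cat", "cat", "dog", "dog"]

mat = confusion_matrix(true_labels, pred_labels, labels=["bird", "cat", "dog"])
# Normalise each row by its true-class count, as matrix() does element by element.
mat_norm = np.round(mat / mat.sum(axis=1, keepdims=True), decimals=2)
print(mat_norm)  # diagonal entries are the per-class recall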
tianjuchen/pyro | [
"d5b0545c4f992d435692080db6969314a2c32f05",
"d5b0545c4f992d435692080db6969314a2c32f05",
"d5b0545c4f992d435692080db6969314a2c32f05",
"d5b0545c4f992d435692080db6969314a2c32f05"
] | [
"tests/distributions/test_zero_inflated.py",
"tests/contrib/gp/test_parameterized.py",
"examples/vae/ss_vae_M2.py",
"tests/infer/reparam/test_unit_jacobian.py"
] | [
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\n\nimport pytest\nimport torch\n\nfrom pyro.distributions import (\n Delta,\n NegativeBinomial,\n Normal,\n Poisson,\n ZeroInflatedDistribution,\n ZeroInflatedNegativeBinomial,\n ZeroInflatedPoisson,\n)\nfrom pyro.distributions.util import broadcast_shape\nfrom tests.common import assert_close\n\n\[email protected](\"gate_shape\", [(), (2,), (3, 1), (3, 2)])\[email protected](\"base_shape\", [(), (2,), (3, 1), (3, 2)])\ndef test_zid_shape(gate_shape, base_shape):\n gate = torch.rand(gate_shape)\n base_dist = Normal(torch.randn(base_shape), torch.randn(base_shape).exp())\n\n d = ZeroInflatedDistribution(base_dist, gate=gate)\n assert d.batch_shape == broadcast_shape(gate_shape, base_shape)\n assert d.support == base_dist.support\n\n d2 = d.expand([4, 3, 2])\n assert d2.batch_shape == (4, 3, 2)\n\n\[email protected](\"rate\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\ndef test_zip_0_gate(rate):\n # if gate is 0 ZIP is Poisson\n zip1 = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.zeros(1))\n zip2 = ZeroInflatedPoisson(torch.tensor(rate), gate_logits=torch.tensor(-99.9))\n pois = Poisson(torch.tensor(rate))\n s = pois.sample((20,))\n zip1_prob = zip1.log_prob(s)\n zip2_prob = zip2.log_prob(s)\n pois_prob = pois.log_prob(s)\n assert_close(zip1_prob, pois_prob)\n assert_close(zip2_prob, pois_prob)\n\n\[email protected](\"rate\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\ndef test_zip_1_gate(rate):\n # if gate is 1 ZIP is Delta(0)\n zip1 = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.ones(1))\n zip2 = ZeroInflatedPoisson(torch.tensor(rate), gate_logits=torch.tensor(math.inf))\n delta = Delta(torch.zeros(1))\n s = torch.tensor([0.0, 1.0])\n zip1_prob = zip1.log_prob(s)\n zip2_prob = zip2.log_prob(s)\n delta_prob = delta.log_prob(s)\n assert_close(zip1_prob, delta_prob)\n assert_close(zip2_prob, delta_prob)\n\n\[email protected](\"gate\", [0.0, 0.25, 0.5, 0.75, 1.0])\[email protected](\"rate\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\ndef test_zip_mean_variance(gate, rate):\n num_samples = 1000000\n zip_ = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.tensor(gate))\n s = zip_.sample((num_samples,))\n expected_mean = zip_.mean\n estimated_mean = s.mean()\n expected_std = zip_.stddev\n estimated_std = s.std()\n assert_close(expected_mean, estimated_mean, atol=1e-02)\n assert_close(expected_std, estimated_std, atol=1e-02)\n\n\[email protected](\"total_count\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\[email protected](\"probs\", [0.1, 0.5, 0.9])\ndef test_zinb_0_gate(total_count, probs):\n # if gate is 0 ZINB is NegativeBinomial\n zinb1 = ZeroInflatedNegativeBinomial(\n total_count=torch.tensor(total_count),\n gate=torch.zeros(1),\n probs=torch.tensor(probs),\n )\n zinb2 = ZeroInflatedNegativeBinomial(\n total_count=torch.tensor(total_count),\n gate_logits=torch.tensor(-99.9),\n probs=torch.tensor(probs),\n )\n neg_bin = NegativeBinomial(torch.tensor(total_count), probs=torch.tensor(probs))\n s = neg_bin.sample((20,))\n zinb1_prob = zinb1.log_prob(s)\n zinb2_prob = zinb2.log_prob(s)\n neg_bin_prob = neg_bin.log_prob(s)\n assert_close(zinb1_prob, neg_bin_prob)\n assert_close(zinb2_prob, neg_bin_prob)\n\n\[email protected](\"total_count\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\[email protected](\"probs\", [0.1, 0.5, 0.9])\ndef test_zinb_1_gate(total_count, probs):\n # if gate is 1 ZINB is Delta(0)\n zinb1 = ZeroInflatedNegativeBinomial(\n total_count=torch.tensor(total_count),\n 
gate=torch.ones(1),\n probs=torch.tensor(probs),\n )\n zinb2 = ZeroInflatedNegativeBinomial(\n total_count=torch.tensor(total_count),\n gate_logits=torch.tensor(math.inf),\n probs=torch.tensor(probs),\n )\n delta = Delta(torch.zeros(1))\n s = torch.tensor([0.0, 1.0])\n zinb1_prob = zinb1.log_prob(s)\n zinb2_prob = zinb2.log_prob(s)\n delta_prob = delta.log_prob(s)\n assert_close(zinb1_prob, delta_prob)\n assert_close(zinb2_prob, delta_prob)\n\n\[email protected](\"gate\", [0.0, 0.25, 0.5, 0.75, 1.0])\[email protected](\"total_count\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\[email protected](\"logits\", [-0.5, 0.5, -0.9, 1.9])\ndef test_zinb_mean_variance(gate, total_count, logits):\n num_samples = 1000000\n zinb_ = ZeroInflatedNegativeBinomial(\n total_count=torch.tensor(total_count),\n gate=torch.tensor(gate),\n logits=torch.tensor(logits),\n )\n s = zinb_.sample((num_samples,))\n expected_mean = zinb_.mean\n estimated_mean = s.mean()\n expected_std = zinb_.stddev\n estimated_std = s.std()\n assert_close(expected_mean, estimated_mean, atol=1e-01)\n assert_close(expected_std, estimated_std, atol=1e-1)\n",
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.nn import Parameter\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.contrib.gp.parameterized import Parameterized\nfrom pyro.nn.module import PyroParam, PyroSample\nfrom tests.common import assert_equal\n\n\ndef test_parameterized():\n class Linear(Parameterized):\n def __init__(self):\n super().__init__()\n self._pyro_name = \"Linear\"\n self.a = PyroParam(torch.tensor(1.0), constraints.positive)\n self.b = PyroSample(dist.Normal(0, 1))\n self.c = PyroSample(dist.Normal(0, 1))\n self.d = PyroSample(dist.Normal(0, 4).expand([1]).to_event())\n self.e = PyroSample(dist.LogNormal(0, 1))\n self.f = PyroSample(dist.MultivariateNormal(torch.zeros(2), torch.eye(2)))\n self.g = PyroSample(dist.Exponential(1))\n\n def forward(self, x):\n return (\n self.a * x\n + self.b\n + self.c\n + self.d\n + self.e\n + self.f\n + self.g\n + self.e\n )\n\n linear = Linear()\n linear.autoguide(\"c\", dist.Normal)\n linear.autoguide(\"d\", dist.MultivariateNormal)\n linear.autoguide(\"e\", dist.Normal)\n\n assert set(dict(linear.named_parameters()).keys()) == {\n \"a_unconstrained\",\n \"b_map\",\n \"c_loc\",\n \"c_scale_unconstrained\",\n \"d_loc\",\n \"d_scale_tril_unconstrained\",\n \"e_loc\",\n \"e_scale_unconstrained\",\n \"f_map\",\n \"g_map_unconstrained\",\n }\n\n def model(x):\n linear.mode = \"model\"\n return linear(x)\n\n def guide(x):\n linear.mode = \"guide\"\n return linear(x)\n\n model_trace = pyro.poutine.trace(model).get_trace(torch.tensor(5.0))\n guide_trace = pyro.poutine.trace(guide).get_trace(torch.tensor(5.0))\n for p in [\"b\", \"c\", \"d\"]:\n assert \"Linear.{}\".format(p) in model_trace.nodes\n assert \"Linear.{}\".format(p) in guide_trace.nodes\n\n assert isinstance(guide_trace.nodes[\"Linear.b\"][\"fn\"], dist.Delta)\n c_dist = guide_trace.nodes[\"Linear.c\"][\"fn\"]\n assert isinstance(getattr(c_dist, \"base_dist\", c_dist), dist.Normal)\n d_dist = guide_trace.nodes[\"Linear.d\"][\"fn\"]\n assert isinstance(getattr(d_dist, \"base_dist\", d_dist), dist.MultivariateNormal)\n\n\ndef test_nested_parameterized():\n class Linear(Parameterized):\n def __init__(self, a):\n super().__init__()\n self.a = Parameter(a)\n\n def forward(self, x):\n return self.a * x\n\n class Quadratic(Parameterized):\n def __init__(self, linear1, linear2, a):\n super().__init__()\n self._pyro_name = \"Quadratic\"\n self.linear1 = linear1\n self.linear2 = linear2\n self.a = Parameter(a)\n\n def forward(self, x):\n return self.linear1(x) * x + self.linear2(self.a)\n\n linear1 = Linear(torch.tensor(1.0))\n linear1.a = PyroSample(dist.Normal(0, 1))\n linear2 = Linear(torch.tensor(1.0))\n linear2.a = PyroSample(dist.Normal(0, 1))\n q = Quadratic(linear1, linear2, torch.tensor(2.0))\n q.a = PyroSample(dist.Cauchy(0, 1))\n\n def model(x):\n q.set_mode(\"model\")\n return q(x)\n\n trace = pyro.poutine.trace(model).get_trace(torch.tensor(5.0))\n assert \"Quadratic.a\" in trace.nodes\n assert \"Quadratic.linear1.a\" in trace.nodes\n assert \"Quadratic.linear2.a\" in trace.nodes\n\n\ndef test_inference():\n class Linear(Parameterized):\n def __init__(self, a):\n super().__init__()\n self.a = Parameter(a)\n\n def forward(self, x):\n return self.a * x\n\n target_a = torch.tensor(2.0)\n x_train = torch.rand(100)\n y_train = target_a * x_train + torch.rand(100) * 0.001\n linear = Linear(torch.tensor(1.0))\n linear.a = PyroSample(dist.Normal(0, 10))\n 
linear.autoguide(\"a\", dist.Normal)\n\n def model(x, y):\n linear.set_mode(\"model\")\n mu = linear(x)\n with pyro.plate(\"plate\"):\n return pyro.sample(\"y\", dist.Normal(mu, 0.1), obs=y)\n\n def guide(x, y):\n linear.set_mode(\"guide\")\n linear._load_pyro_samples()\n\n loss_fn = pyro.infer.Trace_ELBO().differentiable_loss\n optimizer = torch.optim.Adam(linear.parameters(), lr=0.5)\n\n def closure():\n optimizer.zero_grad()\n loss = loss_fn(model, guide, x_train, y_train)\n loss.backward()\n return loss\n\n for i in range(200):\n optimizer.step(closure)\n\n linear.mode = \"guide\"\n assert_equal(linear.a, target_a, prec=0.05)\n",
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\n\nimport torch\nimport torch.nn as nn\nfrom utils.custom_mlp import MLP, Exp\nfrom utils.mnist_cached import MNISTCached, mkdir_p, setup_data_loaders\nfrom utils.vae_plots import mnist_test_tsne_ssvae, plot_conditional_samples_ssvae\nfrom visdom import Visdom\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.contrib.examples.util import print_and_log\nfrom pyro.infer import (\n SVI,\n JitTrace_ELBO,\n JitTraceEnum_ELBO,\n Trace_ELBO,\n TraceEnum_ELBO,\n config_enumerate,\n)\nfrom pyro.optim import Adam\n\n\nclass SSVAE(nn.Module):\n \"\"\"\n This class encapsulates the parameters (neural networks) and models & guides needed to train a\n semi-supervised variational auto-encoder on the MNIST image dataset\n\n :param output_size: size of the tensor representing the class label (10 for MNIST since\n we represent the class labels as a one-hot vector with 10 components)\n :param input_size: size of the tensor representing the image (28*28 = 784 for our MNIST dataset\n since we flatten the images and scale the pixels to be in [0,1])\n :param z_dim: size of the tensor representing the latent random variable z\n (handwriting style for our MNIST dataset)\n :param hidden_layers: a tuple (or list) of MLP layers to be used in the neural networks\n representing the parameters of the distributions in our model\n :param use_cuda: use GPUs for faster training\n :param aux_loss_multiplier: the multiplier to use with the auxiliary loss\n \"\"\"\n\n def __init__(\n self,\n output_size=10,\n input_size=784,\n z_dim=50,\n hidden_layers=(500,),\n config_enum=None,\n use_cuda=False,\n aux_loss_multiplier=None,\n ):\n\n super().__init__()\n\n # initialize the class with all arguments provided to the constructor\n self.output_size = output_size\n self.input_size = input_size\n self.z_dim = z_dim\n self.hidden_layers = hidden_layers\n self.allow_broadcast = config_enum == \"parallel\"\n self.use_cuda = use_cuda\n self.aux_loss_multiplier = aux_loss_multiplier\n\n # define and instantiate the neural networks representing\n # the paramters of various distributions in the model\n self.setup_networks()\n\n def setup_networks(self):\n\n z_dim = self.z_dim\n hidden_sizes = self.hidden_layers\n\n # define the neural networks used later in the model and the guide.\n # these networks are MLPs (multi-layered perceptrons or simple feed-forward networks)\n # where the provided activation parameter is used on every linear layer except\n # for the output layer where we use the provided output_activation parameter\n self.encoder_y = MLP(\n [self.input_size] + hidden_sizes + [self.output_size],\n activation=nn.Softplus,\n output_activation=nn.Softmax,\n allow_broadcast=self.allow_broadcast,\n use_cuda=self.use_cuda,\n )\n\n # a split in the final layer's size is used for multiple outputs\n # and potentially applying separate activation functions on them\n # e.g. 
in this network the final output is of size [z_dim,z_dim]\n # to produce loc and scale, and apply different activations [None,Exp] on them\n self.encoder_z = MLP(\n [self.input_size + self.output_size] + hidden_sizes + [[z_dim, z_dim]],\n activation=nn.Softplus,\n output_activation=[None, Exp],\n allow_broadcast=self.allow_broadcast,\n use_cuda=self.use_cuda,\n )\n\n self.decoder = MLP(\n [z_dim + self.output_size] + hidden_sizes + [self.input_size],\n activation=nn.Softplus,\n output_activation=nn.Sigmoid,\n allow_broadcast=self.allow_broadcast,\n use_cuda=self.use_cuda,\n )\n\n # using GPUs for faster training of the networks\n if self.use_cuda:\n self.cuda()\n\n def model(self, xs, ys=None):\n \"\"\"\n The model corresponds to the following generative process:\n p(z) = normal(0,I) # handwriting style (latent)\n p(y|x) = categorical(I/10.) # which digit (semi-supervised)\n p(x|y,z) = bernoulli(loc(y,z)) # an image\n loc is given by a neural network `decoder`\n\n :param xs: a batch of scaled vectors of pixels from an image\n :param ys: (optional) a batch of the class labels i.e.\n the digit corresponding to the image(s)\n :return: None\n \"\"\"\n # register this pytorch module and all of its sub-modules with pyro\n pyro.module(\"ss_vae\", self)\n\n batch_size = xs.size(0)\n options = dict(dtype=xs.dtype, device=xs.device)\n with pyro.plate(\"data\"):\n\n # sample the handwriting style from the constant prior distribution\n prior_loc = torch.zeros(batch_size, self.z_dim, **options)\n prior_scale = torch.ones(batch_size, self.z_dim, **options)\n zs = pyro.sample(\"z\", dist.Normal(prior_loc, prior_scale).to_event(1))\n\n # if the label y (which digit to write) is supervised, sample from the\n # constant prior, otherwise, observe the value (i.e. score it against the constant prior)\n alpha_prior = torch.ones(batch_size, self.output_size, **options) / (\n 1.0 * self.output_size\n )\n ys = pyro.sample(\"y\", dist.OneHotCategorical(alpha_prior), obs=ys)\n\n # Finally, score the image (x) using the handwriting style (z) and\n # the class label y (which digit to write) against the\n # parametrized distribution p(x|y,z) = bernoulli(decoder(y,z))\n # where `decoder` is a neural network. 
We disable validation\n # since the decoder output is a relaxed Bernoulli value.\n loc = self.decoder.forward([zs, ys])\n pyro.sample(\n \"x\", dist.Bernoulli(loc, validate_args=False).to_event(1), obs=xs\n )\n # return the loc so we can visualize it later\n return loc\n\n def guide(self, xs, ys=None):\n \"\"\"\n The guide corresponds to the following:\n q(y|x) = categorical(alpha(x)) # infer digit from an image\n q(z|x,y) = normal(loc(x,y),scale(x,y)) # infer handwriting style from an image and the digit\n loc, scale are given by a neural network `encoder_z`\n alpha is given by a neural network `encoder_y`\n\n :param xs: a batch of scaled vectors of pixels from an image\n :param ys: (optional) a batch of the class labels i.e.\n the digit corresponding to the image(s)\n :return: None\n \"\"\"\n # inform Pyro that the variables in the batch of xs, ys are conditionally independent\n with pyro.plate(\"data\"):\n\n # if the class label (the digit) is not supervised, sample\n # (and score) the digit with the variational distribution\n # q(y|x) = categorical(alpha(x))\n if ys is None:\n alpha = self.encoder_y.forward(xs)\n ys = pyro.sample(\"y\", dist.OneHotCategorical(alpha))\n\n # sample (and score) the latent handwriting-style with the variational\n # distribution q(z|x,y) = normal(loc(x,y),scale(x,y))\n loc, scale = self.encoder_z.forward([xs, ys])\n pyro.sample(\"z\", dist.Normal(loc, scale).to_event(1))\n\n def classifier(self, xs):\n \"\"\"\n classify an image (or a batch of images)\n\n :param xs: a batch of scaled vectors of pixels from an image\n :return: a batch of the corresponding class labels (as one-hots)\n \"\"\"\n # use the trained model q(y|x) = categorical(alpha(x))\n # compute all class probabilities for the image(s)\n alpha = self.encoder_y.forward(xs)\n\n # get the index (digit) that corresponds to\n # the maximum predicted class probability\n res, ind = torch.topk(alpha, 1)\n\n # convert the digit(s) to one-hot tensor(s)\n ys = torch.zeros_like(alpha).scatter_(1, ind, 1.0)\n return ys\n\n def model_classify(self, xs, ys=None):\n \"\"\"\n this model is used to add an auxiliary (supervised) loss as described in the\n Kingma et al., \"Semi-Supervised Learning with Deep Generative Models\".\n \"\"\"\n # register all pytorch (sub)modules with pyro\n pyro.module(\"ss_vae\", self)\n\n # inform Pyro that the variables in the batch of xs, ys are conditionally independent\n with pyro.plate(\"data\"):\n # this here is the extra term to yield an auxiliary loss that we do gradient descent on\n if ys is not None:\n alpha = self.encoder_y.forward(xs)\n with pyro.poutine.scale(scale=self.aux_loss_multiplier):\n pyro.sample(\"y_aux\", dist.OneHotCategorical(alpha), obs=ys)\n\n def guide_classify(self, xs, ys=None):\n \"\"\"\n dummy guide function to accompany model_classify in inference\n \"\"\"\n pass\n\n\ndef run_inference_for_epoch(data_loaders, losses, periodic_interval_batches):\n \"\"\"\n runs the inference algorithm for an epoch\n returns the values of all losses separately on supervised and unsupervised parts\n \"\"\"\n num_losses = len(losses)\n\n # compute number of batches for an epoch\n sup_batches = len(data_loaders[\"sup\"])\n unsup_batches = len(data_loaders[\"unsup\"])\n batches_per_epoch = sup_batches + unsup_batches\n\n # initialize variables to store loss values\n epoch_losses_sup = [0.0] * num_losses\n epoch_losses_unsup = [0.0] * num_losses\n\n # setup the iterators for training data loaders\n sup_iter = iter(data_loaders[\"sup\"])\n unsup_iter = 
iter(data_loaders[\"unsup\"])\n\n # count the number of supervised batches seen in this epoch\n ctr_sup = 0\n for i in range(batches_per_epoch):\n\n # whether this batch is supervised or not\n is_supervised = (i % periodic_interval_batches == 1) and ctr_sup < sup_batches\n\n # extract the corresponding batch\n if is_supervised:\n (xs, ys) = next(sup_iter)\n ctr_sup += 1\n else:\n (xs, ys) = next(unsup_iter)\n\n # run the inference for each loss with supervised or un-supervised\n # data as arguments\n for loss_id in range(num_losses):\n if is_supervised:\n new_loss = losses[loss_id].step(xs, ys)\n epoch_losses_sup[loss_id] += new_loss\n else:\n new_loss = losses[loss_id].step(xs)\n epoch_losses_unsup[loss_id] += new_loss\n\n # return the values of all losses\n return epoch_losses_sup, epoch_losses_unsup\n\n\ndef get_accuracy(data_loader, classifier_fn, batch_size):\n \"\"\"\n compute the accuracy over the supervised training set or the testing set\n \"\"\"\n predictions, actuals = [], []\n\n # use the appropriate data loader\n for (xs, ys) in data_loader:\n # use classification function to compute all predictions for each batch\n predictions.append(classifier_fn(xs))\n actuals.append(ys)\n\n # compute the number of accurate predictions\n accurate_preds = 0\n for pred, act in zip(predictions, actuals):\n for i in range(pred.size(0)):\n v = torch.sum(pred[i] == act[i])\n accurate_preds += v.item() == 10\n\n # calculate the accuracy between 0 and 1\n accuracy = (accurate_preds * 1.0) / (len(predictions) * batch_size)\n return accuracy\n\n\ndef visualize(ss_vae, viz, test_loader):\n if viz:\n plot_conditional_samples_ssvae(ss_vae, viz)\n mnist_test_tsne_ssvae(ssvae=ss_vae, test_loader=test_loader)\n\n\ndef main(args):\n \"\"\"\n run inference for SS-VAE\n :param args: arguments for SS-VAE\n :return: None\n \"\"\"\n if args.seed is not None:\n pyro.set_rng_seed(args.seed)\n\n viz = None\n if args.visualize:\n viz = Visdom()\n mkdir_p(\"./vae_results\")\n\n # batch_size: number of images (and labels) to be considered in a batch\n ss_vae = SSVAE(\n z_dim=args.z_dim,\n hidden_layers=args.hidden_layers,\n use_cuda=args.cuda,\n config_enum=args.enum_discrete,\n aux_loss_multiplier=args.aux_loss_multiplier,\n )\n\n # setup the optimizer\n adam_params = {\"lr\": args.learning_rate, \"betas\": (args.beta_1, 0.999)}\n optimizer = Adam(adam_params)\n\n # set up the loss(es) for inference. wrapping the guide in config_enumerate builds the loss as a sum\n # by enumerating each class label for the sampled discrete categorical distribution in the model\n guide = config_enumerate(ss_vae.guide, args.enum_discrete, expand=True)\n Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO\n elbo = Elbo(max_plate_nesting=1, strict_enumeration_warning=False)\n loss_basic = SVI(ss_vae.model, guide, optimizer, loss=elbo)\n\n # build a list of all losses considered\n losses = [loss_basic]\n\n # aux_loss: whether to use the auxiliary loss from NIPS 14 paper (Kingma et al)\n if args.aux_loss:\n elbo = JitTrace_ELBO() if args.jit else Trace_ELBO()\n loss_aux = SVI(\n ss_vae.model_classify, ss_vae.guide_classify, optimizer, loss=elbo\n )\n losses.append(loss_aux)\n\n try:\n # setup the logger if a filename is provided\n logger = open(args.logfile, \"w\") if args.logfile else None\n\n data_loaders = setup_data_loaders(\n MNISTCached, args.cuda, args.batch_size, sup_num=args.sup_num\n )\n\n # how often would a supervised batch be encountered during inference\n # e.g. 
if sup_num is 3000, we would have every 16th = int(50000/3000) batch supervised\n # until we have traversed through the all supervised batches\n periodic_interval_batches = int(\n MNISTCached.train_data_size / (1.0 * args.sup_num)\n )\n\n # number of unsupervised examples\n unsup_num = MNISTCached.train_data_size - args.sup_num\n\n # initializing local variables to maintain the best validation accuracy\n # seen across epochs over the supervised training set\n # and the corresponding testing set and the state of the networks\n best_valid_acc, corresponding_test_acc = 0.0, 0.0\n\n # run inference for a certain number of epochs\n for i in range(0, args.num_epochs):\n\n # get the losses for an epoch\n epoch_losses_sup, epoch_losses_unsup = run_inference_for_epoch(\n data_loaders, losses, periodic_interval_batches\n )\n\n # compute average epoch losses i.e. losses per example\n avg_epoch_losses_sup = map(lambda v: v / args.sup_num, epoch_losses_sup)\n avg_epoch_losses_unsup = map(lambda v: v / unsup_num, epoch_losses_unsup)\n\n # store the loss and validation/testing accuracies in the logfile\n str_loss_sup = \" \".join(map(str, avg_epoch_losses_sup))\n str_loss_unsup = \" \".join(map(str, avg_epoch_losses_unsup))\n\n str_print = \"{} epoch: avg losses {}\".format(\n i, \"{} {}\".format(str_loss_sup, str_loss_unsup)\n )\n\n validation_accuracy = get_accuracy(\n data_loaders[\"valid\"], ss_vae.classifier, args.batch_size\n )\n str_print += \" validation accuracy {}\".format(validation_accuracy)\n\n # this test accuracy is only for logging, this is not used\n # to make any decisions during training\n test_accuracy = get_accuracy(\n data_loaders[\"test\"], ss_vae.classifier, args.batch_size\n )\n str_print += \" test accuracy {}\".format(test_accuracy)\n\n # update the best validation accuracy and the corresponding\n # testing accuracy and the state of the parent module (including the networks)\n if best_valid_acc < validation_accuracy:\n best_valid_acc = validation_accuracy\n corresponding_test_acc = test_accuracy\n\n print_and_log(logger, str_print)\n\n final_test_accuracy = get_accuracy(\n data_loaders[\"test\"], ss_vae.classifier, args.batch_size\n )\n print_and_log(\n logger,\n \"best validation accuracy {} corresponding testing accuracy {} \"\n \"last testing accuracy {}\".format(\n best_valid_acc, corresponding_test_acc, final_test_accuracy\n ),\n )\n\n # visualize the conditional samples\n visualize(ss_vae, viz, data_loaders[\"test\"])\n finally:\n # close the logger file object if we opened it earlier\n if args.logfile:\n logger.close()\n\n\nEXAMPLE_RUN = (\n \"example run: python ss_vae_M2.py --seed 0 --cuda -n 2 --aux-loss -alm 46 -enum parallel \"\n \"-sup 3000 -zd 50 -hl 500 -lr 0.00042 -b1 0.95 -bs 200 -log ./tmp.log\"\n)\n\nif __name__ == \"__main__\":\n assert pyro.__version__.startswith(\"1.8.0\")\n\n parser = argparse.ArgumentParser(description=\"SS-VAE\\n{}\".format(EXAMPLE_RUN))\n\n parser.add_argument(\n \"--cuda\", action=\"store_true\", help=\"use GPU(s) to speed up training\"\n )\n parser.add_argument(\n \"--jit\", action=\"store_true\", help=\"use PyTorch jit to speed up training\"\n )\n parser.add_argument(\n \"-n\", \"--num-epochs\", default=50, type=int, help=\"number of epochs to run\"\n )\n parser.add_argument(\n \"--aux-loss\",\n action=\"store_true\",\n help=\"whether to use the auxiliary loss from NIPS 14 paper \"\n \"(Kingma et al). 
It is not used by default \",\n )\n parser.add_argument(\n \"-alm\",\n \"--aux-loss-multiplier\",\n default=46,\n type=float,\n help=\"the multiplier to use with the auxiliary loss\",\n )\n parser.add_argument(\n \"-enum\",\n \"--enum-discrete\",\n default=\"parallel\",\n help=\"parallel, sequential or none. uses parallel enumeration by default\",\n )\n parser.add_argument(\n \"-sup\",\n \"--sup-num\",\n default=3000,\n type=float,\n help=\"supervised amount of the data i.e. \"\n \"how many of the images have supervised labels\",\n )\n parser.add_argument(\n \"-zd\",\n \"--z-dim\",\n default=50,\n type=int,\n help=\"size of the tensor representing the latent variable z \"\n \"variable (handwriting style for our MNIST dataset)\",\n )\n parser.add_argument(\n \"-hl\",\n \"--hidden-layers\",\n nargs=\"+\",\n default=[500],\n type=int,\n help=\"a tuple (or list) of MLP layers to be used in the neural networks \"\n \"representing the parameters of the distributions in our model\",\n )\n parser.add_argument(\n \"-lr\",\n \"--learning-rate\",\n default=0.00042,\n type=float,\n help=\"learning rate for Adam optimizer\",\n )\n parser.add_argument(\n \"-b1\",\n \"--beta-1\",\n default=0.9,\n type=float,\n help=\"beta-1 parameter for Adam optimizer\",\n )\n parser.add_argument(\n \"-bs\",\n \"--batch-size\",\n default=200,\n type=int,\n help=\"number of images (and labels) to be considered in a batch\",\n )\n parser.add_argument(\n \"-log\",\n \"--logfile\",\n default=\"./tmp.log\",\n type=str,\n help=\"filename for logging the outputs\",\n )\n parser.add_argument(\n \"--seed\",\n default=None,\n type=int,\n help=\"seed for controlling randomness in this example\",\n )\n parser.add_argument(\n \"--visualize\",\n action=\"store_true\",\n help=\"use a visdom server to visualize the embeddings\",\n )\n args = parser.parse_args()\n\n # some assertions to make sure that batching math assumptions are met\n assert args.sup_num % args.batch_size == 0, \"assuming simplicity of batching math\"\n assert (\n MNISTCached.validation_size % args.batch_size == 0\n ), \"batch size should divide the number of validation examples\"\n assert (\n MNISTCached.train_data_size % args.batch_size == 0\n ), \"batch size doesn't divide total number of training data examples\"\n assert (\n MNISTCached.test_size % args.batch_size == 0\n ), \"batch size should divide the number of test examples\"\n\n main(args)\n",
"# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\nimport torch\nfrom torch.autograd import grad\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro import poutine\nfrom pyro.distributions.transforms import Permute\nfrom pyro.infer.reparam import UnitJacobianReparam\nfrom tests.common import assert_close\n\nfrom .util import check_init_reparam\n\n\n# Test helper to extract central moments from samples.\ndef get_moments(x):\n n = x.size(0)\n x = x.reshape(n, -1)\n mean = x.mean(0)\n x = x - mean\n std = (x * x).mean(0).sqrt()\n x = x / std\n corr = (x.unsqueeze(-1) * x.unsqueeze(-2)).mean(0).reshape(-1)\n return torch.cat([mean, std, corr])\n\n\[email protected](\"shape\", [(6,), (4, 5), (2, 1, 3)], ids=str)\ndef test_normal(shape):\n loc = torch.empty(shape).uniform_(-1.0, 1.0).requires_grad_()\n scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()\n\n def model():\n with pyro.plate_stack(\"plates\", shape[:-1]):\n with pyro.plate(\"particles\", 10000):\n pyro.sample(\"x\", dist.Normal(loc, scale).expand(shape).to_event(1))\n\n value = poutine.trace(model).get_trace().nodes[\"x\"][\"value\"]\n expected_probe = get_moments(value)\n\n transform = Permute(torch.randperm(shape[-1]))\n rep = UnitJacobianReparam(transform)\n reparam_model = poutine.reparam(model, {\"x\": rep})\n trace = poutine.trace(reparam_model).get_trace()\n assert isinstance(trace.nodes[\"x_transformed\"][\"fn\"], dist.TransformedDistribution)\n assert isinstance(trace.nodes[\"x\"][\"fn\"], dist.Delta)\n value = trace.nodes[\"x\"][\"value\"]\n actual_probe = get_moments(value)\n assert_close(actual_probe, expected_probe, atol=0.1)\n\n for actual_m, expected_m in zip(actual_probe[:10], expected_probe[:10]):\n expected_grads = grad(expected_m.sum(), [loc, scale], retain_graph=True)\n actual_grads = grad(actual_m.sum(), [loc, scale], retain_graph=True)\n assert_close(actual_grads[0], expected_grads[0], atol=0.05)\n assert_close(actual_grads[1], expected_grads[1], atol=0.05)\n\n\[email protected](\"shape\", [(6,), (4, 5), (2, 1, 3)], ids=str)\ndef test_init(shape):\n loc = torch.empty(shape).uniform_(-1.0, 1.0).requires_grad_()\n scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()\n\n def model():\n with pyro.plate_stack(\"plates\", shape[:-1]):\n return pyro.sample(\"x\", dist.Normal(loc, scale).to_event(1))\n\n transform = Permute(torch.randperm(shape[-1]))\n rep = UnitJacobianReparam(transform)\n check_init_reparam(model, rep)\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.randn",
"torch.tensor",
"torch.rand"
],
[
"torch.nn.Parameter",
"torch.zeros",
"torch.eye",
"torch.tensor",
"torch.rand"
],
[
"torch.ones",
"torch.zeros",
"torch.sum",
"torch.zeros_like",
"torch.topk"
],
[
"torch.randperm",
"torch.empty",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
makram93/executors | [
"9e88b56650ee154e811b8ecfaf883861475c082f"
] | [
"tests/integration/psql_dump_reload/test_dump_psql.py"
] | [
"import os\nimport time\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import Dict\n\nimport numpy as np\nimport pytest\nfrom jina import Flow, Document, Executor, DocumentArray, requests\nfrom jina.logging.profile import TimeContext\nfrom jina_commons.indexers.dump import (\n import_vectors,\n import_metas,\n)\n\n\[email protected]()\ndef docker_compose(request):\n os.system(\n f\"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans\"\n )\n time.sleep(5)\n yield\n os.system(\n f\"docker-compose -f {request.param} --project-directory . down --remove-orphans\"\n )\n\n\n# noinspection PyUnresolvedReferences\nfrom jinahub.indexers.storage.PostgreSQLStorage.postgreshandler import (\n doc_without_embedding,\n)\n\n# required in order to be found by Flow creation\n# noinspection PyUnresolvedReferences\nfrom jinahub.indexers.searcher.compound.NumpyPostgresSearcher import (\n NumpyPostgresSearcher,\n)\nfrom jinahub.indexers.storage.PostgreSQLStorage import PostgreSQLStorage\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\ncompose_yml = os.path.join(cur_dir, 'docker-compose.yml')\nstorage_flow_yml = os.path.join(cur_dir, 'flow_storage.yml')\nquery_flow_yml = os.path.join(cur_dir, 'flow_query.yml')\n\n\nclass Pass(Executor):\n @requests(on='/search')\n def pass_me(self, **kwargs):\n pass\n\n\nclass MatchMerger(Executor):\n @requests(on='/search')\n def merge(self, docs_matrix, parameters: Dict, **kwargs):\n if docs_matrix:\n results = OrderedDict()\n for docs in docs_matrix:\n for doc in docs:\n if doc.id in results:\n results[doc.id].matches.extend(doc.matches)\n else:\n results[doc.id] = doc\n\n top_k = parameters.get('top_k')\n if top_k:\n top_k = int(top_k)\n\n for doc in results.values():\n try:\n doc.matches = sorted(\n doc.matches,\n key=lambda m: m.scores['cosine'].value,\n reverse=True,\n )[:top_k]\n except TypeError as e:\n print(f'##### {e}')\n\n docs = DocumentArray(list(results.values()))\n return docs\n\n\ndef get_documents(nr=10, index_start=0, emb_size=7):\n for i in range(index_start, nr + index_start):\n with Document() as d:\n d.id = f'aa{i}' # to test it supports non-int ids\n d.text = f'hello world {i}'\n d.embedding = np.random.random(emb_size)\n d.tags['field'] = f'tag data {i}'\n yield d\n\n\ndef assert_dump_data(dump_path, docs, shards, pea_id):\n docs = sorted(\n docs, key=lambda doc: doc.id\n ) # necessary since the ordering is done as str in PSQL\n size_shard = len(docs) // shards\n size_shard_modulus = len(docs) % shards\n ids_dump, vectors_dump = import_vectors(\n dump_path,\n str(pea_id),\n )\n if pea_id == shards - 1:\n docs_expected = docs[\n (pea_id) * size_shard : (pea_id + 1) * size_shard + size_shard_modulus\n ]\n else:\n docs_expected = docs[(pea_id) * size_shard : (pea_id + 1) * size_shard]\n print(f'### pea {pea_id} has {len(docs_expected)} docs')\n\n # TODO these might fail if we implement any ordering of elements on dumping / reloading\n ids_dump = list(ids_dump)\n vectors_dump = list(vectors_dump)\n np.testing.assert_equal(set(ids_dump), set([d.id for d in docs_expected]))\n np.testing.assert_allclose(vectors_dump, [d.embedding for d in docs_expected])\n\n _, metas_dump = import_metas(\n dump_path,\n str(pea_id),\n )\n metas_dump = list(metas_dump)\n np.testing.assert_equal(\n metas_dump,\n [doc_without_embedding(d) for d in docs_expected],\n )\n\n\ndef path_size(dump_path):\n dir_size = (\n sum(f.stat().st_size for f in Path(dump_path).glob('**/*') if f.is_file()) / 1e6\n )\n return 
dir_size\n\n\n# replicas w 1 shard doesn't work\n# @pytest.mark.parametrize('shards', [1, 3, 7])\[email protected]('shards', [3, 7])\[email protected]('nr_docs', [100])\[email protected]('emb_size', [10])\[email protected]('docker_compose', [compose_yml], indirect=['docker_compose'])\ndef test_dump_reload(tmpdir, nr_docs, emb_size, shards, docker_compose):\n # for psql to start\n time.sleep(2)\n top_k = 5\n docs = DocumentArray(\n list(get_documents(nr=nr_docs, index_start=0, emb_size=emb_size))\n )\n # make sure to delete any overlapping docs\n PostgreSQLStorage().delete(docs, {})\n assert len(docs) == nr_docs\n\n dump_path = os.path.join(str(tmpdir), 'dump_dir')\n os.environ['STORAGE_WORKSPACE'] = os.path.join(str(tmpdir), 'index_ws')\n os.environ['SHARDS'] = str(shards)\n if shards > 1:\n os.environ['USES_AFTER'] = 'MatchMerger'\n else:\n os.environ['USES_AFTER'] = 'Pass'\n\n with Flow.load_config(storage_flow_yml) as flow_storage:\n with Flow.load_config(query_flow_yml) as flow_query:\n with TimeContext(f'### indexing {len(docs)} docs'):\n flow_storage.post(on='/index', inputs=docs)\n\n results = flow_query.post(on='/search', inputs=docs, return_results=True)\n assert len(results[0].docs[0].matches) == 0\n\n with TimeContext(f'### dumping {len(docs)} docs'):\n flow_storage.post(\n on='/dump',\n target_peapod='indexer_storage',\n parameters={\n 'dump_path': dump_path,\n 'shards': shards,\n 'timeout': -1,\n },\n )\n\n dir_size = path_size(dump_path)\n assert dir_size > 0\n print(f'### dump path size: {dir_size} MBs')\n\n flow_query.rolling_update(pod_name='indexer_query', dump_path=dump_path)\n results = flow_query.post(\n on='/search',\n inputs=docs,\n parameters={'top_k': top_k},\n return_results=True,\n )\n assert len(results[0].docs[0].matches) == top_k\n assert results[0].docs[0].matches[0].scores['cosine'].value == 1.0\n\n idx = PostgreSQLStorage()\n assert idx.size == nr_docs\n\n # assert data dumped is correct\n for pea_id in range(shards):\n assert_dump_data(dump_path, docs, shards, pea_id)\n\n\ndef _in_docker():\n \"\"\" Returns: True if running in a Docker container, else False \"\"\"\n with open('/proc/1/cgroup', 'rt') as ifh:\n if 'docker' in ifh.read():\n print('in docker, skipping benchmark')\n return True\n return False\n\n\n# benchmark only\[email protected](\n _in_docker() or ('GITHUB_WORKFLOW' in os.environ),\n reason='skip the benchmark test on github workflow or docker',\n)\[email protected]('docker_compose', [compose_yml], indirect=['docker_compose'])\ndef test_benchmark(tmpdir, docker_compose):\n nr_docs = 1000\n return test_dump_reload(\n tmpdir, nr_docs=nr_docs, emb_size=128, shards=3, docker_compose=compose_yml\n )\n"
] | [
[
"numpy.random.random",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FrederichRiver/neutrino | [
"e91db53486e56ddeb83ae9714311d606b33fb165"
] | [
"applications/alkaid/alkaid/phoenix.py"
] | [
"#!/usr/bin/python3\n# from strategy_base import strategyBase\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, Dataset\n\ninput_size = 4\nhidden_size = 4 * input_size\nnum_layers = 1\nseq_len = 10\nbatch_size = 20\n\n\nclass strategyBase(object):\n def __init__(self):\n pass\n\n def _get_data(self):\n pass\n\n def _settle(self):\n pass\n\n\nclass Phoenix(nn.Module):\n \"\"\"\n input (batch, seq_len, feature_size)\n output (batch, 1)\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(Phoenix, self).__init__()\n self.lstm = nn.LSTM(input_size,\n hidden_size,\n num_layers)\n # nn.init.xavier_uniform_(self.lstm.weight)\n self.linear = nn.Linear(hidden_size, 1)\n\n def forward(self, x):\n x, _ = self.lstm(x)\n print(x.size())\n b, s, h = x.shape\n x = x.reshape(batch_size * seq_len, hidden_size)\n x = self.linear(x)\n print(x.size())\n return x\n\n def train(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=0.001)\n loss_func = nn.CrossEntropyLoss()\n for epoch in range(10):\n pass\n\n\nclass stockData(torch.utils.data.Dataset):\n def __init__(self):\n from polaris.mysql8 import mysqlBase, mysqlHeader\n from dev_global.env import GLOBAL_HEADER\n import torch.tensor\n import pandas\n import numpy as np\n stock_code = 'SH600000'\n self.mysql = mysqlBase(GLOBAL_HEADER)\n result = self.mysql.select_values(stock_code, 'open_price,close_price,highest_price,lowest_price')\n predict = pandas.DataFrame()\n predict['predict'] = result[1].shift(-1)\n # print(result.head(10))\n self.input = []\n self.label = []\n # print(result.shape[0])\n block = 10\n for i in range(int(result.shape[0]/block)):\n x = result[i*block: (i+1)*block]\n y = predict['predict'][(i+1)*block-1]\n self.input.append(torch.tensor(np.array(x), dtype=torch.float32))\n self.label.append(torch.tensor(np.array(y), dtype=torch.float32))\n # print(result.head(15))\n # print(self.input[0])\n # print(self.label[0])\n\n def __len__(self):\n return len(self.input)\n\n def __getitem__(self, index):\n return self.input[index], self.label[index]\n\n\nh = torch.randn(num_layers, batch_size, hidden_size)\nc = torch.randn(num_layers, batch_size, hidden_size)\ninput = torch.randn(seq_len, batch_size, input_size)\n# print(input)\nnet = Phoenix()\n# output, _ = net.lstm(input, (h, c))\n# out2 = net.forward(input)\n\ndt = stockData()\ninp = DataLoader(dt, batch_size=batch_size, drop_last=True)\n\nfor step, (x, y) in enumerate(inp):\n # print(step)\n # print(x.size())\n # print(x)\n # print(input.size())\n # print(input)\n print(y)\n print(net.forward(x))\n pass\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.LSTM",
"torch.randn",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"torch.nn.Linear",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
DSPLab-IC6/ikfs_anomaly_detector | [
"e0a36e185be6e9dcd75451c956a2aaf6a6fec677"
] | [
"ikfs_anomaly_detector/intellectual/tests.py"
] | [
"import unittest\n\nimport numpy as np\n\nfrom ikfs_anomaly_detector.intellectual.autoencoder import LSTMAutoencoder\nfrom ikfs_anomaly_detector.intellectual.predictor import LSTMPredictor\nfrom ikfs_anomaly_detector.intellectual.utils import (\n z_normalization,\n calculate_mean,\n find_anomaly_points,\n calculate_covariance_matrix,\n mahalanobis_distance,\n squared_error,\n ewma,\n)\n\n\nclass TestAutoencoder(unittest.TestCase):\n\n def test_init(self) -> None:\n LSTMAutoencoder(signals_count=5)\n\n\nclass TestPredictor(unittest.TestCase):\n\n def test_init(self) -> None:\n LSTMPredictor()\n\n\nclass TestUtils(unittest.TestCase):\n\n def test_z_normalization(self) -> None:\n arr = np.array([1, 2, 3, 30, 30, 3, 2, 1])\n self.assertListEqual(\n list(z_normalization(arr)), [\n -0.6587095756740946,\n -0.5763708787148328,\n -0.49403218175557095,\n 1.7291126361444984,\n 1.7291126361444984,\n -0.49403218175557095,\n -0.5763708787148328,\n -0.6587095756740946,\n ])\n\n def test_calculate_mean(self) -> None:\n arr = np.array([1, 2, 5, 10])\n self.assertAlmostEqual(calculate_mean(arr), arr.mean(axis=0))\n\n def test_calculate_covariance_matrix(self) -> None:\n arr = np.array([[1., 1.5], [2., 2.], [3., 1.], [1., 5.]])\n mean, matrix = calculate_covariance_matrix(arr)\n\n self.assertAlmostEqual(mean, 2.0625)\n self.assertListEqual([list(v) for v in matrix], [[0.78515625, -0.87890625],\n [-0.87890625, 2.51953125]])\n\n def test_mahalanobis_distance(self) -> None:\n arr = np.array([[1., 1.5], [2., 2.], [3., 1.], [1., 5.]])\n mean, matrix = calculate_covariance_matrix(arr)\n\n self.assertAlmostEqual(mahalanobis_distance(arr[0], mean, matrix), 3.43629, places=5)\n\n def test_squared_error(self) -> None:\n a = np.array([1, 2, 3])\n b = np.array([4, 5, 6])\n\n self.assertListEqual(list(squared_error(a, b)), [3.0, 3.0, 3.0])\n\n def test_ewma(self) -> None:\n arr = np.array(range(10))\n self.assertListEqual(list(ewma(arr, window=4)), [0.0, 0.5, 1.0, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5])\n\n def test_find_anomaly_points(self) -> None:\n arr = np.array([1, 10, 20, 30, 10, 20, 2, 2, 1, 10])\n self.assertListEqual(find_anomaly_points(arr, threshold=5, offset=2), [1, 3, 5, 9])\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yoshihikosuzuki/pbcore | [
"956c45dea8868b5cf7d9b8e9ce98ac8fe8a60150"
] | [
"pbcore/io/align/BamIO.py"
] | [
"# Author: David Alexander\n\n\n\n\n__all__ = [ \"BamReader\", \"IndexedBamReader\" ]\n\ntry:\n from pysam.calignmentfile import AlignmentFile # pylint: disable=no-name-in-module, import-error, fixme, line-too-long\nexcept ImportError:\n from pysam.libcalignmentfile import AlignmentFile # pylint: disable=no-name-in-module, import-error, fixme, line-too-long\nfrom pbcore.io import FastaTable\nfrom pbcore.chemistry import decodeTriple, ChemistryLookupError\n\nimport numpy as np\nfrom itertools import groupby\nfrom functools import wraps\nfrom os.path import abspath, expanduser, exists\n\nfrom ..base import ReaderBase\nfrom .PacBioBamIndex import PacBioBamIndex\nfrom .BamAlignment import *\nfrom ._BamSupport import *\nfrom ._AlignmentMixin import AlignmentReaderMixin, IndexedAlignmentReaderMixin\n\n\ndef requiresBai(method):\n @wraps(method)\n def f(bamReader, *args, **kwargs):\n if not bamReader.peer.has_index():\n raise UnavailableFeature(\"this feature requires an standard BAM index file (bam.bai)\")\n else:\n return method(bamReader, *args, **kwargs)\n return f\n\n\nclass _BamReaderBase(ReaderBase):\n \"\"\"\n The BamReader class provides a high-level interface to PacBio BAM\n files. If a PacBio BAM index (bam.pbi file) is present and the\n user instantiates the BamReader using the reference FASTA as the\n second argument, the BamReader will provide an interface\n compatible with CmpH5Reader.\n \"\"\"\n def _loadReferenceInfo(self):\n refRecords = self.peer.header[\"SQ\"]\n refNames = [r[\"SN\"] for r in refRecords]\n refLengths = [r[\"LN\"] for r in refRecords]\n refMD5s = [r[\"M5\"] for r in refRecords]\n refIds = list(map(self.peer.get_tid, refNames))\n nRefs = len(refRecords)\n\n if nRefs > 0:\n self._referenceInfoTable = np.rec.fromrecords(list(zip(\n refIds,\n refIds,\n refNames,\n refNames,\n refLengths,\n refMD5s,\n np.zeros(nRefs, dtype=np.uint32),\n np.zeros(nRefs, dtype=np.uint32))),\n dtype=[('ID', '<i8'), ('RefInfoID', '<i8'),\n ('Name', 'O'), ('FullName', 'O'),\n ('Length', '<i8'), ('MD5', 'O'),\n ('StartRow', '<u4'), ('EndRow', '<u4')])\n self._referenceDict = {}\n self._referenceDict.update(list(zip(refIds, self._referenceInfoTable)))\n self._referenceDict.update(list(zip(refNames, self._referenceInfoTable)))\n else:\n self._referenceInfoTable = None\n self._referenceDict = None\n\n def _loadReadGroupInfo(self):\n rgs = self.peer.header[\"RG\"]\n readGroupTable_ = []\n\n # RGID -> (\"abstract feature name\" -> actual feature name)\n self._baseFeatureNameMappings = {}\n self._pulseFeatureNameMappings = {}\n\n for rg in rgs:\n rgID = rgAsInt(rg[\"ID\"])\n rgName = rg[\"PU\"]\n ds = dict([pair.split(\"=\") for pair in rg[\"DS\"].split(\";\") if pair != \"\"])\n # spec: we only consider first two components of basecaller version\n # in \"chem\" lookup\n rgReadType = ds[\"READTYPE\"]\n rgChem = \"unknown\"\n rgFrameRate = 0.0\n if rgReadType != \"TRANSCRIPT\":\n rgFrameRate = ds[\"FRAMERATEHZ\"]\n basecallerVersion = \".\".join(ds[\"BASECALLERVERSION\"].split(\".\")[0:2])\n triple = ds[\"BINDINGKIT\"], ds[\"SEQUENCINGKIT\"], basecallerVersion\n rgChem = decodeTriple(*triple)\n\n # Look for the features manifest entries within the DS tag,\n # and build an \"indirection layer\", i.e. to get from\n # \"Ipd\" to \"Ipd:Frames\"\n # (This is a bit messy. 
Can we separate the manifest from\n # the rest of the DS content?)\n baseFeatureNameMapping = { key.split(\":\")[0] : key\n for key in list(ds.keys())\n if key in BASE_FEATURE_TAGS }\n pulseFeatureNameMapping = { key.split(\":\")[0] : key\n for key in list(ds.keys())\n if key in PULSE_FEATURE_TAGS }\n self._baseFeatureNameMappings[rgID] = baseFeatureNameMapping\n self._pulseFeatureNameMappings[rgID] = pulseFeatureNameMapping\n\n readGroupTable_.append((rgID, rgName, rgReadType, rgChem, rgFrameRate,\n frozenset(iter(baseFeatureNameMapping.keys()))))\n\n self._readGroupTable = np.rec.fromrecords(\n readGroupTable_,\n dtype=[(\"ID\" , np.int32),\n (\"MovieName\" , \"O\"),\n (\"ReadType\" , \"O\"),\n (\"SequencingChemistry\", \"O\"),\n (\"FrameRate\", float),\n (\"BaseFeatures\", \"O\")])\n assert len(set(self._readGroupTable.ID)) == len(self._readGroupTable), \\\n \"First 8 chars of read group IDs must be unique!\"\n\n self._readGroupDict = { rg.ID : rg\n for rg in self._readGroupTable }\n\n # The base/pulse features \"available\" to clients of this file are the intersection\n # of features available from each read group.\n self._baseFeaturesAvailable = set.intersection(\n *[set(mapping.keys()) for mapping in list(self._baseFeatureNameMappings.values())])\n self._pulseFeaturesAvailable = set.intersection(\n *[set(mapping.keys()) for mapping in list(self._pulseFeatureNameMappings.values())])\n\n def _loadProgramInfo(self):\n pgRecords = [ (pg[\"ID\"], pg.get(\"VN\", None), pg.get(\"CL\", None))\n for pg in self.peer.header.get(\"PG\", []) ]\n\n if len(pgRecords) > 0:\n self._programTable = np.rec.fromrecords(\n pgRecords,\n dtype=[(\"ID\" , \"O\"),\n (\"Version\", \"O\"),\n (\"CommandLine\", \"O\")])\n else:\n self._programTable = None\n\n def _loadReferenceFasta(self, referenceFastaFname):\n ft = FastaTable(referenceFastaFname)\n # Verify that this FASTA is in agreement with the BAM's\n # reference table---BAM should be a subset.\n fastaIdsAndLens = set((c.id, len(c)) for c in ft)\n bamIdsAndLens = set((c.Name, c.Length) for c in self.referenceInfoTable)\n if not bamIdsAndLens.issubset(fastaIdsAndLens):\n raise ReferenceMismatch(\"FASTA file must contain superset of reference contigs in BAM\")\n self.referenceFasta = ft\n\n def _checkFileCompatibility(self):\n # Verify that this is a \"pacbio\" BAM file of version at least\n # 3.0.1\n badVersionException = IncompatibleFile(\n \"This BAM file is incompatible with this API \" +\n \"(only PacBio BAM files version >= 3.0.1 are supported)\")\n checkedVersion = self.version\n if \"b\" in checkedVersion:\n raise badVersionException\n else:\n major, minor, patch = checkedVersion.split('.')\n if not (major, minor, patch) >= (3, 0, 1):\n raise badVersionException\n\n def __init__(self, fname, referenceFastaFname=None):\n self.filename = fname = abspath(expanduser(fname))\n self.peer = AlignmentFile(fname, \"rb\", check_sq=False)\n self._checkFileCompatibility()\n\n self._loadReferenceInfo()\n self._loadReadGroupInfo()\n self._loadProgramInfo()\n\n self.referenceFasta = None\n if referenceFastaFname is not None:\n if self.isUnmapped:\n raise ValueError(\"Unmapped BAM file--reference FASTA should not be given as argument to BamReader\")\n self._loadReferenceFasta(referenceFastaFname)\n\n @property\n def isIndexLoaded(self):\n return self.index is not None # pylint: disable=no-member\n\n @property\n def isReferenceLoaded(self):\n return self.referenceFasta is not None\n\n @property\n def isUnmapped(self):\n return not(self.isMapped)\n\n @property\n def 
isMapped(self):\n return len(self.peer.header[\"SQ\"]) > 0\n\n @property\n def alignmentIndex(self):\n raise UnavailableFeature(\"BAM has no alignment index\")\n\n @property\n def movieNames(self):\n return set([mi.MovieName for mi in self.readGroupTable])\n\n @property\n def readGroupTable(self):\n return self._readGroupTable\n\n def readGroupInfo(self, readGroupId):\n return self._readGroupDict[readGroupId]\n\n @property\n def sequencingChemistry(self):\n \"\"\"\n List of the sequencing chemistries by movie. Order is\n unspecified.\n \"\"\"\n return list(self.readGroupTable.SequencingChemistry)\n\n @property\n def referenceInfoTable(self):\n return self._referenceInfoTable\n\n #TODO: standard? how about subread instead? why capitalize ccs?\n # can we standardize this? is cDNA an additional possibility\n @property\n def readType(self):\n \"\"\"\n Either \"standard\", \"CCS\", \"mixed\", or \"unknown\", to represent the\n type of PacBio reads aligned in this BAM file.\n \"\"\"\n readTypes = self.readGroupTable.ReadType\n if all(readTypes == \"SUBREAD\"):\n return \"standard\"\n elif all(readTypes == \"CCS\"):\n return \"CCS\"\n elif all(readTypes == \"TRANSCRIPT\"):\n return \"TRANSCRIPT\"\n elif all((readTypes == \"CCS\") | (readTypes == \"SUBREAD\")):\n return \"mixed\"\n else:\n return \"unknown\"\n\n @property\n def version(self):\n return self.peer.header[\"HD\"][\"pb\"]\n\n def versionAtLeast(self, minimalVersion):\n raise Unimplemented()\n\n def softwareVersion(self, programName):\n raise Unimplemented()\n\n @property\n def isSorted(self):\n return self.peer.header[\"HD\"][\"SO\"] == \"coordinate\"\n\n @property\n def isBarcoded(self):\n raise Unimplemented()\n\n @property\n def isEmpty(self):\n return (len(self) == 0)\n\n def referenceInfo(self, key):\n return self._referenceDict[key]\n\n def atOffset(self, offset):\n self.peer.seek(offset)\n return BamAlignment(self, next(self.peer))\n\n def hasBaseFeature(self, featureName):\n return featureName in self._baseFeaturesAvailable\n\n def baseFeaturesAvailable(self):\n return self._baseFeaturesAvailable\n\n def hasPulseFeature(self, featureName):\n return featureName in self._pulseFeaturesAvailable\n\n def pulseFeaturesAvailable(self):\n return self._pulseFeaturesAvailable\n\n def hasPulseFeatures(self):\n \"\"\"\n Is this BAM file a product of running analysis with the\n PacBio-internal analysis mode enabled?\n \"\"\"\n return self.hasPulseFeature(\"PulseCall\")\n\n @property\n def barcode(self):\n raise Unimplemented()\n\n @property\n def barcodeName(self):\n raise Unimplemented()\n\n @property\n def barcodes(self):\n raise Unimplemented()\n\n @requiresBai\n def __len__(self):\n return self.peer.mapped + self.peer.unmapped\n\n def close(self):\n if hasattr(self, \"file\") and self.file is not None:\n self.file.close()\n self.file = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n\nclass BamReader(_BamReaderBase, AlignmentReaderMixin):\n \"\"\"\n Reader for a BAM with a bam.bai (SAMtools) index, but not a\n bam.pbi (PacBio) index. 
Supports basic BAM operations.\n \"\"\"\n def __init__(self, fname, referenceFastaFname=None):\n super(BamReader, self).__init__(fname, referenceFastaFname)\n\n @property\n def index(self):\n return None\n\n def __iter__(self):\n self.peer.reset()\n for a in self.peer:\n yield BamAlignment(self, a)\n\n def readsInRange(self, winId, winStart, winEnd, justIndices=False):\n # PYSAM BUG: fetch doesn't work if arg 1 is tid and not rname\n if not isinstance(winId, str):\n winId = self.peer.get_reference_name(winId)\n if justIndices == True:\n raise UnavailableFeature(\"BAM is not random-access\")\n else:\n return ( BamAlignment(self, it)\n for it in self.peer.fetch(winId, winStart, winEnd, multiple_iterators=False) )\n\n def __getitem__(self, rowNumbers):\n raise UnavailableFeature(\"Use IndexedBamReader to get row-number based slicing.\")\n\n\n\nclass IndexedBamReader(_BamReaderBase, IndexedAlignmentReaderMixin):\n \"\"\"\n A `IndexedBamReader` is a BAM reader class that uses the\n ``bam.pbi`` (PacBio BAM index) file to enable random access by\n \"row number\" and to provide access to precomputed semantic\n information about the BAM records\n \"\"\"\n def __init__(self, fname, referenceFastaFname=None, sharedIndex=None):\n super(IndexedBamReader, self).__init__(fname, referenceFastaFname)\n if sharedIndex is None:\n self.pbi = None\n pbiFname = self.filename + \".pbi\"\n if exists(pbiFname):\n self.pbi = PacBioBamIndex(pbiFname)\n else:\n raise IOError(\"IndexedBamReader requires bam.pbi index file \"+\n \"to read {f}\".format(f=fname))\n else:\n self.pbi = sharedIndex\n\n @property\n def index(self):\n return self.pbi\n\n def atRowNumber(self, rn):\n offset = self.pbi.virtualFileOffset[rn]\n self.peer.seek(offset)\n return BamAlignment(self, next(self.peer), rn)\n\n def readsInRange(self, winId, winStart, winEnd, justIndices=False):\n if isinstance(winId, str):\n winId = self.referenceInfo(winId).ID\n ix = self.pbi.rangeQuery(winId, winStart, winEnd)\n if justIndices:\n return ix\n else:\n return self[ix]\n\n def __iter__(self):\n self.peer.reset()\n for (rowNumber, peerRecord) in enumerate(self.peer):\n yield BamAlignment(self, peerRecord, rowNumber)\n\n def __len__(self):\n return len(self.pbi)\n\n def __getitem__(self, rowNumbers):\n if (isinstance(rowNumbers, int) or\n issubclass(type(rowNumbers), np.integer)):\n return self.atRowNumber(rowNumbers)\n elif isinstance(rowNumbers, slice):\n return ( self.atRowNumber(r)\n for r in range(*rowNumbers.indices(len(self))))\n elif isinstance(rowNumbers, list) or isinstance(rowNumbers, np.ndarray):\n if len(rowNumbers) == 0:\n return []\n else:\n entryType = type(rowNumbers[0])\n if entryType == int or issubclass(entryType, np.integer):\n return ( self.atRowNumber(r) for r in rowNumbers )\n elif entryType == bool or issubclass(entryType, np.bool_):\n return ( self.atRowNumber(r) for r in np.flatnonzero(rowNumbers) )\n raise TypeError(\"Invalid type for IndexedBamReader slicing\")\n\n def __getattr__(self, key):\n if key in self.pbi.columnNames:\n return getattr(self.pbi, key)\n else:\n raise AttributeError(\"no such column in pbi index\")\n\n def __dir__(self):\n basicDir = dir(self.__class__)\n return basicDir + self.pbi.columnNames\n\n @property\n def identity(self):\n \"\"\"\n Fractional alignment sequence identities as numpy array.\n \"\"\"\n if len(self.pbi) == 0:\n return np.array([])\n if not \"nMM\" in self.pbi.columnNames:\n raise AttributeError(\"Identities require mapped BAM.\")\n return 1 - ((self.pbi.nMM + self.pbi.nIns + self.pbi.nDel) 
/\n (self.pbi.aEnd.astype(float) - self.pbi.aStart.astype(float)))\n"
] | [
[
"numpy.array",
"numpy.rec.fromrecords",
"numpy.zeros",
"numpy.flatnonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jschuhmac/qiskit-nature | [
"b8b1181d951cf8fa76fe0db9e5ea192dad5fb186",
"b8b1181d951cf8fa76fe0db9e5ea192dad5fb186",
"b8b1181d951cf8fa76fe0db9e5ea192dad5fb186",
"b8b1181d951cf8fa76fe0db9e5ea192dad5fb186",
"b8b1181d951cf8fa76fe0db9e5ea192dad5fb186"
] | [
"test/problems/second_quantization/lattice/models/test_fermi_hubbard_model.py",
"qiskit_nature/transformers/second_quantization/electronic/active_space_transformer.py",
"qiskit_nature/drivers/second_quantization/pyscfd/pyscfdriver.py",
"test/mappers/second_quantization/resources/bksf_lih.py",
"test/algorithms/pes_samplers/test_extrapolators.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test FermiHubbardModel.\"\"\"\nfrom test import QiskitNatureTestCase\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom retworkx import PyGraph, is_isomorphic\n\nfrom qiskit_nature.problems.second_quantization.lattice import FermiHubbardModel, Lattice\n\n\nclass TestFermiHubbardModel(QiskitNatureTestCase):\n \"\"\"TestFermiHubbardModel\"\"\"\n\n def test_init(self):\n \"\"\"Test init.\"\"\"\n graph = PyGraph(multigraph=False)\n graph.add_nodes_from(range(3))\n weighted_edge_list = [\n (0, 1, 1.0 + 1.0j),\n (0, 2, -1.0),\n (1, 1, 2.0),\n ]\n graph.add_edges_from(weighted_edge_list)\n lattice = Lattice(graph)\n fhm = FermiHubbardModel(lattice, onsite_interaction=10.0)\n\n with self.subTest(\"Check the graph.\"):\n self.assertTrue(\n is_isomorphic(fhm.lattice.graph, lattice.graph, edge_matcher=lambda x, y: x == y)\n )\n\n with self.subTest(\"Check the hopping matrix\"):\n hopping_matrix = fhm.hopping_matrix()\n target_matrix = np.array(\n [[0.0, 1.0 + 1.0j, -1.0], [1.0 - 1.0j, 2.0, 0.0], [-1.0, 0.0, 0.0]]\n )\n assert_array_equal(hopping_matrix, target_matrix)\n\n with self.subTest(\"Check the second q op representation.\"):\n hopping = [\n (\"+_0 -_2\", 1.0 + 1.0j),\n (\"-_0 +_2\", -(1.0 - 1.0j)),\n (\"+_0 -_4\", -1.0),\n (\"-_0 +_4\", 1.0),\n (\"+_1 -_3\", 1.0 + 1.0j),\n (\"-_1 +_3\", -(1.0 - 1.0j)),\n (\"+_1 -_5\", -1.0),\n (\"-_1 +_5\", 1.0),\n (\"+_2 -_2\", 2.0),\n (\"+_3 -_3\", 2.0),\n ]\n\n interaction = [\n (\"+_0 -_0 +_1 -_1\", 10.0),\n (\"+_2 -_2 +_3 -_3\", 10.0),\n (\"+_4 -_4 +_5 -_5\", 10.0),\n ]\n\n ham = hopping + interaction\n\n self.assertSetEqual(set(ham), set(fhm.second_q_ops(display_format=\"sparse\").to_list()))\n\n def test_uniform_parameters(self):\n \"\"\"Test uniform_parameters.\"\"\"\n graph = PyGraph(multigraph=False)\n graph.add_nodes_from(range(3))\n weighted_edge_list = [\n (0, 1, 1.0 + 1.0j),\n (0, 2, -1.0),\n (1, 1, 2.0),\n ]\n graph.add_edges_from(weighted_edge_list)\n lattice = Lattice(graph)\n uniform_fhm = FermiHubbardModel.uniform_parameters(\n lattice,\n uniform_interaction=1.0 + 1.0j,\n uniform_onsite_potential=0.0,\n onsite_interaction=10.0,\n )\n with self.subTest(\"Check the graph.\"):\n target_graph = PyGraph(multigraph=False)\n target_graph.add_nodes_from(range(3))\n target_weight = [\n (0, 1, 1.0 + 1.0j),\n (0, 2, 1.0 + 1.0j),\n (0, 0, 0.0),\n (1, 1, 0.0),\n (2, 2, 0.0),\n ]\n target_graph.add_edges_from(target_weight)\n self.assertTrue(\n is_isomorphic(\n uniform_fhm.lattice.graph, target_graph, edge_matcher=lambda x, y: x == y\n )\n )\n with self.subTest(\"Check the hopping matrix.\"):\n hopping_matrix = uniform_fhm.hopping_matrix()\n target_matrix = np.array(\n [[0.0, 1.0 + 1.0j, 1.0 + 1.0j], [1.0 - 1.0j, 0.0, 0.0], [1.0 - 1.0j, 0.0, 0.0]]\n )\n assert_array_equal(hopping_matrix, target_matrix)\n\n with self.subTest(\"Check the second q op representation.\"):\n hopping = [\n (\"+_0 -_2\", 1.0 + 1.0j),\n (\"-_0 +_2\", -(1.0 - 1.0j)),\n (\"+_0 -_4\", 1.0 + 1.0j),\n (\"-_0 +_4\", -(1.0 - 1.0j)),\n (\"+_1 -_3\", 1.0 + 1.0j),\n (\"-_1 +_3\", 
-(1.0 - 1.0j)),\n (\"+_1 -_5\", 1.0 + 1.0j),\n (\"-_1 +_5\", -(1.0 - 1.0j)),\n (\"+_0 -_0\", 0.0),\n (\"+_1 -_1\", 0.0),\n (\"+_2 -_2\", 0.0),\n (\"+_3 -_3\", 0.0),\n (\"+_4 -_4\", 0.0),\n (\"+_5 -_5\", 0.0),\n ]\n\n interaction = [\n (\"+_0 -_0 +_1 -_1\", 10.0),\n (\"+_2 -_2 +_3 -_3\", 10.0),\n (\"+_4 -_4 +_5 -_5\", 10.0),\n ]\n\n ham = hopping + interaction\n\n self.assertSetEqual(\n set(ham), set(uniform_fhm.second_q_ops(display_format=\"sparse\").to_list())\n )\n\n def test_from_parameters(self):\n \"\"\"Test from_parameters.\"\"\"\n hopping_matrix = np.array(\n [[1.0, 1.0 + 1.0j, 2.0 + 2.0j], [1.0 - 1.0j, 0.0, 0.0], [2.0 - 2.0j, 0.0, 1.0]]\n )\n\n onsite_interaction = 10.0\n fhm = FermiHubbardModel.from_parameters(hopping_matrix, onsite_interaction)\n with self.subTest(\"Check the graph.\"):\n target_graph = PyGraph(multigraph=False)\n target_graph.add_nodes_from(range(3))\n target_weight = [(0, 0, 1.0), (0, 1, 1.0 + 1.0j), (0, 2, 2.0 + 2.0j), (2, 2, 1.0)]\n target_graph.add_edges_from(target_weight)\n self.assertTrue(\n is_isomorphic(fhm.lattice.graph, target_graph, edge_matcher=lambda x, y: x == y)\n )\n\n with self.subTest(\"Check the hopping matrix.\"):\n assert_array_equal(fhm.hopping_matrix(), hopping_matrix)\n\n with self.subTest(\"Check the second q op representation.\"):\n hopping = [\n (\"+_0 -_2\", 1.0 + 1.0j),\n (\"-_0 +_2\", -(1.0 - 1.0j)),\n (\"+_0 -_4\", 2.0 + 2.0j),\n (\"-_0 +_4\", -(2.0 - 2.0j)),\n (\"+_1 -_3\", 1.0 + 1.0j),\n (\"-_1 +_3\", -(1.0 - 1.0j)),\n (\"+_1 -_5\", 2.0 + 2.0j),\n (\"-_1 +_5\", -(2.0 - 2.0j)),\n (\"+_0 -_0\", 1.0),\n (\"+_1 -_1\", 1.0),\n (\"+_4 -_4\", 1.0),\n (\"+_5 -_5\", 1.0),\n ]\n\n interaction = [\n (\"+_0 -_0 +_1 -_1\", onsite_interaction),\n (\"+_2 -_2 +_3 -_3\", onsite_interaction),\n (\"+_4 -_4 +_5 -_5\", onsite_interaction),\n ]\n\n ham = hopping + interaction\n\n self.assertSetEqual(set(ham), set(fhm.second_q_ops(display_format=\"sparse\").to_list()))\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021, 2022.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"The Active-Space Reduction interface.\"\"\"\n\nimport logging\n\nfrom copy import deepcopy\nfrom typing import List, Optional, Tuple, Union, cast\n\nimport numpy as np\n\nfrom qiskit_nature import QiskitNatureError\nfrom qiskit_nature.properties import GroupedProperty, Property\nfrom qiskit_nature.properties.second_quantization import (\n SecondQuantizedProperty,\n GroupedSecondQuantizedProperty,\n)\nfrom qiskit_nature.properties.second_quantization.driver_metadata import DriverMetadata\nfrom qiskit_nature.properties.second_quantization.electronic import ParticleNumber\nfrom qiskit_nature.properties.second_quantization.electronic.bases import (\n ElectronicBasis,\n ElectronicBasisTransform,\n)\nfrom qiskit_nature.properties.second_quantization.electronic.integrals import (\n IntegralProperty,\n OneBodyElectronicIntegrals,\n)\nfrom qiskit_nature.properties.second_quantization.electronic.types import GroupedElectronicProperty\nfrom qiskit_nature.results import ElectronicStructureResult\n\nfrom ..base_transformer import BaseTransformer\n\nlogger = logging.getLogger(__name__)\n\n\nclass ActiveSpaceTransformer(BaseTransformer):\n r\"\"\"The Active-Space reduction.\n\n The reduction is done by computing the inactive Fock operator which is defined as\n :math:`F^I_{pq} = h_{pq} + \\sum_i 2 g_{iipq} - g_{iqpi}` and the inactive energy which is\n given by :math:`E^I = \\sum_j h_{jj} + F ^I_{jj}`, where :math:`i` and :math:`j` iterate over\n the inactive orbitals.\n By using the inactive Fock operator in place of the one-electron integrals, `h1`, the\n description of the active space contains an effective potential generated by the inactive\n electrons. Therefore, this method permits the exclusion of non-core electrons while\n retaining a high-quality description of the system.\n\n For more details on the computation of the inactive Fock operator refer to\n https://arxiv.org/abs/2009.01872.\n\n The active space can be configured in one of the following ways through the initializer:\n - when only `num_electrons` and `num_molecular_orbitals` are specified, these integers\n indicate the number of active electrons and orbitals, respectively. The active space will\n then be chosen around the Fermi level resulting in a unique choice for any pair of\n numbers. Nonetheless, the following criteria must be met:\n\n #. the remaining number of inactive electrons must be a positive, even number\n\n #. the number of active orbitals must not exceed the total number of orbitals minus the\n number of orbitals occupied by the inactive electrons\n\n - when, in addition to the above, `num_alpha` is specified, this can be used to disambiguate\n the active space in systems with non-zero spin. Thus, `num_alpha` determines the number of\n active alpha electrons. The number of active beta electrons can then be determined based\n via `num_beta = num_electrons - num_alpha`. 
The same requirements as listed in the\n previous case must be met.\n - finally, it is possible to select a custom set of active orbitals via their indices using\n `active_orbitals`. This allows selecting an active space which is not placed around the\n Fermi level as described in the first case, above. When using this keyword argument, the\n following criteria must be met *in addition* to the ones listed above:\n\n #. the length of `active_orbitals` must be equal to `num_molecular_orbitals`.\n\n #. the sum of electrons present in `active_orbitals` must be equal to `num_electrons`.\n\n References:\n - *M. Rossmannek, P. Barkoutsos, P. Ollitrault, and I. Tavernelli, arXiv:2009.01872\n (2020).*\n \"\"\"\n\n def __init__(\n self,\n num_electrons: Optional[Union[int, Tuple[int, int]]] = None,\n num_molecular_orbitals: Optional[int] = None,\n active_orbitals: Optional[List[int]] = None,\n ):\n \"\"\"Initializes a transformer which can reduce a `GroupedElectronicProperty` to a configured\n active space.\n\n This transformer requires a `ParticleNumber` property and an `ElectronicBasisTransform`\n pseudo-property to be available as well as `ElectronicIntegrals` in the `ElectronicBasis.AO`\n basis. An `ElectronicStructureDriverResult` produced by Qiskit's drivers in general\n satisfies these conditions unless it was read from an FCIDump file. However, those integrals\n are likely already reduced by the code which produced the file.\n\n Args:\n num_electrons: The number of active electrons. If this is a tuple, it represents the\n number of alpha and beta electrons. If this is a number, it is\n interpreted as the total number of active electrons, should be even, and\n implies that the number of alpha and beta electrons equals half of this\n value, respectively.\n num_molecular_orbitals: The number of active orbitals.\n active_orbitals: A list of indices specifying the molecular orbitals of the active\n space. This argument must match with the remaining arguments and should\n only be used to enforce an active space that is not chosen purely\n around the Fermi level.\n\n Raises:\n QiskitNatureError: if an invalid configuration is provided.\n \"\"\"\n self._num_electrons = num_electrons\n self._num_molecular_orbitals = num_molecular_orbitals\n self._active_orbitals = active_orbitals\n\n try:\n self._check_configuration()\n except QiskitNatureError as exc:\n raise QiskitNatureError(\"Incorrect Active-Space configuration.\") from exc\n\n self._mo_occ_total: np.ndarray = None\n self._active_orbs_indices: List[int] = None\n self._transform_active: ElectronicBasisTransform = None\n self._density_inactive: OneBodyElectronicIntegrals = None\n\n def _check_configuration(self):\n if isinstance(self._num_electrons, int):\n if self._num_electrons % 2 != 0:\n raise QiskitNatureError(\n \"The number of active electrons must be even! 
Otherwise you must specify them \"\n \"as a tuple, not as:\",\n str(self._num_electrons),\n )\n if self._num_electrons < 0:\n raise QiskitNatureError(\n \"The number of active electrons cannot be negative, not:\",\n str(self._num_electrons),\n )\n elif isinstance(self._num_electrons, tuple):\n if not all(isinstance(n_elec, int) and n_elec >= 0 for n_elec in self._num_electrons):\n raise QiskitNatureError(\n \"Neither the number of alpha, nor the number of beta electrons can be \"\n \"negative, not:\",\n str(self._num_electrons),\n )\n else:\n raise QiskitNatureError(\n \"The number of active electrons must be an int, or a tuple thereof, not:\",\n str(self._num_electrons),\n )\n\n if isinstance(self._num_molecular_orbitals, int):\n if self._num_molecular_orbitals < 0:\n raise QiskitNatureError(\n \"The number of active orbitals cannot be negative, not:\",\n str(self._num_molecular_orbitals),\n )\n else:\n raise QiskitNatureError(\n \"The number of active orbitals must be an int, not:\",\n str(self._num_electrons),\n )\n\n def transform(\n self, grouped_property: GroupedSecondQuantizedProperty\n ) -> GroupedElectronicProperty:\n \"\"\"Reduces the given `GroupedElectronicProperty` to a given active space.\n\n Args:\n grouped_property: the `GroupedElectronicProperty` to be transformed.\n\n Returns:\n A new `GroupedElectronicProperty` instance.\n\n Raises:\n QiskitNatureError: If the provided `GroupedElectronicProperty` does not contain a\n `ParticleNumber` or `ElectronicBasisTransform` instance, if more\n electrons or orbitals are requested than are available, or if the\n number of selected active orbital indices does not match\n `num_molecular_orbitals`.\n \"\"\"\n if not isinstance(grouped_property, GroupedElectronicProperty):\n raise QiskitNatureError(\n \"Only `GroupedElectronicProperty` objects can be transformed by this Transformer, \"\n f\"not objects of type, {type(grouped_property)}.\"\n )\n\n particle_number = grouped_property.get_property(ParticleNumber)\n if particle_number is None:\n raise QiskitNatureError(\n \"The provided `GroupedElectronicProperty` does not contain a `ParticleNumber` \"\n \"property, which is required by this transformer!\"\n )\n particle_number = cast(ParticleNumber, particle_number)\n\n electronic_basis_transform = grouped_property.get_property(ElectronicBasisTransform)\n if electronic_basis_transform is None:\n raise QiskitNatureError(\n \"The provided `GroupedElectronicProperty` does not contain an \"\n \"`ElectronicBasisTransform` property, which is required by this transformer!\"\n )\n electronic_basis_transform = cast(ElectronicBasisTransform, electronic_basis_transform)\n\n # get molecular orbital occupation numbers\n occupation_alpha = particle_number.occupation_alpha\n occupation_beta = particle_number.occupation_beta\n self._mo_occ_total = occupation_alpha + occupation_beta\n\n # determine the active space\n self._active_orbs_indices, inactive_orbs_idxs = self._determine_active_space(\n grouped_property\n )\n\n # get molecular orbital coefficients\n coeff_alpha = electronic_basis_transform.coeff_alpha\n coeff_beta = electronic_basis_transform.coeff_beta\n\n # initialize size-reducing basis transformation\n self._transform_active = ElectronicBasisTransform(\n ElectronicBasis.AO,\n ElectronicBasis.MO,\n coeff_alpha[:, self._active_orbs_indices],\n coeff_beta[:, self._active_orbs_indices],\n )\n\n # compute inactive density matrix\n def _inactive_density(mo_occ, mo_coeff):\n return np.dot(\n mo_coeff[:, inactive_orbs_idxs] * 
mo_occ[inactive_orbs_idxs],\n np.transpose(mo_coeff[:, inactive_orbs_idxs]),\n )\n\n self._density_inactive = OneBodyElectronicIntegrals(\n ElectronicBasis.AO,\n (\n _inactive_density(occupation_alpha, coeff_alpha),\n _inactive_density(occupation_beta, coeff_beta),\n ),\n )\n\n # construct new GroupedElectronicProperty\n grouped_property_transformed = ElectronicStructureResult()\n grouped_property_transformed.electronic_basis_transform = self._transform_active\n grouped_property_transformed = self._transform_property(grouped_property) # type: ignore\n\n return grouped_property_transformed\n\n def _determine_active_space(\n self, grouped_property: GroupedElectronicProperty\n ) -> Tuple[List[int], List[int]]:\n \"\"\"Determines the active and inactive orbital indices.\n\n Args:\n grouped_property: the `GroupedElectronicProperty` to be transformed.\n\n Returns:\n The list of active and inactive orbital indices.\n \"\"\"\n particle_number = grouped_property.get_property(ParticleNumber)\n if isinstance(self._num_electrons, tuple):\n num_alpha, num_beta = self._num_electrons\n elif isinstance(self._num_electrons, int):\n num_alpha = num_beta = self._num_electrons // 2\n\n # compute number of inactive electrons\n nelec_total = particle_number._num_alpha + particle_number._num_beta\n nelec_inactive = nelec_total - num_alpha - num_beta\n\n self._validate_num_electrons(nelec_inactive)\n self._validate_num_orbitals(nelec_inactive, particle_number)\n\n # determine active and inactive orbital indices\n if self._active_orbitals is None:\n norbs_inactive = nelec_inactive // 2\n inactive_orbs_idxs = list(range(norbs_inactive))\n active_orbs_idxs = list(\n range(norbs_inactive, norbs_inactive + self._num_molecular_orbitals)\n )\n else:\n active_orbs_idxs = self._active_orbitals\n inactive_orbs_idxs = [\n o\n for o in range(nelec_total // 2)\n if o not in self._active_orbitals and self._mo_occ_total[o] > 0\n ]\n\n return (active_orbs_idxs, inactive_orbs_idxs)\n\n def _validate_num_electrons(self, nelec_inactive: int) -> None:\n \"\"\"Validates the number of electrons.\n\n Args:\n nelec_inactive: the computed number of inactive electrons.\n\n Raises:\n QiskitNatureError: if the number of inactive electrons is either negative or odd.\n \"\"\"\n if nelec_inactive < 0:\n raise QiskitNatureError(\"More electrons requested than available.\")\n if nelec_inactive % 2 != 0:\n raise QiskitNatureError(\"The number of inactive electrons must be even.\")\n\n def _validate_num_orbitals(self, nelec_inactive: int, particle_number: ParticleNumber) -> None:\n \"\"\"Validates the number of orbitals.\n\n Args:\n nelec_inactive: the computed number of inactive electrons.\n particle_number: the `ParticleNumber` containing system size information.\n\n Raises:\n QiskitNatureError: if more orbitals were requested than are available in total or if the\n number of selected orbitals mismatches the specified number of active\n orbitals.\n \"\"\"\n if self._active_orbitals is None:\n norbs_inactive = nelec_inactive // 2\n if (\n norbs_inactive + self._num_molecular_orbitals\n > particle_number._num_spin_orbitals // 2\n ):\n raise QiskitNatureError(\"More orbitals requested than available.\")\n else:\n if self._num_molecular_orbitals != len(self._active_orbitals):\n raise QiskitNatureError(\n \"The number of selected active orbital indices does not \"\n \"match the specified number of active orbitals.\"\n )\n if max(self._active_orbitals) >= particle_number._num_spin_orbitals // 2:\n raise QiskitNatureError(\"More orbitals requested than 
available.\")\n expected_num_electrons = (\n self._num_electrons\n if isinstance(self._num_electrons, int)\n else sum(self._num_electrons)\n )\n if sum(self._mo_occ_total[self._active_orbitals]) != expected_num_electrons:\n raise QiskitNatureError(\n \"The number of electrons in the selected active orbitals \"\n \"does not match the specified number of active electrons.\"\n )\n\n # TODO: can we efficiently extract this into the base class? At least the logic dealing with\n # recursion is general and we should avoid having to duplicate it.\n def _transform_property(self, prop: Property) -> Property:\n \"\"\"Transforms a Property object.\n\n This is a recursive reduction, iterating GroupedProperty objects when encountering one.\n\n Args:\n property: the property object to transform.\n\n Returns:\n The transformed property object.\n\n Raises:\n TypeError: if an unexpected Property subtype is encountered.\n \"\"\"\n transformed_property: Property\n if isinstance(prop, GroupedProperty):\n transformed_property = deepcopy(prop)\n\n # Get the iterator of the Group's properties. We access __iter__() directly to make\n # mypy happy :-)\n iterator = transformed_property.__iter__()\n\n transformed_internal_property = None\n while True:\n try:\n # Send the transformed internal property to the GroupedProperty generator.\n # NOTE: in the first iteration, this variable is None, which is equivalent to\n # starting the iterator.\n # NOTE: a Generator's send method returns the iterators next value [2].\n # [2]: https://docs.python.org/3/reference/expressions.html#generator.send\n internal_property = iterator.send(transformed_internal_property)\n except StopIteration:\n break\n\n try:\n transformed_internal_property = self._transform_property(internal_property)\n except TypeError:\n logger.warning(\n \"The Property %s of type %s could not be transformed!\",\n internal_property.name,\n type(internal_property),\n )\n continue\n\n elif isinstance(prop, IntegralProperty):\n # get matrix operator of IntegralProperty\n fock_operator = prop.integral_operator(self._density_inactive)\n # the total operator equals the AO-1-body-term + the inactive matrix operator\n total_op = prop.get_electronic_integral(ElectronicBasis.AO, 1) + fock_operator\n # compute the energy shift introduced by the ActiveSpaceTransformer\n e_inactive = 0.5 * cast(complex, total_op.compose(self._density_inactive))\n\n transformed_property = deepcopy(prop)\n # insert the AO-basis inactive operator\n transformed_property.add_electronic_integral(fock_operator)\n # actually reduce the system size\n transformed_property.transform_basis(self._transform_active)\n # insert the energy shift\n transformed_property._shift[self.__class__.__name__] = e_inactive\n\n elif isinstance(prop, ParticleNumber):\n p_n = prop\n active_occ_alpha = p_n.occupation_alpha[self._active_orbs_indices]\n active_occ_beta = p_n.occupation_beta[self._active_orbs_indices]\n transformed_property = ParticleNumber(\n len(self._active_orbs_indices) * 2,\n (int(sum(active_occ_alpha)), int(sum(active_occ_beta))),\n active_occ_alpha,\n active_occ_beta,\n )\n\n elif isinstance(prop, SecondQuantizedProperty):\n transformed_property = prop.__class__(len(self._active_orbs_indices) * 2) # type: ignore\n\n elif isinstance(prop, ElectronicBasisTransform):\n # transformation done manually during `transform`\n transformed_property = prop\n\n elif isinstance(prop, DriverMetadata):\n # for the time being we manually catch this to avoid unnecessary warnings\n # TODO: support storing transformer 
information in the DriverMetadata container\n transformed_property = prop\n\n else:\n raise TypeError(f\"{type(prop)} is an unsupported Property-type for this Transformer!\")\n\n return transformed_property\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2022.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"The PySCF Driver.\"\"\"\n\nimport inspect\nimport logging\nimport os\nimport tempfile\nimport warnings\nfrom enum import Enum\nfrom typing import List, Optional, Tuple, Union, Any, Dict\n\nimport numpy as np\nfrom qiskit.utils.validation import validate_min\n\nfrom qiskit_nature.properties.second_quantization.driver_metadata import DriverMetadata\nfrom qiskit_nature.properties.second_quantization.electronic import (\n ElectronicStructureDriverResult,\n AngularMomentum,\n Magnetization,\n ParticleNumber,\n ElectronicEnergy,\n DipoleMoment,\n ElectronicDipoleMoment,\n)\nfrom qiskit_nature.properties.second_quantization.electronic.bases import (\n ElectronicBasis,\n ElectronicBasisTransform,\n)\nfrom qiskit_nature.properties.second_quantization.electronic.integrals import (\n OneBodyElectronicIntegrals,\n TwoBodyElectronicIntegrals,\n)\nimport qiskit_nature.optionals as _optionals\n\nfrom ....exceptions import QiskitNatureError\nfrom ..electronic_structure_driver import ElectronicStructureDriver, MethodType\nfrom ...molecule import Molecule\nfrom ...units_type import UnitsType\n\nlogger = logging.getLogger(__name__)\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning, module=\"pyscf\")\n\n\nclass InitialGuess(Enum):\n \"\"\"Initial Guess Enum\"\"\"\n\n MINAO = \"minao\"\n HCORE = \"1e\"\n ONE_E = \"1e\"\n ATOM = \"atom\"\n\n\n@_optionals.HAS_PYSCF.require_in_instance\nclass PySCFDriver(ElectronicStructureDriver):\n \"\"\"A Second-Quantization driver for Qiskit Nature using the PySCF library.\n\n References:\n https://pyscf.org/\n \"\"\"\n\n def __init__(\n self,\n atom: Union[str, List[str]] = \"H 0.0 0.0 0.0; H 0.0 0.0 0.735\",\n unit: UnitsType = UnitsType.ANGSTROM,\n charge: int = 0,\n spin: int = 0,\n basis: str = \"sto3g\",\n method: MethodType = MethodType.RHF,\n xc_functional: str = \"lda,vwn\",\n xcf_library: str = \"libxc\",\n conv_tol: float = 1e-9,\n max_cycle: int = 50,\n init_guess: InitialGuess = InitialGuess.MINAO,\n max_memory: Optional[int] = None,\n chkfile: Optional[str] = None,\n ) -> None:\n \"\"\"\n Args:\n atom: A string (or a list thereof) denoting the elements and coordinates of all atoms in\n the system. Two formats are allowed; first, the PySCF-style `XYZ` format which is a\n list of strings formatted as `{element symbol} {x_coord} {y_coord} {z_coord}`. If a\n single string is given, the list entries should be joined by `;` as in the example:\n `H 0.0 0.0 0.0; H 0.0 0.0 0.735`.\n Second, the `Z-Matrix` format which is explained at 1_. The previous example\n would be written as `H; H 3 0.735`.\n See also 2_ for more details on geometry specifications supported by PySCF.\n unit: Denotes the unit of coordinates. Valid values are given by the ``UnitsType`` enum.\n charge: The charge of the molecule.\n spin: The spin of the molecule. In accordance with PySCF's definition, the spin equals\n :math:`2*S`, where :math:`S` is the total spin number of the molecule.\n basis: A basis set name as recognized by PySCF (3_), e.g. `sto3g` (the default), `321g`,\n etc. 
Note, that more advanced configuration options like a Dictionary or custom\n basis sets are not allowed for the moment. Refer to 4_ for an extensive list of\n PySCF's valid basis set names.\n method: The SCF method type to be used for the PySCF calculation. While the name\n refers to HF methods, the PySCFDriver also supports KS methods. Refer to the\n ``MethodType`` for a list of the supported methods.\n xc_functional: One of the predefined Exchange-Correlation functional names as recognized\n by PySCF (5_). Defaults to PySCF's default: 'lda,vwn'. __Note: this setting only has\n an effect when a KS method is chosen for `method`.__\n xcf_library: The Exchange-Correlation functional library to be used. This can be either\n 'libxc' (the default) or 'xcfun'. Depending on this value, a different set of values\n for `xc_functional` will be available. Refer to 5_ for more details.\n conv_tol: The SCF convergence tolerance. See 6_ for more details.\n max_cycle: The maximum number of SCF iterations. See 6_ for more details.\n init_guess: The method to make the initial guess for the SCF starting point. Valid\n values are given by the ``InitialGuess`` enum. See 6_ for more details.\n max_memory: The maximum memory that PySCF should use. See 6_ for more details.\n chkfile: The path to a PySCF checkpoint file from which to load a previously run\n calculation. The data stored in this file is assumed to be already converged.\n Refer to 6_ and 7_ for more details.\n\n Raises:\n QiskitNatureError: An invalid input was supplied.\n\n .. _1: https://en.wikipedia.org/wiki/Z-matrix_(chemistry)\n .. _2: https://pyscf.org/user/gto.html#geometry\n .. _3: https://pyscf.org/user/gto.html#basis-set\n .. _4: https://pyscf.org/pyscf_api_docs/pyscf.gto.basis.html#module-pyscf.gto.basis\n .. _5: https://pyscf.org/user/dft.html#predefined-xc-functionals-and-functional-aliases\n .. _6: https://pyscf.org/pyscf_api_docs/pyscf.scf.html#module-pyscf.scf.hf\n .. 
_7: https://pyscf.org/pyscf_api_docs/pyscf.lib.html#module-pyscf.lib.chkfile\n \"\"\"\n super().__init__()\n # pylint: disable=import-error\n from pyscf import gto, scf\n\n # First, ensure that PySCF supports the method\n PySCFDriver.check_method_supported(method)\n\n if isinstance(atom, list):\n atom = \";\".join(atom)\n elif isinstance(atom, str):\n atom = atom.replace(\"\\n\", \";\")\n else:\n raise QiskitNatureError(\n f\"`atom` must be either a `str` or `List[str]`, but you passed {atom}\"\n )\n\n validate_min(\"max_cycle\", max_cycle, 1)\n\n # we use the property-setter to deal with conversion\n self.atom = atom\n self._unit = unit\n self._charge = charge\n self._spin = spin\n self._basis = basis\n self._method = method\n self._xc_functional = xc_functional\n self.xcf_library = xcf_library # validate choice in property setter\n self._conv_tol = conv_tol\n self._max_cycle = max_cycle\n self._init_guess = init_guess.value\n self._max_memory = max_memory\n self._chkfile = chkfile\n\n self._mol: gto.Mole = None\n self._calc: scf.HF = None\n\n @property\n def atom(self) -> str:\n \"\"\"Returns the atom.\"\"\"\n return self._atom\n\n @atom.setter\n def atom(self, atom: Union[str, List[str]]) -> None:\n \"\"\"Sets the atom.\"\"\"\n if isinstance(atom, list):\n atom = \";\".join(atom)\n self._atom = atom.replace(\"\\n\", \";\")\n\n @property\n def unit(self) -> UnitsType:\n \"\"\"Returns the unit.\"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit: UnitsType) -> None:\n \"\"\"Sets the unit.\"\"\"\n self._unit = unit\n\n @property\n def charge(self) -> int:\n \"\"\"Returns the charge.\"\"\"\n return self._charge\n\n @charge.setter\n def charge(self, charge: int) -> None:\n \"\"\"Sets the charge.\"\"\"\n self._charge = charge\n\n @property\n def spin(self) -> int:\n \"\"\"Returns the spin.\"\"\"\n return self._spin\n\n @spin.setter\n def spin(self, spin: int) -> None:\n \"\"\"Sets the spin.\"\"\"\n self._spin = spin\n\n @property\n def basis(self) -> str:\n \"\"\"return basis\"\"\"\n return self._basis\n\n @basis.setter\n def basis(self, value: str) -> None:\n \"\"\"set basis\"\"\"\n self._basis = value\n\n @property\n def method(self) -> MethodType:\n \"\"\"Returns Hartree-Fock/Kohn-Sham method\"\"\"\n return self._method\n\n @method.setter\n def method(self, value: MethodType) -> None:\n \"\"\"Sets Hartree-Fock/Kohn-Sham method\"\"\"\n self._method = value\n\n @property\n def xc_functional(self) -> str:\n \"\"\"Returns the Exchange-Correlation functional.\"\"\"\n return self._xc_functional\n\n @xc_functional.setter\n def xc_functional(self, xc_functional: str) -> None:\n \"\"\"Sets the Exchange-Correlation functional.\"\"\"\n self._xc_functional = xc_functional\n\n @property\n def xcf_library(self) -> str:\n \"\"\"Returns the Exchange-Correlation functional library.\"\"\"\n return self._xcf_library\n\n @xcf_library.setter\n def xcf_library(self, xcf_library: str) -> None:\n \"\"\"Sets the Exchange-Correlation functional library.\"\"\"\n if xcf_library not in (\"libxc\", \"xcfun\"):\n raise QiskitNatureError(\n \"Invalid XCF library. 
It can be either 'libxc' or 'xcfun', not \" f\"'{xcf_library}'\"\n )\n self._xcf_library = xcf_library\n\n @property\n def conv_tol(self) -> float:\n \"\"\"Returns the SCF convergence tolerance.\"\"\"\n return self._conv_tol\n\n @conv_tol.setter\n def conv_tol(self, conv_tol: float) -> None:\n \"\"\"Sets the SCF convergence tolerance.\"\"\"\n self._conv_tol = conv_tol\n\n @property\n def max_cycle(self) -> int:\n \"\"\"Returns the maximum number of SCF iterations.\"\"\"\n return self._max_cycle\n\n @max_cycle.setter\n def max_cycle(self, max_cycle: int) -> None:\n \"\"\"Sets the maximum number of SCF iterations.\"\"\"\n self._max_cycle = max_cycle\n\n @property\n def init_guess(self) -> str:\n \"\"\"Returns the method for the initial guess.\"\"\"\n return self._init_guess\n\n @init_guess.setter\n def init_guess(self, init_guess: str) -> None:\n \"\"\"Sets the method for the initial guess.\"\"\"\n self._init_guess = init_guess\n\n @property\n def max_memory(self) -> int:\n \"\"\"Returns the maximum memory allowance for the calculation.\"\"\"\n return self._max_memory\n\n @max_memory.setter\n def max_memory(self, max_memory: int) -> None:\n \"\"\"Sets the maximum memory allowance for the calculation.\"\"\"\n self._max_memory = max_memory\n\n @property\n def chkfile(self) -> str:\n \"\"\"Returns the path to the PySCF checkpoint file.\"\"\"\n return self._chkfile\n\n @chkfile.setter\n def chkfile(self, chkfile: str) -> None:\n \"\"\"Sets the path to the PySCF checkpoint file.\"\"\"\n self._chkfile = chkfile\n\n @staticmethod\n @_optionals.HAS_PYSCF.require_in_call\n def from_molecule(\n molecule: Molecule,\n basis: str = \"sto3g\",\n method: MethodType = MethodType.RHF,\n driver_kwargs: Optional[Dict[str, Any]] = None,\n ) -> \"PySCFDriver\":\n \"\"\"\n Args:\n molecule: molecule\n basis: basis set\n method: Hartree-Fock Method type\n driver_kwargs: kwargs to be passed to driver\n Returns:\n driver\n \"\"\"\n PySCFDriver.check_method_supported(method)\n kwargs = {}\n if driver_kwargs:\n args = inspect.signature(PySCFDriver.__init__).parameters.keys()\n for key, value in driver_kwargs.items():\n if key not in [\"self\"] and key in args:\n kwargs[key] = value\n\n kwargs[\"atom\"] = [\" \".join(map(str, (name, *coord))) for (name, coord) in molecule.geometry]\n kwargs[\"charge\"] = molecule.charge\n kwargs[\"spin\"] = molecule.multiplicity - 1\n kwargs[\"unit\"] = molecule.units\n kwargs[\"basis\"] = PySCFDriver.to_driver_basis(basis)\n kwargs[\"method\"] = method\n return PySCFDriver(**kwargs)\n\n @staticmethod\n def to_driver_basis(basis: str) -> str:\n \"\"\"\n Converts basis to a driver acceptable basis\n Args:\n basis: The basis set to be used\n Returns:\n driver acceptable basis\n \"\"\"\n return basis\n\n @staticmethod\n def check_method_supported(method: MethodType) -> None:\n \"\"\"\n Checks that PySCF supports this method.\n Args:\n method: Method type\n\n Raises:\n UnsupportMethodError: If method not supported.\n \"\"\"\n # supports all methods\n pass\n\n def run(self) -> ElectronicStructureDriverResult:\n \"\"\"\n Returns:\n ElectronicStructureDriverResult produced by the run driver.\n\n Raises:\n QiskitNatureError: if an error during the PySCF setup or calculation occurred.\n \"\"\"\n self._build_molecule()\n self.run_pyscf()\n\n driver_result = self._construct_driver_result()\n return driver_result\n\n def _build_molecule(self) -> None:\n \"\"\"Builds the PySCF molecule object.\n\n Raises:\n QiskitNatureError: If building the PySCF molecule object failed.\n \"\"\"\n # Get config 
from input parameters\n # molecule is in PySCF atom string format e.g. \"H .0 .0 .0; H .0 .0 0.2\"\n # or in Z-Matrix format e.g. \"H; O 1 1.08; H 2 1.08 1 107.5\"\n # other parameters are as per PySCF got.Mole format\n # pylint: disable=import-error\n from pyscf import gto\n from pyscf.lib import logger as pylogger\n from pyscf.lib import param\n\n atom = self._check_molecule_format(self.atom)\n if self._max_memory is None:\n self._max_memory = param.MAX_MEMORY\n\n try:\n verbose = pylogger.QUIET\n output = None\n if logger.isEnabledFor(logging.DEBUG):\n verbose = pylogger.INFO\n file, output = tempfile.mkstemp(suffix=\".log\")\n os.close(file)\n\n self._mol = gto.Mole(\n atom=atom,\n unit=self._unit.value,\n basis=self._basis,\n max_memory=self._max_memory,\n verbose=verbose,\n output=output,\n )\n self._mol.symmetry = False\n self._mol.charge = self._charge\n self._mol.spin = self._spin\n self._mol.build(parse_arg=False)\n\n if output is not None:\n self._process_pyscf_log(output)\n try:\n os.remove(output)\n except Exception: # pylint: disable=broad-except\n pass\n\n except Exception as exc:\n raise QiskitNatureError(\"Failed to build the PySCF Molecule object.\") from exc\n\n @staticmethod\n def _check_molecule_format(val: str) -> Union[str, List[str]]:\n \"\"\"Ensures the molecule coordinates are in XYZ format.\n\n This utility automatically converts a Z-matrix coordinate format into XYZ coordinates.\n\n Args:\n val: the atomic coordinates.\n\n Raises:\n QiskitNatureError: If the provided coordinate are badly formatted.\n\n Returns:\n The coordinates in XYZ format.\n \"\"\"\n # pylint: disable=import-error\n from pyscf import gto\n\n atoms = [x.strip() for x in val.split(\";\")]\n if atoms is None or len(atoms) < 1:\n raise QiskitNatureError(\"Molecule format error: \" + val)\n\n # An xyz format has 4 parts in each atom, if not then do zmatrix convert\n # Allows dummy atoms, using symbol 'X' in zmatrix format for coord computation to xyz\n parts = [x.strip() for x in atoms[0].split()]\n if len(parts) != 4:\n try:\n newval = []\n for entry in gto.mole.from_zmatrix(val):\n if entry[0].upper() != \"X\":\n newval.append(entry)\n return newval\n except Exception as exc:\n raise QiskitNatureError(\"Failed to convert atom string: \" + val) from exc\n\n return val\n\n def run_pyscf(self) -> None:\n \"\"\"Runs the PySCF calculation.\n\n This method is part of the public interface to allow the user to easily overwrite it in a\n subclass to further tailor the behavior to some specific use case.\n\n Raises:\n QiskitNatureError: If an invalid HF method type was supplied.\n \"\"\"\n # pylint: disable=import-error\n from pyscf import dft, scf\n from pyscf.lib import chkfile as lib_chkfile\n\n method_name = None\n method_cls = None\n try:\n # attempt to gather the SCF-method class specified by the MethodType\n method_name = self.method.value.upper()\n method_cls = getattr(scf, method_name)\n except AttributeError as exc:\n raise QiskitNatureError(f\"Failed to load {method_name} HF object.\") from exc\n\n self._calc = method_cls(self._mol)\n\n if method_name in (\"RKS\", \"ROKS\", \"UKS\"):\n self._calc._numint.libxc = getattr(dft, self.xcf_library)\n self._calc.xc = self.xc_functional\n\n if self._chkfile is not None and os.path.exists(self._chkfile):\n self._calc.__dict__.update(lib_chkfile.load(self._chkfile, \"scf\"))\n\n logger.info(\"PySCF loaded from chkfile e(hf): %s\", self._calc.e_tot)\n else:\n self._calc.conv_tol = self._conv_tol\n self._calc.max_cycle = self._max_cycle\n 
self._calc.init_guess = self._init_guess\n self._calc.kernel()\n\n logger.info(\n \"PySCF kernel() converged: %s, e(hf): %s\",\n self._calc.converged,\n self._calc.e_tot,\n )\n\n def _construct_driver_result(self) -> ElectronicStructureDriverResult:\n driver_result = ElectronicStructureDriverResult()\n\n self._populate_driver_result_molecule(driver_result)\n self._populate_driver_result_metadata(driver_result)\n self._populate_driver_result_basis_transform(driver_result)\n self._populate_driver_result_particle_number(driver_result)\n self._populate_driver_result_electronic_energy(driver_result)\n self._populate_driver_result_electronic_dipole_moment(driver_result)\n\n # TODO: once https://github.com/Qiskit/qiskit-nature/issues/312 is fixed we can stop adding\n # these properties by default.\n # if not settings.dict_aux_operators:\n driver_result.add_property(AngularMomentum(self._mol.nao * 2))\n driver_result.add_property(Magnetization(self._mol.nao * 2))\n\n return driver_result\n\n def _populate_driver_result_molecule(\n self, driver_result: ElectronicStructureDriverResult\n ) -> None:\n coords = self._mol.atom_coords(unit=\"Angstrom\")\n geometry = [(self._mol.atom_pure_symbol(i), list(xyz)) for i, xyz in enumerate(coords)]\n\n driver_result.molecule = Molecule(\n geometry,\n multiplicity=self._spin + 1,\n charge=self._charge,\n masses=list(self._mol.atom_mass_list()),\n )\n\n def _populate_driver_result_metadata(\n self, driver_result: ElectronicStructureDriverResult\n ) -> None:\n # pylint: disable=import-error\n from pyscf import __version__ as pyscf_version\n\n cfg = [\n f\"atom={self._atom}\",\n f\"unit={self._unit.value}\",\n f\"charge={self._charge}\",\n f\"spin={self._spin}\",\n f\"basis={self._basis}\",\n f\"method={self.method.value}\",\n f\"conv_tol={self._conv_tol}\",\n f\"max_cycle={self._max_cycle}\",\n f\"init_guess={self._init_guess}\",\n f\"max_memory={self._max_memory}\",\n ]\n\n if self.method.value.lower() in (\"rks\", \"roks\", \"uks\"):\n cfg.extend(\n [\n f\"xc_functional={self._xc_functional}\",\n f\"xcf_library={self._xcf_library}\",\n ]\n )\n\n driver_result.add_property(DriverMetadata(\"PYSCF\", pyscf_version, \"\\n\".join(cfg + [\"\"])))\n\n def _populate_driver_result_basis_transform(\n self, driver_result: ElectronicStructureDriverResult\n ) -> None:\n # pylint: disable=import-error\n from pyscf.tools import dump_mat\n\n mo_coeff, mo_coeff_b = self._extract_mo_data(\"mo_coeff\", array_dimension=3)\n\n if logger.isEnabledFor(logging.DEBUG):\n # Add some more to PySCF output...\n # First analyze() which prints extra information about MO energy and occupation\n self._mol.stdout.write(\"\\n\")\n self._calc.analyze()\n # Now labelled orbitals for contributions to the MOs for s,p,d etc of each atom\n self._mol.stdout.write(\"\\n\\n--- Alpha Molecular Orbitals ---\\n\\n\")\n dump_mat.dump_mo(self._mol, mo_coeff, digits=7, start=1)\n if mo_coeff_b is not None:\n self._mol.stdout.write(\"\\n--- Beta Molecular Orbitals ---\\n\\n\")\n dump_mat.dump_mo(self._mol, mo_coeff_b, digits=7, start=1)\n self._mol.stdout.flush()\n\n driver_result.add_property(\n ElectronicBasisTransform(\n ElectronicBasis.AO,\n ElectronicBasis.MO,\n mo_coeff,\n mo_coeff_b,\n )\n )\n\n def _populate_driver_result_particle_number(\n self, driver_result: ElectronicStructureDriverResult\n ) -> None:\n mo_occ, mo_occ_b = self._extract_mo_data(\"mo_occ\")\n\n driver_result.add_property(\n ParticleNumber(\n num_spin_orbitals=self._mol.nao * 2,\n num_particles=(self._mol.nelec[0], 
self._mol.nelec[1]),\n occupation=mo_occ,\n occupation_beta=mo_occ_b,\n )\n )\n\n def _populate_driver_result_electronic_energy(\n self, driver_result: ElectronicStructureDriverResult\n ) -> None:\n # pylint: disable=import-error\n from pyscf import gto\n\n basis_transform = driver_result.get_property(ElectronicBasisTransform)\n\n one_body_ao = OneBodyElectronicIntegrals(\n ElectronicBasis.AO,\n (self._calc.get_hcore(), None),\n )\n\n two_body_ao = TwoBodyElectronicIntegrals(\n ElectronicBasis.AO,\n (self._mol.intor(\"int2e\", aosym=1), None, None, None),\n )\n\n one_body_mo = one_body_ao.transform_basis(basis_transform)\n two_body_mo = two_body_ao.transform_basis(basis_transform)\n\n electronic_energy = ElectronicEnergy(\n [one_body_ao, two_body_ao, one_body_mo, two_body_mo],\n nuclear_repulsion_energy=gto.mole.energy_nuc(self._mol),\n reference_energy=self._calc.e_tot,\n )\n\n electronic_energy.kinetic = OneBodyElectronicIntegrals(\n ElectronicBasis.AO,\n (self._mol.intor_symmetric(\"int1e_kin\"), None),\n )\n electronic_energy.overlap = OneBodyElectronicIntegrals(\n ElectronicBasis.AO,\n (self._calc.get_ovlp(), None),\n )\n\n orbs_energy, orbs_energy_b = self._extract_mo_data(\"mo_energy\")\n orbital_energies = (\n (orbs_energy, orbs_energy_b) if orbs_energy_b is not None else orbs_energy\n )\n electronic_energy.orbital_energies = np.asarray(orbital_energies)\n\n driver_result.add_property(electronic_energy)\n\n def _populate_driver_result_electronic_dipole_moment(\n self, driver_result: ElectronicStructureDriverResult\n ) -> None:\n basis_transform = driver_result.get_property(ElectronicBasisTransform)\n\n self._mol.set_common_orig((0, 0, 0))\n ao_dip = self._mol.intor_symmetric(\"int1e_r\", comp=3)\n\n d_m = self._calc.make_rdm1(self._calc.mo_coeff, self._calc.mo_occ)\n\n if not (isinstance(d_m, np.ndarray) and d_m.ndim == 2):\n d_m = d_m[0] + d_m[1]\n\n elec_dip = np.negative(np.einsum(\"xij,ji->x\", ao_dip, d_m).real)\n elec_dip = np.round(elec_dip, decimals=8)\n nucl_dip = np.einsum(\"i,ix->x\", self._mol.atom_charges(), self._mol.atom_coords())\n nucl_dip = np.round(nucl_dip, decimals=8)\n\n logger.info(\"HF Electronic dipole moment: %s\", elec_dip)\n logger.info(\"Nuclear dipole moment: %s\", nucl_dip)\n logger.info(\"Total dipole moment: %s\", nucl_dip + elec_dip)\n\n x_dip_ints = OneBodyElectronicIntegrals(ElectronicBasis.AO, (ao_dip[0], None))\n y_dip_ints = OneBodyElectronicIntegrals(ElectronicBasis.AO, (ao_dip[1], None))\n z_dip_ints = OneBodyElectronicIntegrals(ElectronicBasis.AO, (ao_dip[2], None))\n\n x_dipole = DipoleMoment(\"x\", [x_dip_ints, x_dip_ints.transform_basis(basis_transform)])\n y_dipole = DipoleMoment(\"y\", [y_dip_ints, y_dip_ints.transform_basis(basis_transform)])\n z_dipole = DipoleMoment(\"z\", [z_dip_ints, z_dip_ints.transform_basis(basis_transform)])\n\n driver_result.add_property(\n ElectronicDipoleMoment(\n [x_dipole, y_dipole, z_dipole],\n nuclear_dipole_moment=nucl_dip,\n reverse_dipole_sign=True,\n )\n )\n\n def _extract_mo_data(\n self, name: str, array_dimension: int = 2\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Extract molecular orbital data from a PySCF calculation object.\n\n Args:\n name: the name of the molecular orbital data field to extract.\n array_dimension: since PySCF 1.6.2, the alpha and beta components are no longer stored\n as a tuple but as a multi-dimensional numpy array. This argument specifies the\n dimension of that array in such a case. 
Making this configurable permits this\n function to be used to extract both, MO coefficients (3D array) and MO energies (2D\n array).\n\n Returns:\n The (alpha, beta) tuple of MO data.\n \"\"\"\n attr = getattr(self._calc, name)\n if isinstance(attr, tuple):\n attr_alpha = attr[0]\n attr_beta = attr[1]\n else:\n # Since PySCF 1.6.2, instead of a tuple it could be a multi-dimensional array with the\n # first dimension indexing the arrays for alpha and beta\n if len(attr.shape) == array_dimension:\n attr_alpha = attr[0]\n attr_beta = attr[1]\n else:\n attr_alpha = attr\n attr_beta = None\n return attr_alpha, attr_beta\n\n def _process_pyscf_log(self, logfile: str) -> None:\n \"\"\"Processes a PySCF logfile.\n\n Args:\n logfile: the path of the PySCF logfile.\n \"\"\"\n with open(logfile, \"r\", encoding=\"utf8\") as file:\n content = file.readlines()\n\n for i, _ in enumerate(content):\n if content[i].startswith(\"System:\"):\n content = content[i:]\n break\n\n logger.debug(\"PySCF processing messages log:\\n%s\", \"\".join(content))\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2022.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nFermionic Hamilton for LiH and Qubit Hamiltonian obtained from BKSF\n\nThe Fermionic Hamiltonian is generated by the following code:\n\n from qiskit_nature.drivers import Molecule\n\n molecule = Molecule(\n # coordinates are given in Angstrom\n geometry=[\n [\"Li\", [0.0, 0.0, 0.0]],\n [\"H\", [0.0, 0.0, 1.6]],\n ],\n multiplicity=1, # = 2*spin + 1\n charge=0,\n )\n\n from qiskit_nature.drivers.second_quantization import ElectronicStructureMoleculeDriver,\n ElectronicStructureDriverType\n\n driver = ElectronicStructureMoleculeDriver(\n molecule=molecule,\n basis=\"sto3g\",\n driver_type=ElectronicStructureDriverType.PYSCF,\n )\n\n from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem\n from qiskit_nature.transformers.second_quantization.electronic import ActiveSpaceTransformer\n\n transformer = ActiveSpaceTransformer(\n num_electrons=2,\n num_molecular_orbitals=3,\n )\n\n problem_reduced = ElectronicStructureProblem(driver, [transformer])\n second_q_ops_reduced = problem_reduced.second_q_ops()\n hamiltonian_reduced = second_q_ops_reduced[0]\n\"\"\"\n\nimport numpy\nfrom qiskit.quantum_info import PauliList, SparsePauliOp\nfrom qiskit_nature.operators.second_quantization import FermionicOp\n\n\nFERMIONIC_HAMILTONIAN = FermionicOp(\n [\n (\"+-I+-I\", (0.013063981998607477 + 0j)),\n (\"+-I-+I\", (-0.013063981998607472 + 0j)),\n (\"+-IIII\", (0.048579599520367646 + 0j)),\n (\"+-IIIN\", (0.005767502046076787 + 0j)),\n (\"+-IINI\", (0.007484171005646165 + 0j)),\n (\"+-INII\", (-0.04857958891220289 + 0j)),\n (\"+-NIII\", (-0.013509390402447545 + 0j)),\n (\"+I-+I-\", (0.023422673239767655 + 0j)),\n (\"+I--I+\", (-0.023422673239767655 + 0j)),\n (\"+I-I+-\", (0.019276892448524333 + 0j)),\n (\"+I-I-+\", (-0.019276892448524333 + 0j)),\n (\"-+I+-I\", (-0.013063981998607472 + 0j)),\n (\"-+I-+I\", (0.013063981998607473 - 0j)),\n (\"-+IIII\", (-0.04857959952036771 + 0j)),\n (\"-+IIIN\", (-0.005767502046076761 + 0j)),\n (\"-+IINI\", (-0.007484171005646201 + 0j)),\n (\"-+INII\", (0.04857958891220291 + 0j)),\n (\"-+NIII\", (0.013509390402447573 + 0j)),\n (\"-I++I-\", (-0.023422673239767655 + 0j)),\n (\"-I+-I+\", (0.023422673239767655 - 0j)),\n (\"-I+I+-\", (-0.019276892448524333 + 0j)),\n (\"-I+I-+\", (0.019276892448524333 - 0j)),\n (\"I+-+I-\", (0.019276892448524333 + 0j)),\n (\"I+--I+\", (-0.019276892448524333 + 0j)),\n (\"I+-I+-\", (0.041276695997097185 + 0j)),\n (\"I+-I-+\", (-0.04127669599709719 + 0j)),\n (\"I-++I-\", (-0.019276892448524333 + 0j)),\n (\"I-+-I+\", (0.019276892448524333 - 0j)),\n (\"I-+I+-\", (-0.04127669599709719 + 0j)),\n (\"I-+I-+\", (0.041276695997097185 - 0j)),\n (\"III+-I\", (0.048579599520367646 + 0j)),\n (\"III+-N\", (-0.013509390402447545 + 0j)),\n (\"III-+I\", (-0.04857959952036771 + 0j)),\n (\"III-+N\", (0.013509390402447573 + 0j)),\n (\"IIIIIN\", (-0.35297896520254896 + 0j)),\n (\"IIIINI\", (-0.355939542660255 + 0j)),\n (\"IIIINN\", (0.2407146489655783 + 0j)),\n (\"IIINII\", (-0.772581720072654 + 0j)),\n (\"IIININ\", (0.24674881903629914 + 0j)),\n 
(\"IIINNI\", (0.2105460611420031 + 0j)),\n (\"IIN+-I\", (0.005767502046076787 + 0j)),\n (\"IIN-+I\", (-0.005767502046076761 + 0j)),\n (\"IINIII\", (-0.35297896520254896 + 0j)),\n (\"IINIIN\", (0.3129455111594082 + 0j)),\n (\"IININI\", (0.28199134496267547 + 0j)),\n (\"IINNII\", (0.2701714922760668 + 0j)),\n (\"INI+-I\", (0.007484171005646165 + 0j)),\n (\"INI-+I\", (-0.007484171005646201 + 0j)),\n (\"INIIII\", (-0.355939542660255 + 0j)),\n (\"INIIIN\", (0.28199134496267547 + 0j)),\n (\"INIINI\", (0.3378822722917939 + 0j)),\n (\"ININII\", (0.2236100431406106 + 0j)),\n (\"INNIII\", (0.2407146489655783 + 0j)),\n (\"NII+-I\", (-0.04857958891220289 + 0j)),\n (\"NII-+I\", (0.04857958891220291 - 0j)),\n (\"NIIIII\", (-0.772581720072654 + 0j)),\n (\"NIIIIN\", (0.2701714922760668 + 0j)),\n (\"NIIINI\", (0.2236100431406106 + 0j)),\n (\"NIINII\", (0.48731096863288564 + 0j)),\n (\"NINIII\", (0.24674881903629914 + 0j)),\n (\"NNIIII\", (0.2105460611420031 + 0j)),\n ],\n display_format=\"dense\",\n)\n\n\ndef _qubit_operator():\n pauli_list = PauliList(\n [\n \"IIIIIIIIIII\",\n \"IIIIIIIZZZY\",\n \"IIIIIIIZZZZ\",\n \"IIIIIIXIXII\",\n \"IIIIIXZIZXI\",\n \"IIIIZYIZIYI\",\n \"IIIIZZYZYZI\",\n \"IIIIZZZIIIY\",\n \"IIIIZZZIIIZ\",\n \"IIIIZZZZZZI\",\n \"IIIXIIZXZIZ\",\n \"IIIXXZIZIZZ\",\n \"IIXZIZIXIZZ\",\n \"IIXZXIZZZIZ\",\n \"IIYIYIZIZIZ\",\n \"IIYIZZIYIZZ\",\n \"IIZYYZIIIZZ\",\n \"IIZYZIZYZIZ\",\n \"IIZZIYIIIYI\",\n \"IIZZIZYIYZI\",\n \"IIZZIZZIZZI\",\n \"IIZZZIIZIII\",\n \"IIZZZIXZXII\",\n \"IIZZZXZZZXI\",\n \"IXIIIIIIIXZ\",\n \"IXIIIIIIXZZ\",\n \"IXIIIIXIIZZ\",\n \"IXIIIXZIZIZ\",\n \"IXZZZIIZIXZ\",\n \"IXZZZIIZXZZ\",\n \"IXZZZIXZIZZ\",\n \"IXZZZXZZZIZ\",\n \"IYIIIYIZZII\",\n \"IYIIIZYZIZI\",\n \"IYIIZIIIYII\",\n \"IYIIZIIIZYI\",\n \"IYZZIIIZYII\",\n \"IYZZIIIZZYI\",\n \"IYZZZYIIZII\",\n \"IYZZZZYIIZI\",\n \"IZIIIZZZIIY\",\n \"IZIIIZZZIIZ\",\n \"IZIIZIIIZZY\",\n \"IZIIZIIIZZZ\",\n \"IZIIZIIZIII\",\n \"IZIXXIZIIZI\",\n \"IZIXZIZXIZI\",\n \"IZXZXZIIZII\",\n \"IZXZZZIXZII\",\n \"IZYIIZIYZII\",\n \"IZYIYZIZZII\",\n \"IZZYIIZYIZI\",\n \"IZZYYIZZIZI\",\n \"IZZZIIIIIII\",\n \"IZZZIIIZZZY\",\n \"IZZZIIIZZZZ\",\n \"IZZZZZZIIIY\",\n \"IZZZZZZIIIZ\",\n \"YIIZIIZIIZI\",\n \"YIIZIIZZZIZ\",\n \"YIIZZZIIIZZ\",\n \"YIZIIZIIZII\",\n \"YIZIIZIZIZZ\",\n \"YIZIZIZIZIZ\",\n \"YZIZIZIIZII\",\n \"YZIZZIZZIZI\",\n \"YZZIIIZIIZI\",\n \"YZZIZZIZZII\",\n \"ZIIYIIIYIII\",\n \"ZIIYYIIZIII\",\n \"ZIIZIIYZYIZ\",\n \"ZIIZIIZIIZI\",\n \"ZIIZIIZZZIY\",\n \"ZIIZIIZZZIZ\",\n \"ZIIZIXIZIXZ\",\n \"ZIIZZYZIZYZ\",\n \"ZIIZZZIIIZY\",\n \"ZIIZZZIIIZZ\",\n \"ZIIZZZXIXZZ\",\n \"ZIXIXIIIIII\",\n \"ZIXIZIIXIII\",\n \"ZIYZIIIYIII\",\n \"ZIYZYIIZIII\",\n \"ZIZIIYZZZYZ\",\n \"ZIZIIZIIZII\",\n \"ZIZIIZIZIZY\",\n \"ZIZIIZIZIZZ\",\n \"ZIZIIZXZXZZ\",\n \"ZIZIZIYIYIZ\",\n \"ZIZIZIZIZIY\",\n \"ZIZIZIZIZIZ\",\n \"ZIZIZXIIIXZ\",\n \"ZIZXXIIIIII\",\n \"ZIZXZIIXIII\",\n \"ZXIZIIZZZXI\",\n \"ZXIZIXIZIII\",\n \"ZXIZZZIIXII\",\n \"ZXIZZZXIIII\",\n \"ZXZIIZIZXII\",\n \"ZXZIIZXZIII\",\n \"ZXZIZIZIZXI\",\n \"ZXZIZXIIIII\",\n \"ZYIZIYZIIIZ\",\n \"ZYIZIZIIYZZ\",\n \"ZYIZZIYZIIZ\",\n \"ZYIZZIZZIYZ\",\n \"ZYZIIIYIIIZ\",\n \"ZYZIIIZIIYZ\",\n \"ZYZIZYZZIIZ\",\n \"ZYZIZZIZYZZ\",\n \"ZZIYYZZIIIZ\",\n \"ZZIYZIIYZZZ\",\n \"ZZIZIZIIZII\",\n \"ZZIZZIZZIZI\",\n \"ZZXIIIIXZZZ\",\n \"ZZXIXZZZIIZ\",\n \"ZZYZYZZIIIZ\",\n \"ZZYZZIIYZZZ\",\n \"ZZZIIIZIIZI\",\n \"ZZZIZZIZZII\",\n \"ZZZXIIIXZZZ\",\n \"ZZZXXZZZIIZ\",\n ]\n )\n\n coeffs = numpy.array(\n [\n -0.46007434 + 0.0j,\n 0.01208047 + 0.0j,\n 0.02669401 + 0.0j,\n 0.001633 + 0.0j,\n 0.001633 + 0.0j,\n 0.001633 + 0.0j,\n 0.001633 + 0.0j,\n 
-0.01208047 + 0.0j,\n -0.14571632 + 0.0j,\n 0.05263652 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00292783 + 0.0j,\n 0.001633 + 0.0j,\n 0.001633 + 0.0j,\n 0.05263652 + 0.0j,\n 0.07823638 + 0.0j,\n -0.001633 + 0.0j,\n -0.001633 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00144188 + 0.0j,\n 0.07049784 + 0.0j,\n -0.00144188 + 0.0j,\n 0.06754287 + 0.0j,\n -0.16165347 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00240961 + 0.0j,\n -0.16165347 + 0.0j,\n 0.00337735 + 0.0j,\n 0.0616872 + 0.0j,\n -0.00337735 + 0.0j,\n 0.06017866 + 0.0j,\n -0.01208047 + 0.0j,\n -0.0121449 + 0.0j,\n 0.00187104 + 0.0j,\n 0.01208047 + 0.0j,\n 0.0121449 + 0.0j,\n -0.00187104 + 0.0j,\n -0.00144188 + 0.0j,\n -0.00337735 + 0.0j,\n 0.00144188 + 0.0j,\n 0.00337735 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00240961 + 0.0j,\n 0.001633 + 0.0j,\n 0.02669401 + 0.0j,\n 0.0121449 + 0.0j,\n 0.12182774 + 0.0j,\n 0.001633 + 0.0j,\n 0.001633 + 0.0j,\n -0.0121449 + 0.0j,\n 0.05590251 + 0.0j,\n 0.001633 + 0.0j,\n -0.00515959 + 0.0j,\n -0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00515959 + 0.0j,\n 0.001633 + 0.0j,\n -0.14571632 + 0.0j,\n -0.00187104 + 0.0j,\n 0.05590251 + 0.0j,\n 0.001633 + 0.0j,\n 0.001633 + 0.0j,\n 0.00187104 + 0.0j,\n 0.08447057 + 0.0j,\n 0.001633 + 0.0j,\n -0.00240961 + 0.0j,\n -0.00292783 + 0.0j,\n -0.00292783 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00240961 + 0.0j,\n -0.00240961 + 0.0j,\n -0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00292783 + 0.0j,\n -0.00515959 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00292783 + 0.0j,\n 0.07049784 + 0.0j,\n 0.0616872 + 0.0j,\n 0.00240961 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00515959 + 0.0j,\n 0.00240961 + 0.0j,\n 0.06754287 + 0.0j,\n 0.06017866 + 0.0j,\n 0.00292783 + 0.0j,\n 0.00240961 + 0.0j,\n ]\n )\n\n return SparsePauliOp(pauli_list, coeffs=coeffs)\n\n\nQUBIT_HAMILTONIAN = _qubit_operator()\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nUnit test of Extrapolators based on a dictionary of point,\nparameter pairs.\n\"\"\"\n\nimport unittest\nfrom test import QiskitNatureTestCase\nfrom sklearn import linear_model\nfrom qiskit_nature.exceptions import QiskitNatureError\nfrom qiskit_nature.algorithms.pes_samplers import (\n Extrapolator,\n WindowExtrapolator,\n PolynomialExtrapolator,\n DifferentialExtrapolator,\n PCAExtrapolator,\n SieveExtrapolator,\n)\n\n\nPARAM_DICT = {\n 0.5: [\n 0.07649726233077458,\n 1.2340960400591198e-07,\n 2.719308771599091e-08,\n 0.008390437810203526,\n 0.0764741614750971,\n 1.9132000956096602e-07,\n 2.9065333930913485e-07,\n 0.00833313170973392,\n -0.05158805745745537,\n 5.8737804595134226e-08,\n ],\n 0.6: [\n 0.047537653925701376,\n -1.917048657008852e-07,\n -3.422658080883276e-08,\n 0.004906396230989982,\n 0.0475356511244039,\n -1.4858989951517077e-07,\n -4.554118927702692e-07,\n 0.004904674017550646,\n -0.026469990208471097,\n -7.247195166355873e-08,\n ],\n 0.7: [\n 0.03613992230685178,\n 4.741673241363808e-07,\n 2.3773620079947958e-07,\n 0.0019807112069739983,\n 0.03613265701497832,\n 6.110370616271814e-07,\n 4.7624119913927746e-07,\n 0.0019793617137878546,\n -0.022690629295970738,\n 1.4536323300165836e-07,\n ],\n 0.8: [\n 0.032353493452967126,\n -5.988966845753558e-07,\n 5.745328822729277e-08,\n 0.00021194523430201692,\n 0.032355414067993846,\n -5.189590826114998e-07,\n -3.67836416341141e-07,\n 0.0002135481005546501,\n -0.021642050861260625,\n -1.7031569870078584e-08,\n ],\n 0.9: [\n 0.029140898456611407,\n -6.173482350529143e-07,\n 6.195627362572485e-08,\n -0.0003490974905228344,\n 0.029145442226752687,\n -6.121480980106215e-07,\n -7.301901035010772e-07,\n -0.0003490856477090481,\n -0.02152841574436387,\n 1.7275126260813324e-07,\n ],\n 1.0: [\n 0.029247914836804657,\n 5.406923000869481e-08,\n -1.2835940341198057e-08,\n -0.00014550115747168215,\n 0.02924483275168305,\n 3.332715515541604e-08,\n -4.214594252866692e-08,\n -0.00014476700526947067,\n -0.022193103836975897,\n 1.8066966314280466e-07,\n ],\n 1.1: [\n 0.03006212028509998,\n 8.719643359505152e-07,\n 2.2976675446724122e-07,\n 0.00047712315923690516,\n 0.03006488639435572,\n 1.1166361133977898e-06,\n 5.061212361236216e-07,\n 0.00047764287100316387,\n -0.023178549796925824,\n -2.928595563199974e-07,\n ],\n 1.2: [\n 0.030974376843233304,\n -1.0856144455895877e-06,\n -3.476503050548108e-07,\n 0.001136538429249089,\n 0.03097485850614847,\n -9.341556711096642e-07,\n -1.1105021085720809e-07,\n 0.0011365812922166018,\n -0.024770225335378506,\n 3.1946997465490094e-07,\n ],\n 1.3: [\n 0.031882221296263585,\n 1.786623717240475e-06,\n 5.966161740895298e-07,\n 0.0019238138369525367,\n 0.03188025548265294,\n 2.001914958908424e-06,\n -7.558698586542756e-08,\n 0.0019267033837603463,\n -0.026633630436000855,\n -4.838673928102748e-07,\n ],\n 1.4: [\n 0.03363319046621523,\n -6.215327218045763e-06,\n -1.707461485292177e-06,\n 0.0022111427295926026,\n 0.03363427344016048,\n -6.479433272163631e-06,\n -8.620279811840461e-07,\n 0.0022079369298442677,\n 
-0.029254200628083923,\n 2.03258913595112e-06,\n ],\n 1.5: [\n 0.03566191437849682,\n 1.3175681716659443e-05,\n 3.3463916528882136e-06,\n 0.0030670576873521546,\n 0.03565986755932079,\n 1.3808936313520536e-05,\n 2.1227354591337757e-06,\n 0.0030639663487480417,\n -0.03203113690256062,\n -2.988438361808215e-06,\n ],\n 1.6: [\n 0.03853048160610931,\n -4.500510577352305e-05,\n -1.1042391095055013e-05,\n 0.003589496950963951,\n 0.03852649109560952,\n -4.632560074669591e-05,\n -6.9604927841086826e-06,\n 0.003591766338853773,\n -0.03617535567521557,\n 1.03526517642164e-05,\n ],\n 1.7: [\n 0.04166595503111059,\n 0.00012474608362087326,\n 3.0811181106852395e-05,\n 0.004449408009656353,\n 0.04167583498336048,\n 0.0001291807564363206,\n 1.9103762924011895e-05,\n 0.004443558543591776,\n -0.0411176424372442,\n -3.143959686889569e-05,\n ],\n 1.8: [\n 0.04630023768704881,\n -0.0003032527231323504,\n -7.224290210451026e-05,\n 0.004988381942930891,\n 0.04629620402315099,\n -0.0003111138155773558,\n -4.900370932911525e-05,\n 0.004995942389375613,\n -0.047398106863887825,\n 7.734110549927737e-05,\n ],\n 1.9: [\n 0.05237961421167222,\n 0.0006396923182584415,\n 0.00014873747649097767,\n 0.005855974769304974,\n 0.05234227038906301,\n 0.0006540391246003456,\n 0.00010652381338578109,\n 0.005850757199904456,\n -0.055346836396118364,\n -0.00018559571977688104,\n ],\n}\n\n\nclass TestExtrapolators(QiskitNatureTestCase):\n \"\"\"Test Extrapolators.\"\"\"\n\n def test_factory(self):\n \"\"\"\n Test factory method implementation to create instances of various Extrapolators.\n \"\"\"\n self.assertIsInstance(Extrapolator.factory(mode=\"window\"), WindowExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"poly\"), PolynomialExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"diff_model\"), DifferentialExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"pca\"), PCAExtrapolator)\n self.assertIsInstance(Extrapolator.factory(mode=\"l1\"), SieveExtrapolator)\n self.assertRaises(QiskitNatureError, Extrapolator.factory, mode=\"unknown\")\n\n def test_polynomial_extrapolator(self):\n \"\"\"\n Test extrapolation using a polynomial extrapolator with degree = 1 using all previous points\n in the parameters for extrapolation. This test confirms that the extrapolation of the\n parameters has a specified error relative to the actual parameter values.\n NOTE: The polynomial fit may give a runtime warning if the data is poorly fitted.\n This depends on degree and dataset and may need be tuned by the user to achieve\n optimal results. This reasoning holds for any instance using an internal\n polynomial extrapolator.\n \"\"\"\n points = 0.7\n params = PolynomialExtrapolator(degree=3).extrapolate(\n points=[points], param_dict=PARAM_DICT\n )\n sq_diff = [\n (actual - expected) ** 2 for actual, expected in zip(params[points], PARAM_DICT[points])\n ]\n self.assertLess(sum(sq_diff), 1e-3)\n\n def test_poly_window_extrapolator(self):\n \"\"\"\n Test extrapolation using an WindowExtrapolator using a data window/lookback of 3 points\n and an internal polynomial extrapolator with degree = 1. 
This test confirms that no\n extrapolation is performed on points before the data window, i.e, the first two points,\n and that the extrapolation of the parameters on the last three points has a error below\n a threshold when compared to the actual parameter values.\n \"\"\"\n points_interspersed = [0.3, 0.5, 0.7, 0.8, 1.5]\n window_extrapolator = Extrapolator.factory(\n \"window\", extrapolator=PolynomialExtrapolator(degree=1), window=3\n )\n params = window_extrapolator.extrapolate(points=points_interspersed, param_dict=PARAM_DICT)\n self.assertFalse(params.get(0.3))\n self.assertFalse(params.get(0.5))\n sq_diff_1 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.7], PARAM_DICT[0.7])\n ]\n self.assertLess(sum(sq_diff_1), 1e-1)\n sq_diff_2 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.8], PARAM_DICT[0.8])\n ]\n self.assertLess(sum(sq_diff_2), 1e-2)\n sq_diff_3 = [\n (actual - expected) ** 2 for actual, expected in zip(params[1.5], PARAM_DICT[1.5])\n ]\n self.assertLess(sum(sq_diff_3), 1e-2)\n\n def test_differential_model_window_extrapolator(self):\n \"\"\"\n Test extrapolation using an WindowExtrapolator using a data window/lookback of 3 points\n and an internal differential extrapolator with degree = 1 and the default linear regression\n model from scikit-learn. This test confirms that no extrapolation is performed on points\n before the data window, i.e, the first two points, and that the extrapolation of the\n parameters on the last three points has some specified error relative to the actual values.\n \"\"\"\n points_interspersed = [0.3, 0.5, 0.7, 0.8, 1.5]\n window_extrapolator = WindowExtrapolator(\n extrapolator=DifferentialExtrapolator(degree=1), window=3\n )\n params = window_extrapolator.extrapolate(points=points_interspersed, param_dict=PARAM_DICT)\n self.assertFalse(params.get(0.3))\n self.assertFalse(params.get(0.5))\n sq_diff_1 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.7], PARAM_DICT[0.7])\n ]\n self.assertLess(sum(sq_diff_1), 1e-2)\n sq_diff_2 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.8], PARAM_DICT[0.8])\n ]\n self.assertLess(sum(sq_diff_2), 1e-3)\n sq_diff_3 = [\n (actual - expected) ** 2 for actual, expected in zip(params[1.5], PARAM_DICT[1.5])\n ]\n self.assertLess(sum(sq_diff_3), 1e-3)\n\n def test_differential_model_window_alternate_model_extrapolator(self):\n \"\"\"\n Test extrapolation using an WindowExtrapolator using a data window/lookback of 3 points\n and an internal differential extrapolator with degree = 1 and the Ridge regression model\n from scikit-learn. 
This test confirms that no extrapolation is performed on points before\n the data window, i.e, the first two points, and that the extrapolation of the parameters on\n the last three points has some specified error relative to the actual parameter values.\n \"\"\"\n points_interspersed = [0.3, 0.5, 0.7, 0.8, 1.5]\n model = linear_model.Ridge()\n window_extrapolator = WindowExtrapolator(\n extrapolator=DifferentialExtrapolator(degree=1, model=model), window=3\n )\n params = window_extrapolator.extrapolate(points=points_interspersed, param_dict=PARAM_DICT)\n self.assertFalse(params.get(0.3))\n self.assertFalse(params.get(0.5))\n sq_diff_1 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.7], PARAM_DICT[0.7])\n ]\n self.assertLess(sum(sq_diff_1), 1e-2)\n sq_diff_2 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.8], PARAM_DICT[0.8])\n ]\n self.assertLess(sum(sq_diff_2), 1e-3)\n sq_diff_3 = [\n (actual - expected) ** 2 for actual, expected in zip(params[1.5], PARAM_DICT[1.5])\n ]\n self.assertLess(sum(sq_diff_3), 1e-3)\n\n def test_pca_polynomial_window_extrapolator(self):\n \"\"\"\n Test extrapolation using an PCAExtrapolator using a data window/lookback of 3 points\n and an internal polynomial extrapolator with degree = 1 using regular PCA as default.\n This test confirms that no extrapolation is performed on points before the\n data window, i.e, the first two points, and that the extrapolation of the parameters on\n last three points has a specified error relative to the actual parameter values.\n \"\"\"\n points_interspersed = [0.3, 0.5, 0.7, 0.8, 1.5]\n pca_poly_win_ext = PCAExtrapolator(extrapolator=PolynomialExtrapolator(degree=1), window=3)\n params = pca_poly_win_ext.extrapolate(points=points_interspersed, param_dict=PARAM_DICT)\n self.assertFalse(params.get(0.3))\n self.assertFalse(params.get(0.5))\n sq_diff_1 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.7], PARAM_DICT[0.7])\n ]\n self.assertLess(sum(sq_diff_1), 1e-2)\n sq_diff_2 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.8], PARAM_DICT[0.8])\n ]\n self.assertLess(sum(sq_diff_2), 1e-2)\n sq_diff_3 = [\n (actual - expected) ** 2 for actual, expected in zip(params[1.5], PARAM_DICT[1.5])\n ]\n self.assertLess(sum(sq_diff_3), 1e-2)\n\n def test_sieve_poly_window_extrapolator(self):\n \"\"\"\n Test extrapolation using an Sieve/Clustering Extrapolator using a data window/lookback of\n 3 points and an internal polynomial extrapolator with degree = 1.\n This test confirms that no extrapolation is performed on points before the\n data window, i.e, the first two points, and that the extrapolation of the parameters on the\n last three points has some specified error relative to the actual parameter values.\n \"\"\"\n points_interspersed = [0.3, 0.5, 0.7, 0.8, 1.5]\n sieve_win_extrapolator = SieveExtrapolator(\n extrapolator=PolynomialExtrapolator(degree=1), window=3\n )\n params = sieve_win_extrapolator.extrapolate(\n points=points_interspersed, param_dict=PARAM_DICT\n )\n self.assertFalse(params.get(0.3))\n self.assertFalse(params.get(0.5))\n sq_diff_1 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.7], PARAM_DICT[0.7])\n ]\n self.assertLess(sum(sq_diff_1), 1e-1)\n sq_diff_2 = [\n (actual - expected) ** 2 for actual, expected in zip(params[0.8], PARAM_DICT[0.8])\n ]\n self.assertLess(sum(sq_diff_2), 1e-1)\n sq_diff_3 = [\n (actual - expected) ** 2 for actual, expected in zip(params[1.5], PARAM_DICT[1.5])\n ]\n self.assertLess(sum(sq_diff_3), 
1e-1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array"
],
[
"numpy.transpose"
],
[
"numpy.asarray",
"numpy.round",
"numpy.einsum"
],
[
"numpy.array"
],
[
"sklearn.linear_model.Ridge"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
walterddr/nestedtensor | [
"2818db1d83dbd475aa54aee3ab84749a4c13e911"
] | [
"nestedtensor/nested/fuser.py"
] | [
"import torch.fx as fx\nfrom typing import Type, Dict, Any, Tuple, Iterable\nimport torch\nimport copy\nfrom torch.fx import symbolic_trace\nimport time\n\ndef _parent_name(target : str) -> Tuple[str, str]:\n \"\"\"\n Splits a qualname into parent path and last atom.\n For example, `foo.bar.baz` -> (`foo.bar`, `baz`)\n \"\"\"\n *parent, name = target.rsplit('.', 1)\n return parent[0] if parent else '', name\n\n# Works for length 2 patterns with 2 modules\ndef matches_module_pattern(pattern: Iterable[Type], node: fx.Node, modules: Dict[str, Any]):\n if len(node.args) == 0:\n return False\n nodes: Tuple[Any, fx.Node] = (node.args[0], node)\n for expected_type, current_node in zip(pattern, nodes):\n if not isinstance(current_node, fx.Node):\n return False\n if current_node.op != 'call_module':\n return False\n if not isinstance(current_node.target, str):\n return False\n if current_node.target not in modules:\n return False\n if type(modules[current_node.target]) is not expected_type:\n return False\n return True\n\n\ndef replace_node_module(node: fx.Node, modules: Dict[str, Any], new_module: torch.nn.Module):\n assert(isinstance(node.target, str))\n parent_name, name = _parent_name(node.target)\n setattr(modules[parent_name], name, new_module)\n\ndef computeUpdatedConvWeightAndBias(\n bn_rv,\n bn_eps,\n bn_w,\n bn_b,\n bn_rm,\n conv_w,\n conv_b=None):\n orig_dtype = bn_rv.dtype\n bn_var_rsqrt = (bn_w / torch.sqrt(bn_rv.to(torch.double) + bn_eps))\n new_w = (conv_w * (bn_var_rsqrt).reshape(-1, 1, 1, 1)).to(orig_dtype)\n if conv_b is None:\n return new_w\n new_b = (conv_b - bn_rm) * bn_var_rsqrt * bn_w + bn_b\n return new_w, new_b\n\ndef fuse_conv_bn_eval(conv, bn):\n assert(not (conv.training or bn.training)), \"Fusion only for eval!\"\n fused_conv = copy.deepcopy(conv)\n fused_conv.bias = None\n\n fused_conv.weight = \\\n torch.nn.Parameter(computeUpdatedConvWeightAndBias(bn.running_var, bn.eps, bn.weight, bn.bias, bn.running_mean, fused_conv.weight))\n\n return fused_conv\n\ndef fuse_conv_bn(model: torch.nn.Module, inplace=False) -> torch.nn.Module:\n \"\"\"\n Fuses convolution/BN layers for inference purposes. Will deepcopy your\n model by default, but can modify the model inplace as well.\n \"\"\"\n patterns = [(torch.nn.Conv2d, torch.nn.BatchNorm2d)]\n if not inplace:\n model = copy.deepcopy(model)\n fx_model = fx.symbolic_trace(model)\n modules = dict(fx_model.named_modules())\n new_graph = copy.deepcopy(fx_model.graph)\n\n for pattern in patterns:\n for node in new_graph.nodes:\n if matches_module_pattern(pattern, node, modules):\n if len(node.args[0].users) > 1: # Output of conv is used by other nodes\n continue\n conv = modules[node.args[0].target]\n bn = modules[node.target]\n fused_conv = fuse_conv_bn_eval(conv, bn)\n replace_node_module(node.args[0], modules, fused_conv)\n node.replace_all_uses_with(node.args[0])\n new_graph.erase_node(node)\n return fx.GraphModule(fx_model, new_graph)\n"
] | [
[
"torch.fx.symbolic_trace",
"torch.fx.GraphModule"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
QiXi9409/Simultaneous_ECG_Heartbeat | [
"8984084d570a0e45bf3508a1a23d562ba147ca84"
] | [
"rcn_tool_b.py"
] | [
"from rcn_tool_a import rcn_tool_a\r\nimport torch\r\nfrom config import cfg\r\nclass rcn_tool_b(rcn_tool_a):\r\n def roi_pooling_cuda(self, features, proposal, label=None, stride=cfg.feature_stride, pool=None, batch=False):\r\n if batch == True:\r\n batch_output = []\r\n batch_label = []\r\n if label != None:\r\n batch_label.extend([j for i in label for j in i])\r\n batch_label = torch.stack(batch_label)\r\n outputs = pool(features, proposal)\r\n batch_output = outputs\r\n class_num = [0] * 6\r\n # if label != None:\r\n # for i in batch_label:\r\n # if i != -1:\r\n # class_num[i.item()] += 1\r\n # average = int(sum(class_num) / 6)\r\n # class_num = [average / (i + 1) for i in class_num]\r\n return batch_output, batch_label, class_num\r\n else:\r\n if len(features.size()) == 3:\r\n batch_size, num_channels, data_width = features.size()\r\n batch_output = []\r\n batch_label = []\r\n for index in range(batch_size):\r\n data = features[index]\r\n this_proposal = proposal[index]\r\n # num_proposal = this_proposal.size()[0]\r\n outputs = pool(data, this_proposal)\r\n # if torch.isnan(outputs).sum()>=1:\r\n # print('nan produce')\r\n # if torch.isinf(outputs).sum()>=1:\r\n # print('inf procude')\r\n batch_output.append(outputs)\r\n if label != None:\r\n batch_label.extend([i for i in label[index]])\r\n if label != None:\r\n batch_label = torch.stack(batch_label)\r\n # batch_output = [torch.stack(i) for i in batch_output]\r\n\r\n class_num = [0] * 5\r\n # if label != None:\r\n # for i in batch_label:\r\n # if i != -1:\r\n # class_num[i.item()] += 1\r\n # average = int(sum(class_num) / 5)\r\n # class_num = [average / (i + 1) for i in class_num]\r\n # class_num[0] /= 30\r\n return batch_output, batch_label, class_num\r\n else:\r\n batch_output = []\r\n batch_label = []\r\n # num_channels, data_width = features.size()\r\n data = features\r\n this_proposal = proposal\r\n num_proposal = this_proposal.size()[0]\r\n # width_limit_right = torch.Tensor([data_width - 1] * num_proposal).cuda()\r\n # width_limit_left = torch.zeros(num_proposal).cuda()\r\n # start = torch.floor(this_proposal * (1 / stride))[:,\r\n # 0]\r\n # end = torch.ceil(this_proposal * (1 / stride))[:, 1] #\r\n # wstart = torch.min(width_limit_right, torch.max(width_limit_left, start)).type(\r\n # torch.long)\r\n # wend = torch.min(width_limit_right, torch.max(width_limit_left, end)).type(\r\n # torch.long)\r\n # tmp = self.get_average(data, wstart, wend, stride)\r\n outputs = pool(data, this_proposal)\r\n # outputs = tmp\r\n batch_output.extend([outputs[i, :] for i in range(num_proposal)])\r\n if label != None:\r\n batch_label.extend(label)\r\n batch_output = torch.stack(batch_output, 0)\r\n return batch_output"
] | [
[
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pquochuy/SeqSleepNet | [
"ae0b4bd72aec456d0ef6fe15589ef17cdb9468e3"
] | [
"tensorflow_net/E2E-ARNN/train_arnn_sleep.py"
] | [
"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,-1\"\nimport numpy as np\nimport tensorflow as tf\n\n#from tensorflow.python.client import device_lib\n#print(device_lib.list_local_devices())\n\nimport shutil, sys\nfrom datetime import datetime\nimport h5py\n\nfrom arnn_sleep import ARNN_Sleep\nfrom arnn_sleep_config import Config\n\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import cohen_kappa_score\n\nfrom datagenerator_from_list_v2 import DataGenerator\n\n#from scipy.io import loadmat\n\n\n# Parameters\n# ==================================================\n\n# Misc Parameters\ntf.app.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.app.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n# My Parameters\ntf.app.flags.DEFINE_string(\"eeg_train_data\", \"../train_data.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"eeg_eval_data\", \"../data/eval_data_1.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"eeg_test_data\", \"../test_data.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"eog_train_data\", \"../train_data.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"eog_eval_data\", \"../data/eval_data_1.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"eog_test_data\", \"../test_data.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"emg_train_data\", \"../train_data.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"emg_eval_data\", \"../data/eval_data_1.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"emg_test_data\", \"../test_data.mat\", \"Point to directory of input data\")\ntf.app.flags.DEFINE_string(\"out_dir\", \"./output/\", \"Point to output directory\")\ntf.app.flags.DEFINE_string(\"checkpoint_dir\", \"./checkpoint/\", \"Point to checkpoint directory\")\n\ntf.app.flags.DEFINE_float(\"dropout_keep_prob_rnn\", 0.75, \"Dropout keep probability (default: 0.75)\")\n\ntf.app.flags.DEFINE_integer(\"seq_len\", 32, \"Sequence length (default: 32)\")\n\ntf.app.flags.DEFINE_integer(\"nfilter\", 20, \"Sequence length (default: 20)\")\n\ntf.app.flags.DEFINE_integer(\"nhidden1\", 64, \"Sequence length (default: 20)\")\ntf.app.flags.DEFINE_integer(\"attention_size1\", 32, \"Sequence length (default: 20)\")\n\nFLAGS = tf.app.flags.FLAGS\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()): # python3\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n\n# Data Preparatopn\n# ==================================================\n\n# path where some output are stored\nout_path = os.path.abspath(os.path.join(os.path.curdir,FLAGS.out_dir))\n# path where checkpoint models are stored\ncheckpoint_path = os.path.abspath(os.path.join(out_path,FLAGS.checkpoint_dir))\nif not os.path.isdir(os.path.abspath(out_path)): os.makedirs(os.path.abspath(out_path))\nif not os.path.isdir(os.path.abspath(checkpoint_path)): os.makedirs(os.path.abspath(checkpoint_path))\n\nconfig = Config()\nconfig.dropout_keep_prob_rnn = FLAGS.dropout_keep_prob_rnn\nconfig.epoch_seq_len = FLAGS.seq_len\nconfig.epoch_step = FLAGS.seq_len\nconfig.nfilter = FLAGS.nfilter\nconfig.nhidden1 = FLAGS.nhidden1\nconfig.attention_size1 = FLAGS.attention_size1\n\neeg_active = ((FLAGS.eeg_train_data != \"\") and (FLAGS.eeg_test_data != \"\"))\neog_active = 
((FLAGS.eog_train_data != \"\") and (FLAGS.eog_test_data != \"\"))\nemg_active = ((FLAGS.emg_train_data != \"\") and (FLAGS.emg_test_data != \"\"))\n\nif (eeg_active):\n print(\"eeg active\")\n # Initalize the data generator seperately for the training, validation, and test sets\n eeg_train_gen = DataGenerator(os.path.abspath(FLAGS.eeg_train_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n eeg_test_gen = DataGenerator(os.path.abspath(FLAGS.eeg_test_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n eeg_eval_gen = DataGenerator(os.path.abspath(FLAGS.eeg_eval_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n\n # data normalization here\n X = eeg_train_gen.X\n X = np.reshape(X,(eeg_train_gen.data_size*eeg_train_gen.data_shape[0], eeg_train_gen.data_shape[1]))\n meanX = X.mean(axis=0)\n stdX = X.std(axis=0)\n X = (X - meanX) / stdX\n eeg_train_gen.X = np.reshape(X, (eeg_train_gen.data_size, eeg_train_gen.data_shape[0], eeg_train_gen.data_shape[1]))\n\n X = eeg_eval_gen.X\n X = np.reshape(X,(eeg_eval_gen.data_size*eeg_eval_gen.data_shape[0], eeg_eval_gen.data_shape[1]))\n X = (X - meanX) / stdX\n eeg_eval_gen.X = np.reshape(X, (eeg_eval_gen.data_size, eeg_eval_gen.data_shape[0], eeg_eval_gen.data_shape[1]))\n\n X = eeg_test_gen.X\n X = np.reshape(X,(eeg_test_gen.data_size*eeg_test_gen.data_shape[0], eeg_test_gen.data_shape[1]))\n X = (X - meanX) / stdX\n eeg_test_gen.X = np.reshape(X, (eeg_test_gen.data_size, eeg_test_gen.data_shape[0], eeg_test_gen.data_shape[1]))\n\nif (eog_active):\n print(\"eog active\")\n # Initalize the data generator seperately for the training, validation, and test sets\n eog_train_gen = DataGenerator(os.path.abspath(FLAGS.eog_train_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n eog_test_gen = DataGenerator(os.path.abspath(FLAGS.eog_test_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n eog_eval_gen = DataGenerator(os.path.abspath(FLAGS.eog_eval_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n\n # data normalization here\n X = eog_train_gen.X\n X = np.reshape(X,(eog_train_gen.data_size*eog_train_gen.data_shape[0], eog_train_gen.data_shape[1]))\n meanX = X.mean(axis=0)\n stdX = X.std(axis=0)\n X = (X - meanX) / stdX\n eog_train_gen.X = np.reshape(X, (eog_train_gen.data_size, eog_train_gen.data_shape[0], eog_train_gen.data_shape[1]))\n\n X = eog_eval_gen.X\n X = np.reshape(X,(eog_eval_gen.data_size*eog_eval_gen.data_shape[0], eog_eval_gen.data_shape[1]))\n X = (X - meanX) / stdX\n eog_eval_gen.X = np.reshape(X, (eog_eval_gen.data_size, eog_eval_gen.data_shape[0], eog_eval_gen.data_shape[1]))\n\n X = eog_test_gen.X\n X = np.reshape(X,(eog_test_gen.data_size*eog_test_gen.data_shape[0], eog_test_gen.data_shape[1]))\n X = (X - meanX) / stdX\n eog_test_gen.X = np.reshape(X, (eog_test_gen.data_size, eog_test_gen.data_shape[0], eog_test_gen.data_shape[1]))\n\nif (emg_active):\n print(\"emg active\")\n # Initalize the data generator seperately for the training, validation, and test sets\n emg_train_gen = DataGenerator(os.path.abspath(FLAGS.emg_train_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n emg_test_gen = DataGenerator(os.path.abspath(FLAGS.emg_test_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n emg_eval_gen = DataGenerator(os.path.abspath(FLAGS.emg_eval_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)\n\n # data normalization here\n X = 
emg_train_gen.X\n X = np.reshape(X,(emg_train_gen.data_size*emg_train_gen.data_shape[0], emg_train_gen.data_shape[1]))\n meanX = X.mean(axis=0)\n stdX = X.std(axis=0)\n X = (X - meanX) / stdX\n emg_train_gen.X = np.reshape(X, (emg_train_gen.data_size, emg_train_gen.data_shape[0], emg_train_gen.data_shape[1]))\n\n X = emg_eval_gen.X\n X = np.reshape(X,(emg_eval_gen.data_size*emg_eval_gen.data_shape[0], emg_eval_gen.data_shape[1]))\n X = (X - meanX) / stdX\n emg_eval_gen.X = np.reshape(X, (emg_eval_gen.data_size, emg_eval_gen.data_shape[0], emg_eval_gen.data_shape[1]))\n\n X = emg_test_gen.X\n X = np.reshape(X,(emg_test_gen.data_size*emg_test_gen.data_shape[0], emg_test_gen.data_shape[1]))\n X = (X - meanX) / stdX\n emg_test_gen.X = np.reshape(X, (emg_test_gen.data_size, emg_test_gen.data_shape[0], emg_test_gen.data_shape[1]))\n\n# eeg always active\ntrain_generator = eeg_train_gen\ntest_generator = eeg_test_gen\neval_generator = eeg_eval_gen\n\nif (not(eog_active) and not(emg_active)):\n train_generator.X = np.expand_dims(train_generator.X, axis=-1) # expand channel dimension\n train_generator.data_shape = train_generator.X.shape[1:]\n test_generator.X = np.expand_dims(test_generator.X, axis=-1) # expand channel dimension\n test_generator.data_shape = test_generator.X.shape[1:]\n eval_generator.X = np.expand_dims(eval_generator.X, axis=-1) # expand channel dimension\n eval_generator.data_shape = eval_generator.X.shape[1:]\n nchannel = 1\n print(train_generator.X.shape)\n\nif (eog_active and not(emg_active)):\n print(train_generator.X.shape)\n print(eog_train_gen.X.shape)\n train_generator.X = np.stack((train_generator.X, eog_train_gen.X), axis=-1) # merge and make new dimension\n train_generator.data_shape = train_generator.X.shape[1:]\n test_generator.X = np.stack((test_generator.X, eog_test_gen.X), axis=-1) # merge and make new dimension\n test_generator.data_shape = test_generator.X.shape[1:]\n eval_generator.X = np.stack((eval_generator.X, eog_eval_gen.X), axis=-1) # merge and make new dimension\n eval_generator.data_shape = eval_generator.X.shape[1:]\n nchannel = 2\n print(train_generator.X.shape)\n\nif (eog_active and emg_active):\n print(train_generator.X.shape)\n print(eog_train_gen.X.shape)\n print(emg_train_gen.X.shape)\n train_generator.X = np.stack((train_generator.X, eog_train_gen.X, emg_train_gen.X), axis=-1) # merge and make new dimension\n train_generator.data_shape = train_generator.X.shape[1:]\n test_generator.X = np.stack((test_generator.X, eog_test_gen.X, emg_test_gen.X), axis=-1) # merge and make new dimension\n test_generator.data_shape = test_generator.X.shape[1:]\n eval_generator.X = np.stack((eval_generator.X, eog_eval_gen.X, emg_eval_gen.X), axis=-1) # merge and make new dimension\n eval_generator.data_shape = eval_generator.X.shape[1:]\n nchannel = 3\n print(train_generator.X.shape)\n\nconfig.nchannel = nchannel\n\ndel eeg_train_gen\ndel eeg_test_gen\ndel eeg_eval_gen\nif (eog_active):\n del eog_train_gen\n del eog_test_gen\n del eog_eval_gen\nif (emg_active):\n del emg_train_gen\n del emg_test_gen\n del emg_eval_gen\n\n# shuffle training data here\ntrain_generator.shuffle_data()\n\ntrain_batches_per_epoch = np.floor(len(train_generator.data_index) / config.batch_size).astype(np.uint32)\neval_batches_per_epoch = np.floor(len(eval_generator.data_index) / config.batch_size).astype(np.uint32)\ntest_batches_per_epoch = np.floor(len(test_generator.data_index) / config.batch_size).astype(np.uint32)\n\nprint(\"Train/Eval/Test set: 
{:d}/{:d}/{:d}\".format(train_generator.data_size, eval_generator.data_size, test_generator.data_size))\n\nprint(\"Train/Eval/Test batches per epoch: {:d}/{:d}/{:d}\".format(train_batches_per_epoch, eval_batches_per_epoch, test_batches_per_epoch))\n\n# variable to keep track of best fscore\nbest_fscore = 0.0\nbest_acc = 0.0\nbest_kappa = 0.0\nmin_loss = float(\"inf\")\n# Training\n# ==================================================\n\nwith tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n session_conf.gpu_options.allow_growth = True\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n arnn = ARNN_Sleep(config=config)\n\n # Define Training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(config.learning_rate)\n grads_and_vars = optimizer.compute_gradients(arnn.loss)\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n out_dir = os.path.abspath(os.path.join(os.path.curdir,FLAGS.out_dir))\n print(\"Writing to {}\\n\".format(out_dir))\n\n saver = tf.train.Saver(tf.all_variables(), max_to_keep=1)\n\n # initialize all variables\n print(\"Model initialized\")\n sess.run(tf.initialize_all_variables())\n\n def train_step(x_batch, y_batch):\n \"\"\"\n A single training step\n \"\"\"\n frame_seq_len = np.ones(len(x_batch),dtype=int) * config.frame_seq_len\n feed_dict = {\n arnn.input_x: x_batch,\n arnn.input_y: y_batch,\n arnn.dropout_keep_prob_rnn: config.dropout_keep_prob_rnn,\n arnn.frame_seq_len: frame_seq_len\n }\n _, step, output_loss, total_loss, accuracy = sess.run(\n [train_op, global_step, arnn.output_loss, arnn.loss, arnn.accuracy],\n feed_dict)\n return step, output_loss, total_loss, accuracy\n\n def dev_step(x_batch, y_batch):\n frame_seq_len = np.ones(len(x_batch),dtype=int) * config.frame_seq_len\n feed_dict = {\n arnn.input_x: x_batch,\n arnn.input_y: y_batch,\n arnn.dropout_keep_prob_rnn: 1.0,\n arnn.frame_seq_len: frame_seq_len\n }\n output_loss, total_loss, yhat = sess.run(\n [arnn.output_loss, arnn.loss, arnn.prediction], feed_dict)\n return output_loss, total_loss, yhat\n\n def evaluate(gen, log_filename):\n # Validate the model on the entire evaluation test set after each epoch\n output_loss =0\n total_loss = 0\n yhat = np.zeros([len(gen.data_index)])\n num_batch_per_epoch = np.floor(len(gen.data_index) / (config.batch_size)).astype(np.uint32)\n test_step = 1\n while test_step < num_batch_per_epoch:\n x_batch, y_batch, label_batch_ = gen.next_batch(config.batch_size)\n output_loss_, total_loss_, yhat_ = dev_step(x_batch, y_batch)\n output_loss += output_loss_\n total_loss += total_loss_\n\n yhat[(test_step-1)*config.batch_size : test_step*config.batch_size] = yhat_\n test_step += 1\n if(gen.pointer < len(gen.data_index)):\n actual_len, x_batch, y_batch, label_batch_ = gen.rest_batch(config.batch_size)\n output_loss_, total_loss_, yhat_ = dev_step(x_batch, y_batch)\n\n yhat[(test_step-1)*config.batch_size : len(gen.data_index)] = yhat_\n output_loss += output_loss_\n total_loss += total_loss_\n yhat = yhat + 1\n acc = accuracy_score(gen.label, yhat)\n with open(os.path.join(out_dir, log_filename), \"a\") as text_file:\n text_file.write(\"{:g} {:g} {:g}\\n\".format(output_loss, total_loss, acc))\n return acc, yhat, output_loss, total_loss\n\n # Loop over number of epochs\n for epoch in range(config.training_epoch):\n print(\"{} Epoch number: 
{}\".format(datetime.now(), epoch + 1))\n step = 1\n while step < train_batches_per_epoch:\n # Get a batch\n x_batch, y_batch, label_batch = train_generator.next_batch(config.batch_size)\n train_step_, train_output_loss_, train_total_loss_, train_acc_ = train_step(x_batch, y_batch)\n time_str = datetime.now().isoformat()\n\n print(\"{}: step {}, output_loss {}, total_loss {} acc {}\".format(time_str, train_step_, train_output_loss_, train_total_loss_, train_acc_))\n step += 1\n\n current_step = tf.train.global_step(sess, global_step)\n if current_step % config.evaluate_every == 0:\n # Validate the model on the entire evaluation test set after each epoch\n print(\"{} Start validation\".format(datetime.now()))\n eval_acc, eval_yhat, eval_output_loss, eval_total_loss = evaluate(gen=eval_generator, log_filename=\"eval_result_log.txt\")\n test_acc, test_yhat, test_output_loss, test_total_loss = evaluate(gen=test_generator, log_filename=\"test_result_log.txt\")\n\n if(eval_acc >= best_acc):\n best_acc = eval_acc\n checkpoint_name = os.path.join(checkpoint_path, 'model_step' + str(current_step) +'.ckpt')\n save_path = saver.save(sess, checkpoint_name)\n\n print(\"Best model updated\")\n source_file = checkpoint_name\n dest_file = os.path.join(checkpoint_path, 'best_model_acc')\n shutil.copy(source_file + '.data-00000-of-00001', dest_file + '.data-00000-of-00001')\n shutil.copy(source_file + '.index', dest_file + '.index')\n shutil.copy(source_file + '.meta', dest_file + '.meta')\n\n\n test_generator.reset_pointer()\n eval_generator.reset_pointer()\n train_generator.reset_pointer()\n"
] | [
[
"tensorflow.train.global_step",
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.all_variables",
"tensorflow.Variable",
"numpy.reshape",
"tensorflow.app.flags.DEFINE_integer",
"numpy.stack",
"tensorflow.ConfigProto",
"tensorflow.initialize_all_variables",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.app.flags.DEFINE_boolean",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
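The training script in the row above repeats one normalization pattern per modality (EEG, EOG, EMG): flatten (N, T, D) epochs to (N*T, D), z-score with the mean and standard deviation of the training split only, then restore the original shape. A compact NumPy sketch of that pattern, with illustrative shapes rather than the script's real data:

    import numpy as np

    def zscore_with_train_stats(train_X, *others):
        """Flatten (N, T, D) -> (N*T, D), z-score with train statistics, restore shape."""
        n, t, d = train_X.shape
        flat = train_X.reshape(n * t, d)
        mean, std = flat.mean(axis=0), flat.std(axis=0)

        def apply(X):
            m = X.shape[0]
            return ((X.reshape(m * t, d) - mean) / std).reshape(m, t, d)

        return [apply(train_X)] + [apply(X) for X in others]

    # illustrative sizes: 100 training epochs, 20 eval, 20 test, 32 frames of 129 bins
    train, evald, test = (np.random.randn(k, 32, 129) for k in (100, 20, 20))
    train_n, eval_n, test_n = zscore_with_train_stats(train, evald, test)
    print(train_n.mean(), train_n.std())   # approximately 0 and 1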
AdityaSavara/Frhodo | [
"90ad23b8f238bd2b4dd9b94eea757fa658e92499",
"90ad23b8f238bd2b4dd9b94eea757fa658e92499"
] | [
"src/mech_fcns.py",
"Development/Random Testing/cross_correlation_testing/test.py"
] | [
"# This file is part of Frhodo. Copyright © 2020, UChicago Argonne, LLC\n# and licensed under BSD-3-Clause. See License.txt in the top-level \n# directory for license and copyright information.\n\nimport os, io, stat, contextlib, pathlib, time\nimport cantera as ct\nfrom cantera import interrupts, cti2yaml#, ck2yaml, ctml2yaml\nimport numpy as np\nimport integrate, shock_fcns, ck2yaml\nfrom timeit import default_timer as timer\n\n# list of all possible variables\nall_var = {'Laboratory Time': {'SIM_name': 't_lab', 'sub_type': None},\n 'Shockwave Time': {'SIM_name': 't_shock', 'sub_type': None}, \n 'Gas Velocity': {'SIM_name': 'vel', 'sub_type': None}, \n 'Temperature': {'SIM_name': 'T', 'sub_type': None}, \n 'Pressure': {'SIM_name': 'P', 'sub_type': None}, \n 'Enthalpy': {'SIM_name': 'h', 'sub_type': ['total', 'species']},\n 'Entropy': {'SIM_name': 's', 'sub_type': ['total', 'species']}, \n 'Density': {'SIM_name': 'rho', 'sub_type': None}, \n 'Density Gradient': {'SIM_name': 'drhodz', 'sub_type': ['total', 'rxn']},\n '% Density Gradient': {'SIM_name': 'perc_drhodz', 'sub_type': ['rxn']},\n 'Mole Fraction': {'SIM_name': 'X', 'sub_type': ['species']}, \n 'Mass Fraction': {'SIM_name': 'Y', 'sub_type': ['species']}, \n 'Concentration': {'SIM_name': 'conc', 'sub_type': ['species']}, \n 'Net Production Rate': {'SIM_name': 'wdot', 'sub_type': ['species']},\n 'Creation Rate': {'SIM_name': 'wdotfor', 'sub_type': ['species']}, \n 'Destruction Rate': {'SIM_name': 'wdotrev', 'sub_type': ['species']},\n 'Heat Release Rate': {'SIM_name': 'HRR', 'sub_type': ['total', 'rxn']},\n 'Delta Enthalpy (Heat of Reaction)':{'SIM_name': 'delta_h', 'sub_type': ['rxn']},\n 'Delta Entropy': {'SIM_name': 'delta_s', 'sub_type': ['rxn']}, \n 'Equilibrium Constant': {'SIM_name': 'eq_con', 'sub_type': ['rxn']}, \n 'Forward Rate Constant': {'SIM_name': 'rate_con', 'sub_type': ['rxn']}, \n 'Reverse Rate Constant': {'SIM_name': 'rate_con_rev', 'sub_type': ['rxn']}, \n 'Net Rate of Progress': {'SIM_name': 'net_ROP', 'sub_type': ['rxn']}, \n 'Forward Rate of Progress': {'SIM_name': 'for_ROP', 'sub_type': ['rxn']}, \n 'Reverse Rate of Progress': {'SIM_name': 'rev_ROP', 'sub_type': ['rxn']}}\n\nrev_all_var = {all_var[key]['SIM_name']: \n {'name': key, 'sub_type': all_var[key]['sub_type']} for key in all_var.keys()}\n\n# translation dictionary between SIM name and ct.SolutionArray name\nSIM_Dict = {'t_lab': 't', 't_shock': 't_shock', 'z': 'z', 'A': 'A', 'vel': 'vel', 'T': 'T', 'P': 'P', \n 'h_tot': 'enthalpy_mole', 'h': 'partial_molar_enthalpies', \n 's_tot': 'entropy_mole', 's': 'partial_molar_entropies',\n 'rho': 'density', 'drhodz_tot': 'drhodz_tot', 'drhodz': 'drhodz', 'perc_drhodz': 'perc_drhodz',\n 'Y': 'Y', 'X': 'X', 'conc': 'concentrations', 'wdot': 'net_production_rates', \n 'wdotfor': 'creation_rates', 'wdotrev': 'destruction_rates', \n 'HRR_tot': 'heat_release_rate', 'HRR': 'heat_production_rates',\n 'delta_h': 'delta_enthalpy', 'delta_s': 'delta_entropy', \n 'eq_con': 'equilibrium_constants', 'rate_con': 'forward_rate_constants', \n 'rate_con_rev': 'reverse_rate_constants', 'net_ROP': 'net_rates_of_progress',\n 'for_ROP': 'forward_rates_of_progress', 'rev_ROP': 'reverse_rates_of_progress'}\n\nclass SIM_Property:\n def __init__(self, name, parent=None):\n self.name = name\n self.parent = parent\n self.conversion = None # this needs to be assigned per property\n self.value = {'SI': np.array([]), 'CGS': np.array([])}\n self.ndim = self.value['SI'].ndim\n\n def clear(self):\n self.value = {'SI': np.array([]), 'CGS': 
np.array([])}\n self.ndim = self.value['SI'].ndim\n\n def __call__(self, idx=None, units='CGS'): # units must be 'CGS' or 'SI'\n # assumes Sim data comes in as SI and is converted to CGS\n # values to be calculated post-simulation\n if len(self.value['SI']) == 0 or np.isnan(self.value['SI']).all():\n parent = self.parent\n if self.name == 'drhodz_tot':\n self.value['SI'] = shock_fcns.drhodz(parent.states)\n elif self.name == 'drhodz':\n self.value['SI'] = shock_fcns.drhodz_per_rxn(parent.states)\n elif self.name == 'perc_drhodz':\n self.value['SI'] = parent.drhodz(units='SI').T*100/parent.drhodz_tot(units='SI')[:,None]\n else:\n self.value['SI'] = getattr(parent.states, SIM_Dict[self.name])\n\n if self.value['SI'].ndim > 1: # Transpose if matrix\n self.value['SI'] = self.value['SI'].T\n\n self.ndim = self.value['SI'].ndim\n\n # currently converts entire list of properties rather than by index\n if units == 'CGS' and len(self.value['CGS']) == 0:\n if self.conversion is None:\n self.value['CGS'] = self.value['SI']\n else:\n self.value['CGS'] = self.conversion(self.value['SI'])\n\n return self.value[units]\n\n\nclass Simulation_Result:\n def __init__(self, num=None, states=None, reactor_vars=[]):\n self.states = states\n self.all_var = all_var\n self.rev_all_var = rev_all_var\n self.reactor_var = {}\n for var in reactor_vars:\n if var in self.rev_all_var:\n self.reactor_var[self.rev_all_var[var]['name']] = var\n\n if num is None: # if no simulation stop here\n self.reactor_var = {}\n return\n\n self.conv = {'conc': 1E-3, 'wdot': 1E-3, 'P': 760/101325, 'vel': 1E2, \n 'rho': 1E-3, 'drhodz_tot': 1E-5, 'drhodz': 1E-5, \n 'delta_h': 1E-3/4184, 'h_tot': 1E-3/4184, 'h': 1E-3/4184, # to kcal\n 'delta_s': 1/4184, 's_tot': 1/4184, 's': 1/4184, \n 'eq_con': 1E3**np.array(num['reac'] - num['prod'])[:,None],\n 'rate_con': np.power(1E3,num['reac']-1)[:,None],\n 'rate_con_rev': np.power(1E3,num['prod']-1)[:,None],\n 'net_ROP': 1E-3/3.8, # Don't understand 3.8 value\n 'for_ROP': 1E-3/3.8, # Don't understand 3.8 value\n 'rev_ROP': 1E-3/3.8} # Don't understand 3.8 value\n\n for name in reactor_vars:\n property = SIM_Property(name, parent=self)\n if name in self.conv:\n property.conversion = lambda x, s=self.conv[name]: x*s\n setattr(self, name, property)\n\n def set_independent_var(self, ind_var, units='CGS'):\n self.independent_var = getattr(self, ind_var)(units=units)\n\n def set_observable(self, observable, units='CGS'):\n k = observable['sub']\n if observable['main'] == 'Temperature':\n self.observable = self.T(units=units)\n elif observable['main'] == 'Pressure':\n self.observable = self.P(units=units)\n elif observable['main'] == 'Density Gradient':\n self.observable = self.drhodz_tot(units=units)\n elif observable['main'] == 'Heat Release Rate':\n self.observable = self.HRR_tot(units=units)\n elif observable['main'] == 'Mole Fraction':\n self.observable = self.X(units=units)\n elif observable['main'] == 'Mass Fraction':\n self.observable = self.Y(units=units)\n elif observable['main'] == 'Concentration':\n self.observable = self.conc(units=units)\n\n if self.observable.ndim > 1: # reduce observable down to only plotted information\n self.observable = self.observable[k]\n\n def finalize(self, success, ind_var, observable, units='CGS'): \n self.set_independent_var(ind_var, units)\n self.set_observable(observable, units)\n \n self.success = success\n \n\nclass Chemical_Mechanism:\n def __init__(self):\n self.isLoaded = False\n self.reactor = Reactor(self)\n\n def load_mechanism(self, path, silent=False):\n 
def loader(self, path):\n # path is assumed to be the path dictionary\n surfaces = []\n if path['mech'].suffix in ['.yaml', '.yml']: # check if it's a yaml cantera file\n mech_path = str(path['mech'])\n else: # if not convert into yaml cantera file\n mech_path = str(path['Cantera_Mech'])\n \n if path['mech'].suffix == '.cti':\n cti2yaml.convert(path['mech'], path['Cantera_Mech'])\n elif path['mech'].suffix in ['.ctml', '.xml']:\n raise Exception('not implemented')\n #ctml2yaml.convert(path['mech'], path['Cantera_Mech'])\n else: # if not a cantera file, assume chemkin\n surfaces = self.chemkin2cantera(path)\n \n print('Validating mechanism...', end='') \n try: # This test taken from ck2cti\n self.yaml_txt = path['Cantera_Mech'].read_text() # Storing full text could be bad if large\n self.gas = ct.Solution(yaml=self.yaml_txt)\n for surfname in surfaces:\n phase = ct.Interface(outName, surfname, [self.gas])\n print('PASSED.')\n except RuntimeError as e:\n print('FAILED.')\n print(e)\n \n output = {'success': False, 'message': []}\n # Intialize and report any problems to log, not to console window\n stdout = io.StringIO()\n stderr = io.StringIO()\n with contextlib.redirect_stderr(stderr):\n with contextlib.redirect_stdout(stdout):\n try:\n loader(self, path)\n output['success'] = True\n except Exception as e:\n output['message'].append('Error in loading mech\\n{:s}'.format(str(e)))\n except:\n pass\n # output['message'].append('Error when loading mech:\\n')\n \n ct_out = stdout.getvalue()\n ct_err = stderr.getvalue().replace('INFO:root:', 'Warning: ')\n \n if 'FAILED' in ct_out:\n output['success'] = False\n self.isLoaded = False\n elif 'PASSED' in ct_out:\n output['success'] = True\n self.isLoaded = True\n \n for log_str in [ct_out, ct_err]:\n if log_str != '' and not silent:\n if (path['Cantera_Mech'], pathlib.WindowsPath): # reformat string to remove \\\\ making it unable to be copy paste\n cantera_path = str(path['Cantera_Mech']).replace('\\\\', '\\\\\\\\')\n log_str = log_str.replace(cantera_path, str(path['Cantera_Mech']))\n output['message'].append(log_str)\n output['message'].append('\\n')\n \n if self.isLoaded:\n self.set_rate_expression_coeffs() # set copy of coeffs\n self.set_thermo_expression_coeffs() # set copy of thermo coeffs\n \n return output\n \n def chemkin2cantera(self, path):\n if path['thermo'] is not None:\n surfaces = ck2yaml.convert_mech(path['mech'], thermo_file=path['thermo'], transport_file=None, surface_file=None,\n phase_name='gas', out_name=path['Cantera_Mech'], quiet=False, permissive=True)\n else:\n surfaces = ck2yaml.convert_mech(path['mech'], thermo_file=None, transport_file=None, surface_file=None,\n phase_name='gas', out_name=path['Cantera_Mech'], quiet=False, permissive=True)\n \n return surfaces\n \n def set_mechanism(self, mech_txt):\n self.gas = ct.Solution(yaml=mech_txt)\n \n self.set_rate_expression_coeffs() # set copy of coeffs\n self.set_thermo_expression_coeffs() # set copy of thermo coeffs\n \n def gas(self): return self.gas \n \n def set_rate_expression_coeffs(self):\n coeffs = []\n coeffs_bnds = []\n rate_bnds = []\n for rxnNum, rxn in enumerate(self.gas.reactions()):\n if hasattr(rxn, 'rate'):\n attrs = [p for p in dir(rxn.rate) if not p.startswith('_')] # attributes not including __ \n coeffs.append({attr: getattr(rxn.rate, attr) for attr in attrs})\n coeffs_bnds.append({attr: {'resetVal': getattr(rxn.rate, attr), \n 'value': np.nan, 'type': 'F'} for attr in attrs})\n for coef_name in coeffs_bnds[-1].keys():\n 
coeffs_bnds[-1][coef_name]['limits'] = Uncertainty('coef', rxnNum, \n coef_name=coef_name, coeffs_bnds=coeffs_bnds)\n \n rate_bnds.append({'value': np.nan, 'limits': None, 'type': 'F', 'opt': False})\n rate_bnds[-1]['limits'] = Uncertainty('rate', rxnNum, rate_bnds=rate_bnds)\n else:\n coeffs.append({})\n coeffs_bnds.append({})\n rate_bnds.append({})\n\n self.coeffs = coeffs\n self.coeffs_bnds = coeffs_bnds\n self.rate_bnds = rate_bnds\n \n def set_thermo_expression_coeffs(self): # TODO Doesn't work with NASA 9\n self.thermo_coeffs = []\n for i in range(self.gas.n_species):\n S = self.gas.species(i)\n thermo_dict = {'name': S.name}\n thermo_dict['h_scaler'] = 1\n thermo_dict['s_scaler'] = 1\n try:\n thermo_dict['type'] = type(S.thermo)\n thermo_dict['coeffs'] = np.array(S.thermo.coeffs)\n except:\n thermo_dict['type'] = 'unknown'\n thermo_dict['coeffs'] = []\n \n self.thermo_coeffs.append(thermo_dict) \n \n def modify_reactions(self, coeffs, rxnNums=[]): # Only works for Arrhenius equations currently\n if not rxnNums: # if rxnNums does not exist, modify all\n rxnNums = range(len(coeffs))\n else:\n if isinstance(rxnNums, (float, int)): # if single reaction given, run that one\n rxnNums = [rxnNums]\n \n for rxnNum in rxnNums:\n rxn = self.gas.reaction(rxnNum)\n rxnChanged = False\n if type(rxn) is ct.ElementaryReaction or type(rxn) is ct.ThreeBodyReaction:\n for coefName in ['activation_energy', 'pre_exponential_factor', 'temperature_exponent']:\n if coeffs[rxnNum][coefName] != eval(f'rxn.rate.{coefName}'):\n rxnChanged = True\n \n if rxnChanged: # Update reaction rate\n A = coeffs[rxnNum]['pre_exponential_factor']\n b = coeffs[rxnNum]['temperature_exponent']\n Ea = coeffs[rxnNum]['activation_energy']\n rxn.rate = ct.Arrhenius(A, b, Ea)\n # elif type(rxn) is ct.PlogReaction:\n # print(dir(rxn))\n # print(rxn.rates[rxn_num])\n # elif type(rxn) is ct.ChebyshevReaction: \n # print(dir(rxn))\n # print(rxn.rates[rxn_num])\n else:\n continue\n \n if rxnChanged:\n self.gas.modify_reaction(rxnNum, rxn)\n\n time.sleep(5E-3) # Not sure if this is necessary, but it reduces strange behavior in incident shock reactor\n \n def modify_thermo(self, multipliers): # Only works for NasaPoly2 (NASA 7) currently\n for i in range(np.shape(self.gas.species_names)[0]):\n S_initial = self.gas.species(i)\n S = self.gas.species(i)\n if type(S.thermo) is ct.NasaPoly2:\n # Get current values \n T_low = S_initial.thermo.min_temp\n T_high = S_initial.thermo.max_temp\n P_ref = S_initial.thermo.reference_pressure\n coeffs = S_initial.thermo.coeffs\n \n # Update thermo properties\n coeffs[1:] *= multipliers[i]\n S.thermo = ct.NasaPoly2(T_low, T_high, P_ref, coeffs)\n # elif type(S.thermo) is ct.ShomatePoly2: continue\n # elif type(S.thermo) is ct.NasaPoly1: continue\n # elif type(S.thermo) is ct.Nasa9PolyMultiTempRegion: continue\n # elif type(S.thermo) is ct.Nasa9Poly1: continue\n # elif type(S.thermo) is ct.ShomatePoly: continue\n else:\n print(\"{:.s}'s thermo is type: {:s}\".format(self.gas.species_names[i], type(S.thermo)))\n continue\n\n self.gas.modify_species(i, S)\n \n def set_TPX(self, T, P, X=[]):\n output = {'success': False, 'message': []}\n if T <= 0 or np.isnan(T):\n output['message'].append('Error: Temperature is invalid')\n return output\n if P <= 0 or np.isnan(P):\n output['message'].append('Error: Pressure is invalid')\n return output\n if len(X) > 0:\n for species in X:\n if species not in self.gas.species_names:\n output['message'].append('Species: {:s} is not in the mechanism'.format(species))\n return 
output\n \n self.gas.TPX = T, P, X\n else:\n self.gas.TP = T, P\n \n output['success'] = True\n return output\n\n def run(self, reactor_choice, t_end, T_reac, P_reac, mix, **kwargs):\n return self.reactor.run(reactor_choice, t_end, T_reac, P_reac, mix, **kwargs)\n\n\nclass Uncertainty: # alternate name: why I hate pickle part 10\n def __init__(self, unc_type, rxnNum, **kwargs):\n # self.gas = gas\n self.unc_type = unc_type\n self.rxnNum = rxnNum\n self.unc_dict = kwargs\n \n def unc_fcn(self, x, uncVal, uncType): # uncertainty function\n if np.isnan(uncVal):\n return [np.nan, np.nan]\n elif uncType == 'F':\n return np.sort([x/uncVal, x*uncVal])\n elif uncType == '%':\n return np.sort([x/(1+uncVal), x*(1+uncVal)])\n elif uncType == '±':\n return np.sort([x-uncVal, x+uncVal])\n elif uncType == '+':\n return np.sort([x, x+uncVal])\n elif uncType == '-':\n return np.sort([x-uncVal, x])\n\n def __call__(self, x=None):\n if self.unc_type == 'rate':\n #if x is None: # defaults to giving current rate bounds\n # x = self.gas.forward_rate_constants[self.rxnNum]\n rate_bnds = self.unc_dict['rate_bnds']\n unc_value = rate_bnds[self.rxnNum]['value']\n unc_type = rate_bnds[self.rxnNum]['type']\n return self.unc_fcn(x, unc_value, unc_type)\n else:\n coeffs_bnds = self.unc_dict['coeffs_bnds']\n coefName = self.unc_dict['coef_name']\n coef_dict = coeffs_bnds[self.rxnNum][coefName]\n coef_val = coef_dict['resetVal']\n unc_value = coef_dict['value']\n unc_type = coef_dict['type']\n return self.unc_fcn(coef_val, unc_value, unc_type)\n \n\nclass Reactor:\n def __init__(self, mech):\n self.mech = mech\n self.ODE_success = False\n\n def run(self, reactor_choice, t_end, T_reac, P_reac, mix, **kwargs):\n def list2ct_mixture(mix): # list in the form of [[species, mol_frac], [species, mol_frac],...]\n return ', '.join(\"{!s}:{!r}\".format(species, mol_frac) for (species, mol_frac) in mix)\n \n details = {'success': False, 'message': []}\n \n if isinstance(mix, list):\n mix = list2ct_mixture(mix)\n \n mech_out = self.mech.set_TPX(T_reac, P_reac, mix)\n if not mech_out['success']:\n details['success'] = False\n details['message'] = mech_out['message']\n return None, mech_out\n \n #start = timer()\n if reactor_choice == 'Incident Shock Reactor':\n SIM, details = self.incident_shock_reactor(self.mech.gas, details, t_end, **kwargs)\n elif '0d Reactor' in reactor_choice:\n if reactor_choice == '0d Reactor - Constant Volume':\n reactor = ct.IdealGasReactor(self.mech.gas)\n elif reactor_choice == '0d Reactor - Constant Pressure':\n reactor = ct.IdealGasConstPressureReactor(self.mech.gas)\n \n SIM, details = self.zero_d_ideal_gas_reactor(self.mech.gas, reactor, details, t_end, **kwargs)\n \n #print('{:0.1f} us'.format((timer() - start)*1E3))\n return SIM, details\n \n def checkRxnRates(self, gas):\n limit = [1E9, 1E15, 1E21] # reaction limit [first order, second order, third order]\n checkRxn = []\n for rxnIdx in range(gas.n_reactions):\n coef_sum = int(sum(gas.reaction(rxnIdx).reactants.values()))\n if type(gas.reactions()[rxnIdx]) is ct.ThreeBodyReaction:\n coef_sum += 1\n if coef_sum > 0 and coef_sum-1 <= len(limit): # check that the limit is specified\n rate = [gas.forward_rate_constants[rxnIdx], gas.reverse_rate_constants[rxnIdx]]\n if (np.array(rate) > limit[coef_sum-1]).any(): # if forward or reverse rate exceeds limit\n checkRxn.append(rxnIdx+1)\n \n return checkRxn\n\n def incident_shock_reactor(self, gas, details, t_end, **kwargs):\n if 'u_reac' not in kwargs or 'rho1' not in kwargs:\n details['success'] = False\n 
details['message'] = 'velocity and rho1 not specified\\n'\n return None, details\n \n # set default values\n var = {'sim_int_f': 1, 'observable': {'main': 'Density Gradient', 'sub': 0},\n 'A1': 0.2, 'As': 0.2, 'L': 0.1, 't_lab_save': None, \n 'ODE_solver': 'BDF', 'rtol': 1E-4, 'atol': 1E-7} \n var.update(kwargs)\n \n y0 = np.hstack((0.0, var['A1'], gas.density, var['u_reac'], gas.T, 0.0, gas.Y)) # Initial condition\n ode = shock_fcns.ReactorOde(gas, t_end, var['rho1'], var['L'], var['As'], var['A1'], False)\n\n sol = integrate.solve_ivp(ode, [0, t_end], y0, method=var['ODE_solver'],\n dense_output=True, rtol=var['rtol'], atol=var['atol'])\n \n if sol.success:\n self.ODE_success = True # this is passed to SIM to inform saving output function\n details['success'] = True\n else:\n self.ODE_success = False # this is passed to SIM to inform saving output function\n details['success'] = False\n \n # Generate log output\n explanation = '\\nCheck for: Fast rates or bad thermo data'\n checkRxns = self.checkRxnRates(gas)\n if len(checkRxns) > 0:\n explanation += '\\nSuggested Reactions: ' + ', '.join([str(x) for x in checkRxns])\n details['message'] = '\\nODE Error: {:s}\\n{:s}\\n'.format(sol.message, explanation)\n \n if var['sim_int_f'] > np.shape(sol.t)[0]: # in case of integration failure\n var['sim_int_f'] = np.shape(sol.t)[0]\n \n if var['sim_int_f'] == 1:\n t_sim = sol.t\n else: # perform interpolation if integrator sample factor > 1\n j = 0\n t_sim = np.zeros(var['sim_int_f']*(np.shape(sol.t)[0] - 1) + 1) # preallocate array\n for i in range(np.shape(sol.t)[0]-1):\n t_interp = np.interp(np.linspace(i, i+1, var['sim_int_f']+1), [i, i+1], sol.t[i:i+2])\n t_sim[j:j+len(t_interp)] = t_interp\n j += len(t_interp) - 1\n \n ind_var = 't_lab' # INDEPENDENT VARIABLE CURRENTLY HARDCODED FOR t_lab\n if var['t_lab_save'] is None: # if t_save is not being sent, only plotting variables are needed\n t_all = t_sim\n else:\n t_all = np.sort(np.unique(np.concatenate((t_sim, var['t_lab_save'])))) # combine t_all and t_save, sort, only unique values\n \n states = ct.SolutionArray(gas, extra=['t', 't_shock', 'z', 'A', 'vel', 'drhodz_tot', 'drhodz', 'perc_drhodz'])\n for i, t in enumerate(t_all): # calculate from solution\n y = sol.sol(t) \n z, A, rho, v, T, t_shock = y[0:6]\n Y = y[6:]\n\n states.append(TDY=(T, rho, Y), t=t, t_shock=t_shock, z=z, A=A, vel=v, drhodz_tot=np.nan, drhodz=np.nan, perc_drhodz=np.nan)\n \n reactor_vars = ['t_lab', 't_shock', 'z', 'A', 'vel', 'T', 'P', 'h_tot', 'h', \n 's_tot', 's', 'rho', 'drhodz_tot', 'drhodz', 'perc_drhodz',\n 'Y', 'X', 'conc', 'wdot', 'wdotfor', 'wdotrev', \n 'HRR_tot', 'HRR', 'delta_h', 'delta_s', \n 'eq_con', 'rate_con', 'rate_con_rev', 'net_ROP', 'for_ROP', 'rev_ROP']\n\n num = {'reac': np.sum(gas.reactant_stoich_coeffs(), axis=0),\n 'prod': np.sum(gas.product_stoich_coeffs(), axis=0),\n 'rxns': gas.n_reactions}\n \n SIM = Simulation_Result(num, states, reactor_vars)\n SIM.finalize(self.ODE_success, ind_var, var['observable'], units='CGS')\n \n return SIM, details\n \n def zero_d_ideal_gas_reactor(self, gas, reactor, details, t_end, **kwargs):\n # set default values\n var = {'sim_int_f': 1, 'observable': {'main': 'Concentration', 'sub': 0},\n 't_lab_save': None, 'rtol': 1E-4, 'atol': 1E-7}\n \n var.update(kwargs)\n \n # Modify reactor if necessary for frozen composition and isothermal\n reactor.energy_enabled = var['solve_energy']\n reactor.chemistry_enabled = not var['frozen_comp']\n \n # Create Sim\n sim = ct.ReactorNet([reactor])\n sim.atol = var['atol']\n 
sim.rtol = var['rtol']\n \n # set up times and observables\n ind_var = 't_lab' # INDEPENDENT VARIABLE CURRENTLY HARDCODED FOR t_lab\n if var['t_lab_save'] is None:\n t_all = [t_end]\n else:\n t_all = np.sort(np.unique(np.concatenate(([t_end], var['t_lab_save'])))) # combine t_end and t_save, sort, only unique values\n \n states = ct.SolutionArray(gas, extra=['t'])\n states.append(reactor.thermo.state, t = 0.0)\n for t in t_all:\n while sim.time < t: # integrator step until time > target time\n sim.step()\n if sim.time > t: # force interpolation to target time\n sim.advance(t)\n states.append(reactor.thermo.state, t=sim.time)\n \n self.ODE_success = True # TODO: NEED REAL ERROR CHECKING OF REACTOR SUCCESS\n details['success'] = True\n \n reactor_vars = ['t_lab', 'T', 'P', 'h_tot', 'h', 's_tot', 's', 'rho', \n 'Y', 'X', 'conc', 'wdot', 'wdotfor', 'wdotrev', 'HRR_tot', 'HRR',\n 'delta_h', 'delta_s', 'eq_con', 'rate_con', 'rate_con_rev', \n 'net_ROP', 'for_ROP', 'rev_ROP']\n\n num = {'reac': np.sum(gas.reactant_stoich_coeffs(), axis=0),\n 'prod': np.sum(gas.product_stoich_coeffs(), axis=0),\n 'rxns': gas.n_reactions}\n \n SIM = Simulation_Result(num, states, reactor_vars)\n SIM.finalize(self.ODE_success, ind_var, var['observable'], units='CGS')\n\n return SIM, details",
"import matplotlib.pyplot as plt\nfrom scipy import signal\nimport numpy as np\n\ndef lag_finder(x1, y1, x2, y2):\n n = len(y1)\n if len(y1) > len(y2):\n i_match = np.argwhere(np.in1d(x1, x2)).flatten()\n print(i_match)\n y2 = np.append(np.zeros([1, i_match[0]]), y2)\n y2 = np.append(y2, np.zeros([1, len(x1)-i_match[-1]-1]))\n \n corr = signal.correlate(y1, y2, mode='full')\n # corr = np.convolve(y1, y2[::-1], mode='full')\n \n dt = np.linspace(-x1[-1], x1[-1], 2*x1.size-1)\n # delay = dt[corr.argmax()]\n plt.figure()\n plt.plot(dt, corr)\n plt.show()\n \n print(x2[0], x2[-1])\n print(x1[0], x1[-1])\n delay = np.mean(np.diff(x1))*corr.argmax() - x1[-1]\n \n return delay\n\ntime_shift = 2\nx1 = np.arange(0, 2*np.pi, np.pi/2**9)\nx2 = x1[int(len(x1)/4):int(len(x1)*1/2)]\nx2 = x1\ny1 = np.sin(x1)\ny2 = np.sin(x2-time_shift)\n# y1 *= np.random.normal(0.95, 1.05, y1.shape)\n# y1 += np.random.normal(0, 0.025, y1.shape)\n\ndelay = lag_finder(x1, y1, x2, y2)\nprint(delay)\n\nplt.figure()\nplt.plot(x1, y1)\nplt.plot(x2, y2)\nplt.plot(x2+delay, y2)\nplt.show()"
] | [
[
"numpy.hstack",
"numpy.linspace",
"numpy.power",
"numpy.isnan",
"numpy.sort",
"numpy.concatenate",
"numpy.shape",
"numpy.array"
],
[
"scipy.signal.correlate",
"numpy.linspace",
"numpy.arange",
"numpy.in1d",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.diff",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
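The second file in the row above (cross_correlation_testing/test.py) estimates the time shift between two signals from the argmax of their full cross-correlation. A hedged sketch of the same idea with scipy.signal.correlate, using synthetic noise with a known delay instead of the repository's shifted sine waves so the recovered lag is unambiguous; for 'full' mode the lag axis runs from -(len(y2)-1) to len(y1)-1 samples:

    import numpy as np
    from scipy import signal

    rng = np.random.default_rng(0)
    shift = 120                                        # y2 trails y1 by 120 samples
    y1 = rng.standard_normal(500)
    y2 = np.concatenate([np.zeros(shift), y1])[:500]   # delayed (and truncated) copy of y1

    corr = signal.correlate(y1, y2, mode='full')
    lags = np.arange(-(len(y2) - 1), len(y1))          # sample lags for 'full' mode
    delay = -lags[np.argmax(corr)]                     # recovered delay of y2 behind y1, in samples
    assert delay == shift

test.py applies the same argmax-of-correlation idea to shifted sine waves and converts the resulting sample lag to seconds with the sampling step.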
Jos33y/student-performance-knn | [
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4",
"4e965434f52dd6a1380904aa257df1edfaebb3c4"
] | [
"venv/Lib/site-packages/sklearn/tree/tests/test_reingold_tilford.py",
"venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_loss.py",
"venv/Lib/site-packages/pandas/tests/indexing/multiindex/conftest.py",
"venv/Lib/site-packages/pandas/core/groupby/grouper.py",
"venv/Lib/site-packages/pandas/tests/series/test_duplicates.py",
"venv/Lib/site-packages/pandas/tests/frame/methods/test_append.py",
"venv/Lib/site-packages/sklearn/tests/test_common.py",
"venv/Lib/site-packages/pandas/core/resample.py",
"venv/Lib/site-packages/pandas/core/computation/pytables.py",
"venv/Lib/site-packages/pandas/io/date_converters.py",
"venv/Lib/site-packages/pandas/tests/tslibs/test_timedeltas.py",
"venv/Lib/site-packages/sklearn/metrics/_plot/roc_curve.py",
"venv/Lib/site-packages/pandas/core/indexes/numeric.py",
"venv/Lib/site-packages/pandas/core/arrays/interval.py",
"venv/Lib/site-packages/pandas/tests/window/test_rolling.py",
"venv/Lib/site-packages/pandas/core/computation/expressions.py",
"venv/Lib/site-packages/pandas/tests/resample/test_datetime_index.py",
"venv/Lib/site-packages/sklearn/neural_network/_base.py",
"venv/Lib/site-packages/sklearn/impute/_knn.py"
] | [
"import numpy as np\r\nimport pytest\r\nfrom sklearn.tree._reingold_tilford import buchheim, Tree\r\n\r\nsimple_tree = Tree(\"\", 0,\r\n Tree(\"\", 1),\r\n Tree(\"\", 2))\r\n\r\nbigger_tree = Tree(\"\", 0,\r\n Tree(\"\", 1,\r\n Tree(\"\", 3),\r\n Tree(\"\", 4,\r\n Tree(\"\", 7),\r\n Tree(\"\", 8)\r\n ),\r\n ),\r\n Tree(\"\", 2,\r\n Tree(\"\", 5),\r\n Tree(\"\", 6)\r\n )\r\n )\r\n\r\n\r\[email protected](\"tree, n_nodes\", [(simple_tree, 3), (bigger_tree, 9)])\r\ndef test_buchheim(tree, n_nodes):\r\n def walk_tree(draw_tree):\r\n res = [(draw_tree.x, draw_tree.y)]\r\n for child in draw_tree.children:\r\n # parents higher than children:\r\n assert child.y == draw_tree.y + 1\r\n res.extend(walk_tree(child))\r\n if len(draw_tree.children):\r\n # these trees are always binary\r\n # parents are centered above children\r\n assert draw_tree.x == (draw_tree.children[0].x\r\n + draw_tree.children[1].x) / 2\r\n return res\r\n\r\n layout = buchheim(tree)\r\n coordinates = walk_tree(layout)\r\n assert len(coordinates) == n_nodes\r\n # test that x values are unique per depth / level\r\n # we could also do it quicker using defaultdicts..\r\n depth = 0\r\n while True:\r\n x_at_this_depth = [coordinates[0] for node in coordinates\r\n if coordinates[1] == depth]\r\n if not x_at_this_depth:\r\n # reached all leafs\r\n break\r\n assert len(np.unique(x_at_this_depth)) == len(x_at_this_depth)\r\n depth += 1\r\n",
"import numpy as np\r\nfrom numpy.testing import assert_almost_equal\r\nfrom numpy.testing import assert_allclose\r\nfrom scipy.optimize import newton\r\nfrom sklearn.utils import assert_all_finite\r\nfrom sklearn.utils.fixes import sp_version\r\nimport pytest\r\n\r\nfrom sklearn.ensemble._hist_gradient_boosting.loss import _LOSSES\r\nfrom sklearn.ensemble._hist_gradient_boosting.common import Y_DTYPE\r\nfrom sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE\r\n\r\n\r\ndef get_derivatives_helper(loss):\r\n \"\"\"Return get_gradients() and get_hessians() functions for a given loss.\r\n \"\"\"\r\n\r\n def get_gradients(y_true, raw_predictions):\r\n # create gradients and hessians array, update inplace, and return\r\n gradients = np.empty_like(raw_predictions, dtype=G_H_DTYPE)\r\n hessians = np.empty_like(raw_predictions, dtype=G_H_DTYPE)\r\n loss.update_gradients_and_hessians(gradients, hessians, y_true,\r\n raw_predictions)\r\n return gradients\r\n\r\n def get_hessians(y_true, raw_predictions):\r\n # create gradients and hessians array, update inplace, and return\r\n gradients = np.empty_like(raw_predictions, dtype=G_H_DTYPE)\r\n hessians = np.empty_like(raw_predictions, dtype=G_H_DTYPE)\r\n loss.update_gradients_and_hessians(gradients, hessians, y_true,\r\n raw_predictions)\r\n\r\n if loss.__class__.__name__ == 'LeastSquares':\r\n # hessians aren't updated because they're constant:\r\n # the value is 1 (and not 2) because the loss is actually an half\r\n # least squares loss.\r\n hessians = np.full_like(raw_predictions, fill_value=1)\r\n elif loss.__class__.__name__ == 'LeastAbsoluteDeviation':\r\n # hessians aren't updated because they're constant\r\n hessians = np.full_like(raw_predictions, fill_value=0)\r\n\r\n return hessians\r\n\r\n return get_gradients, get_hessians\r\n\r\n\r\[email protected]('loss, x0, y_true', [\r\n ('least_squares', -2., 42),\r\n ('least_squares', 117., 1.05),\r\n ('least_squares', 0., 0.),\r\n # I don't understand why but y_true == 0 fails :/\r\n # ('binary_crossentropy', 0.3, 0),\r\n ('binary_crossentropy', -12, 1),\r\n ('binary_crossentropy', 30, 1),\r\n])\r\[email protected](sp_version == (1, 2, 0),\r\n reason='bug in scipy 1.2.0, see scipy issue #9608')\r\[email protected](Y_DTYPE != np.float64,\r\n reason='Newton internally uses float64 != Y_DTYPE')\r\ndef test_derivatives(loss, x0, y_true):\r\n # Check that gradients are zero when the loss is minimized on 1D array\r\n # using Halley's method with the first and second order derivatives\r\n # computed by the Loss instance.\r\n\r\n loss = _LOSSES[loss]()\r\n y_true = np.array([y_true], dtype=Y_DTYPE)\r\n x0 = np.array([x0], dtype=Y_DTYPE).reshape(1, 1)\r\n get_gradients, get_hessians = get_derivatives_helper(loss)\r\n\r\n def func(x):\r\n return loss(y_true, x)\r\n\r\n def fprime(x):\r\n return get_gradients(y_true, x)\r\n\r\n def fprime2(x):\r\n return get_hessians(y_true, x)\r\n\r\n optimum = newton(func, x0=x0, fprime=fprime, fprime2=fprime2)\r\n assert np.allclose(loss.inverse_link_function(optimum), y_true)\r\n assert np.allclose(loss(y_true, optimum), 0)\r\n assert np.allclose(get_gradients(y_true, optimum), 0)\r\n\r\n\r\[email protected]('loss, n_classes, prediction_dim', [\r\n ('least_squares', 0, 1),\r\n ('least_absolute_deviation', 0, 1),\r\n ('binary_crossentropy', 2, 1),\r\n ('categorical_crossentropy', 3, 3),\r\n])\r\[email protected](Y_DTYPE != np.float64,\r\n reason='Need 64 bits float precision for numerical checks')\r\ndef test_numerical_gradients(loss, n_classes, 
prediction_dim, seed=0):\r\n # Make sure gradients and hessians computed in the loss are correct, by\r\n # comparing with their approximations computed with finite central\r\n # differences.\r\n # See https://en.wikipedia.org/wiki/Finite_difference.\r\n\r\n rng = np.random.RandomState(seed)\r\n n_samples = 100\r\n if loss in ('least_squares', 'least_absolute_deviation'):\r\n y_true = rng.normal(size=n_samples).astype(Y_DTYPE)\r\n else:\r\n y_true = rng.randint(0, n_classes, size=n_samples).astype(Y_DTYPE)\r\n raw_predictions = rng.normal(\r\n size=(prediction_dim, n_samples)\r\n ).astype(Y_DTYPE)\r\n loss = _LOSSES[loss]()\r\n get_gradients, get_hessians = get_derivatives_helper(loss)\r\n\r\n # only take gradients and hessians of first tree / class.\r\n gradients = get_gradients(y_true, raw_predictions)[0, :].ravel()\r\n hessians = get_hessians(y_true, raw_predictions)[0, :].ravel()\r\n\r\n # Approximate gradients\r\n # For multiclass loss, we should only change the predictions of one tree\r\n # (here the first), hence the use of offset[:, 0] += eps\r\n # As a softmax is computed, offsetting the whole array by a constant would\r\n # have no effect on the probabilities, and thus on the loss\r\n eps = 1e-9\r\n offset = np.zeros_like(raw_predictions)\r\n offset[0, :] = eps\r\n f_plus_eps = loss(y_true, raw_predictions + offset / 2, average=False)\r\n f_minus_eps = loss(y_true, raw_predictions - offset / 2, average=False)\r\n numerical_gradients = (f_plus_eps - f_minus_eps) / eps\r\n\r\n # Approximate hessians\r\n eps = 1e-4 # need big enough eps as we divide by its square\r\n offset[0, :] = eps\r\n f_plus_eps = loss(y_true, raw_predictions + offset, average=False)\r\n f_minus_eps = loss(y_true, raw_predictions - offset, average=False)\r\n f = loss(y_true, raw_predictions, average=False)\r\n numerical_hessians = (f_plus_eps + f_minus_eps - 2 * f) / eps**2\r\n\r\n assert_allclose(numerical_gradients, gradients, rtol=1e-4, atol=1e-7)\r\n assert_allclose(numerical_hessians, hessians, rtol=1e-4, atol=1e-7)\r\n\r\n\r\ndef test_baseline_least_squares():\r\n rng = np.random.RandomState(0)\r\n\r\n loss = _LOSSES['least_squares']()\r\n y_train = rng.normal(size=100)\r\n baseline_prediction = loss.get_baseline_prediction(y_train, 1)\r\n assert baseline_prediction.shape == tuple() # scalar\r\n assert baseline_prediction.dtype == y_train.dtype\r\n # Make sure baseline prediction is the mean of all targets\r\n assert_almost_equal(baseline_prediction, y_train.mean())\r\n assert np.allclose(loss.inverse_link_function(baseline_prediction),\r\n baseline_prediction)\r\n\r\n\r\ndef test_baseline_least_absolute_deviation():\r\n rng = np.random.RandomState(0)\r\n\r\n loss = _LOSSES['least_absolute_deviation']()\r\n y_train = rng.normal(size=100)\r\n baseline_prediction = loss.get_baseline_prediction(y_train, 1)\r\n assert baseline_prediction.shape == tuple() # scalar\r\n assert baseline_prediction.dtype == y_train.dtype\r\n # Make sure baseline prediction is the median of all targets\r\n assert np.allclose(loss.inverse_link_function(baseline_prediction),\r\n baseline_prediction)\r\n assert baseline_prediction == pytest.approx(np.median(y_train))\r\n\r\n\r\ndef test_baseline_binary_crossentropy():\r\n rng = np.random.RandomState(0)\r\n\r\n loss = _LOSSES['binary_crossentropy']()\r\n for y_train in (np.zeros(shape=100), np.ones(shape=100)):\r\n y_train = y_train.astype(np.float64)\r\n baseline_prediction = loss.get_baseline_prediction(y_train, 1)\r\n assert_all_finite(baseline_prediction)\r\n assert 
np.allclose(loss.inverse_link_function(baseline_prediction),\r\n y_train[0])\r\n\r\n # Make sure baseline prediction is equal to link_function(p), where p\r\n # is the proba of the positive class. We want predict_proba() to return p,\r\n # and by definition\r\n # p = inverse_link_function(raw_prediction) = sigmoid(raw_prediction)\r\n # So we want raw_prediction = link_function(p) = log(p / (1 - p))\r\n y_train = rng.randint(0, 2, size=100).astype(np.float64)\r\n baseline_prediction = loss.get_baseline_prediction(y_train, 1)\r\n assert baseline_prediction.shape == tuple() # scalar\r\n assert baseline_prediction.dtype == y_train.dtype\r\n p = y_train.mean()\r\n assert np.allclose(baseline_prediction, np.log(p / (1 - p)))\r\n\r\n\r\ndef test_baseline_categorical_crossentropy():\r\n rng = np.random.RandomState(0)\r\n\r\n prediction_dim = 4\r\n loss = _LOSSES['categorical_crossentropy']()\r\n for y_train in (np.zeros(shape=100), np.ones(shape=100)):\r\n y_train = y_train.astype(np.float64)\r\n baseline_prediction = loss.get_baseline_prediction(y_train,\r\n prediction_dim)\r\n assert baseline_prediction.dtype == y_train.dtype\r\n assert_all_finite(baseline_prediction)\r\n\r\n # Same logic as for above test. Here inverse_link_function = softmax and\r\n # link_function = log\r\n y_train = rng.randint(0, prediction_dim + 1, size=100).astype(np.float32)\r\n baseline_prediction = loss.get_baseline_prediction(y_train, prediction_dim)\r\n assert baseline_prediction.shape == (prediction_dim, 1)\r\n for k in range(prediction_dim):\r\n p = (y_train == k).mean()\r\n assert np.allclose(baseline_prediction[k, :], np.log(p))\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nfrom pandas import DataFrame, Index, MultiIndex\r\nimport pandas._testing as tm\r\n\r\n\r\[email protected]\r\ndef multiindex_dataframe_random_data():\r\n \"\"\"DataFrame with 2 level MultiIndex with random data\"\"\"\r\n index = MultiIndex(\r\n levels=[[\"foo\", \"bar\", \"baz\", \"qux\"], [\"one\", \"two\", \"three\"]],\r\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\r\n names=[\"first\", \"second\"],\r\n )\r\n return DataFrame(\r\n np.random.randn(10, 3), index=index, columns=Index([\"A\", \"B\", \"C\"], name=\"exp\")\r\n )\r\n\r\n\r\[email protected]\r\ndef multiindex_year_month_day_dataframe_random_data():\r\n \"\"\"DataFrame with 3 level MultiIndex (year, month, day) covering\r\n first 100 business days from 2000-01-01 with random data\"\"\"\r\n tdf = tm.makeTimeDataFrame(100)\r\n ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\r\n # use Int64Index, to make sure things work\r\n ymd.index.set_levels([lev.astype(\"i8\") for lev in ymd.index.levels], inplace=True)\r\n ymd.index.set_names([\"year\", \"month\", \"day\"], inplace=True)\r\n return ymd\r\n",
"\"\"\"\r\nProvide user facing operators for doing the split part of the\r\nsplit-apply-combine paradigm.\r\n\"\"\"\r\n\r\nfrom typing import Dict, Hashable, List, Optional, Tuple\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._typing import FrameOrSeries\r\nfrom pandas.util._decorators import cache_readonly\r\n\r\nfrom pandas.core.dtypes.common import (\r\n ensure_categorical,\r\n is_categorical_dtype,\r\n is_datetime64_dtype,\r\n is_list_like,\r\n is_scalar,\r\n is_timedelta64_dtype,\r\n)\r\nfrom pandas.core.dtypes.generic import ABCSeries\r\n\r\nimport pandas.core.algorithms as algorithms\r\nfrom pandas.core.arrays import Categorical, ExtensionArray\r\nimport pandas.core.common as com\r\nfrom pandas.core.frame import DataFrame\r\nfrom pandas.core.groupby import ops\r\nfrom pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby\r\nfrom pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex\r\nfrom pandas.core.series import Series\r\n\r\nfrom pandas.io.formats.printing import pprint_thing\r\n\r\n\r\nclass Grouper:\r\n \"\"\"\r\n A Grouper allows the user to specify a groupby instruction for an object.\r\n\r\n This specification will select a column via the key parameter, or if the\r\n level and/or axis parameters are given, a level of the index of the target\r\n object.\r\n\r\n If `axis` and/or `level` are passed as keywords to both `Grouper` and\r\n `groupby`, the values passed to `Grouper` take precedence.\r\n\r\n Parameters\r\n ----------\r\n key : str, defaults to None\r\n Groupby key, which selects the grouping column of the target.\r\n level : name/number, defaults to None\r\n The level for the target index.\r\n freq : str / frequency object, defaults to None\r\n This will groupby the specified frequency if the target selection\r\n (via key or level) is a datetime-like object. For full specification\r\n of available frequencies, please see `here\r\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.\r\n axis : str, int, defaults to 0\r\n Number/name of the axis.\r\n sort : bool, default to False\r\n Whether to sort the resulting labels.\r\n closed : {'left' or 'right'}\r\n Closed end of interval. Only when `freq` parameter is passed.\r\n label : {'left' or 'right'}\r\n Interval boundary to use for labeling.\r\n Only when `freq` parameter is passed.\r\n convention : {'start', 'end', 'e', 's'}\r\n If grouper is PeriodIndex and `freq` parameter is passed.\r\n base : int, default 0\r\n Only when `freq` parameter is passed.\r\n loffset : str, DateOffset, timedelta object\r\n Only when `freq` parameter is passed.\r\n\r\n Returns\r\n -------\r\n A specification for a groupby instruction\r\n\r\n Examples\r\n --------\r\n\r\n Syntactic sugar for ``df.groupby('A')``\r\n\r\n >>> df.groupby(Grouper(key='A'))\r\n\r\n Specify a resample operation on the column 'date'\r\n\r\n >>> df.groupby(Grouper(key='date', freq='60s'))\r\n\r\n Specify a resample operation on the level 'date' on the columns axis\r\n with a frequency of 60s\r\n\r\n >>> df.groupby(Grouper(level='date', freq='60s', axis=1))\r\n \"\"\"\r\n\r\n _attributes: Tuple[str, ...] 
= (\"key\", \"level\", \"freq\", \"axis\", \"sort\")\r\n\r\n def __new__(cls, *args, **kwargs):\r\n if kwargs.get(\"freq\") is not None:\r\n from pandas.core.resample import TimeGrouper\r\n\r\n cls = TimeGrouper\r\n return super().__new__(cls)\r\n\r\n def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):\r\n self.key = key\r\n self.level = level\r\n self.freq = freq\r\n self.axis = axis\r\n self.sort = sort\r\n\r\n self.grouper = None\r\n self.obj = None\r\n self.indexer = None\r\n self.binner = None\r\n self._grouper = None\r\n\r\n @property\r\n def ax(self):\r\n return self.grouper\r\n\r\n def _get_grouper(self, obj, validate: bool = True):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n obj : the subject object\r\n validate : boolean, default True\r\n if True, validate the grouper\r\n\r\n Returns\r\n -------\r\n a tuple of binner, grouper, obj (possibly sorted)\r\n \"\"\"\r\n\r\n self._set_grouper(obj)\r\n self.grouper, _, self.obj = get_grouper(\r\n self.obj,\r\n [self.key],\r\n axis=self.axis,\r\n level=self.level,\r\n sort=self.sort,\r\n validate=validate,\r\n )\r\n return self.binner, self.grouper, self.obj\r\n\r\n def _set_grouper(self, obj: FrameOrSeries, sort: bool = False):\r\n \"\"\"\r\n given an object and the specifications, setup the internal grouper\r\n for this particular specification\r\n\r\n Parameters\r\n ----------\r\n obj : Series or DataFrame\r\n sort : bool, default False\r\n whether the resulting grouper should be sorted\r\n \"\"\"\r\n assert obj is not None\r\n\r\n if self.key is not None and self.level is not None:\r\n raise ValueError(\"The Grouper cannot specify both a key and a level!\")\r\n\r\n # Keep self.grouper value before overriding\r\n if self._grouper is None:\r\n self._grouper = self.grouper\r\n\r\n # the key must be a valid info item\r\n if self.key is not None:\r\n key = self.key\r\n # The 'on' is already defined\r\n if getattr(self.grouper, \"name\", None) == key and isinstance(\r\n obj, ABCSeries\r\n ):\r\n ax = self._grouper.take(obj.index)\r\n else:\r\n if key not in obj._info_axis:\r\n raise KeyError(f\"The grouper name {key} is not found\")\r\n ax = Index(obj[key], name=key)\r\n\r\n else:\r\n ax = obj._get_axis(self.axis)\r\n if self.level is not None:\r\n level = self.level\r\n\r\n # if a level is given it must be a mi level or\r\n # equivalent to the axis name\r\n if isinstance(ax, MultiIndex):\r\n level = ax._get_level_number(level)\r\n ax = Index(ax._get_level_values(level), name=ax.names[level])\r\n\r\n else:\r\n if level not in (0, ax.name):\r\n raise ValueError(f\"The level {level} is not valid\")\r\n\r\n # possibly sort\r\n if (self.sort or sort) and not ax.is_monotonic:\r\n # use stable sort to support first, last, nth\r\n indexer = self.indexer = ax.argsort(kind=\"mergesort\")\r\n ax = ax.take(indexer)\r\n obj = obj.take(indexer, axis=self.axis)\r\n\r\n self.obj = obj\r\n self.grouper = ax\r\n return self.grouper\r\n\r\n @property\r\n def groups(self):\r\n return self.grouper.groups\r\n\r\n def __repr__(self) -> str:\r\n attrs_list = (\r\n f\"{attr_name}={repr(getattr(self, attr_name))}\"\r\n for attr_name in self._attributes\r\n if getattr(self, attr_name) is not None\r\n )\r\n attrs = \", \".join(attrs_list)\r\n cls_name = type(self).__name__\r\n return f\"{cls_name}({attrs})\"\r\n\r\n\r\nclass Grouping:\r\n \"\"\"\r\n Holds the grouping information for a single key\r\n\r\n Parameters\r\n ----------\r\n index : Index\r\n grouper :\r\n obj Union[DataFrame, Series]:\r\n name :\r\n level :\r\n observed : bool, default 
False\r\n If we are a Categorical, use the observed values\r\n in_axis : if the Grouping is a column in self.obj and hence among\r\n Groupby.exclusions list\r\n\r\n Returns\r\n -------\r\n **Attributes**:\r\n * indices : dict of {group -> index_list}\r\n * codes : ndarray, group codes\r\n * group_index : unique groups\r\n * groups : dict of {group -> label_list}\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n index: Index,\r\n grouper=None,\r\n obj: Optional[FrameOrSeries] = None,\r\n name=None,\r\n level=None,\r\n sort: bool = True,\r\n observed: bool = False,\r\n in_axis: bool = False,\r\n ):\r\n self.name = name\r\n self.level = level\r\n self.grouper = _convert_grouper(index, grouper)\r\n self.all_grouper = None\r\n self.index = index\r\n self.sort = sort\r\n self.obj = obj\r\n self.observed = observed\r\n self.in_axis = in_axis\r\n\r\n # right place for this?\r\n if isinstance(grouper, (Series, Index)) and name is None:\r\n self.name = grouper.name\r\n\r\n if isinstance(grouper, MultiIndex):\r\n self.grouper = grouper.values\r\n\r\n # we have a single grouper which may be a myriad of things,\r\n # some of which are dependent on the passing in level\r\n\r\n if level is not None:\r\n if not isinstance(level, int):\r\n if level not in index.names:\r\n raise AssertionError(f\"Level {level} not in index\")\r\n level = index.names.index(level)\r\n\r\n if self.name is None:\r\n self.name = index.names[level]\r\n\r\n (\r\n self.grouper,\r\n self._codes,\r\n self._group_index,\r\n ) = index._get_grouper_for_level(self.grouper, level)\r\n\r\n # a passed Grouper like, directly get the grouper in the same way\r\n # as single grouper groupby, use the group_info to get codes\r\n elif isinstance(self.grouper, Grouper):\r\n # get the new grouper; we already have disambiguated\r\n # what key/level refer to exactly, don't need to\r\n # check again as we have by this point converted these\r\n # to an actual value (rather than a pd.Grouper)\r\n _, grouper, _ = self.grouper._get_grouper(self.obj, validate=False)\r\n if self.name is None:\r\n self.name = grouper.result_index.name\r\n self.obj = self.grouper.obj\r\n self.grouper = grouper._get_grouper()\r\n\r\n else:\r\n if self.grouper is None and self.name is not None and self.obj is not None:\r\n self.grouper = self.obj[self.name]\r\n\r\n elif isinstance(self.grouper, (list, tuple)):\r\n self.grouper = com.asarray_tuplesafe(self.grouper)\r\n\r\n # a passed Categorical\r\n elif is_categorical_dtype(self.grouper):\r\n\r\n self.grouper, self.all_grouper = recode_for_groupby(\r\n self.grouper, self.sort, observed\r\n )\r\n categories = self.grouper.categories\r\n\r\n # we make a CategoricalIndex out of the cat grouper\r\n # preserving the categories / ordered attributes\r\n self._codes = self.grouper.codes\r\n if observed:\r\n codes = algorithms.unique1d(self.grouper.codes)\r\n codes = codes[codes != -1]\r\n if sort or self.grouper.ordered:\r\n codes = np.sort(codes)\r\n else:\r\n codes = np.arange(len(categories))\r\n\r\n self._group_index = CategoricalIndex(\r\n Categorical.from_codes(\r\n codes=codes, categories=categories, ordered=self.grouper.ordered\r\n ),\r\n name=self.name,\r\n )\r\n\r\n # we are done\r\n if isinstance(self.grouper, Grouping):\r\n self.grouper = self.grouper.grouper\r\n\r\n # no level passed\r\n elif not isinstance(\r\n self.grouper, (Series, Index, ExtensionArray, np.ndarray)\r\n ):\r\n if getattr(self.grouper, \"ndim\", 1) != 1:\r\n t = self.name or str(type(self.grouper))\r\n raise ValueError(f\"Grouper for '{t}' not 
1-dimensional\")\r\n self.grouper = self.index.map(self.grouper)\r\n if not (\r\n hasattr(self.grouper, \"__len__\")\r\n and len(self.grouper) == len(self.index)\r\n ):\r\n grper = pprint_thing(self.grouper)\r\n errmsg = (\r\n \"Grouper result violates len(labels) == \"\r\n f\"len(data)\\nresult: {grper}\"\r\n )\r\n self.grouper = None # Try for sanity\r\n raise AssertionError(errmsg)\r\n\r\n # if we have a date/time-like grouper, make sure that we have\r\n # Timestamps like\r\n if getattr(self.grouper, \"dtype\", None) is not None:\r\n if is_datetime64_dtype(self.grouper):\r\n self.grouper = self.grouper.astype(\"datetime64[ns]\")\r\n elif is_timedelta64_dtype(self.grouper):\r\n\r\n self.grouper = self.grouper.astype(\"timedelta64[ns]\")\r\n\r\n def __repr__(self) -> str:\r\n return f\"Grouping({self.name})\"\r\n\r\n def __iter__(self):\r\n return iter(self.indices)\r\n\r\n _codes: Optional[np.ndarray] = None\r\n _group_index: Optional[Index] = None\r\n\r\n @property\r\n def ngroups(self) -> int:\r\n return len(self.group_index)\r\n\r\n @cache_readonly\r\n def indices(self):\r\n # we have a list of groupers\r\n if isinstance(self.grouper, ops.BaseGrouper):\r\n return self.grouper.indices\r\n\r\n values = ensure_categorical(self.grouper)\r\n return values._reverse_indexer()\r\n\r\n @property\r\n def codes(self) -> np.ndarray:\r\n if self._codes is None:\r\n self._make_codes()\r\n return self._codes\r\n\r\n @cache_readonly\r\n def result_index(self) -> Index:\r\n if self.all_grouper is not None:\r\n return recode_from_groupby(self.all_grouper, self.sort, self.group_index)\r\n return self.group_index\r\n\r\n @property\r\n def group_index(self) -> Index:\r\n if self._group_index is None:\r\n self._make_codes()\r\n assert self._group_index is not None\r\n return self._group_index\r\n\r\n def _make_codes(self) -> None:\r\n if self._codes is None or self._group_index is None:\r\n # we have a list of groupers\r\n if isinstance(self.grouper, ops.BaseGrouper):\r\n codes = self.grouper.codes_info\r\n uniques = self.grouper.result_index\r\n else:\r\n codes, uniques = algorithms.factorize(self.grouper, sort=self.sort)\r\n uniques = Index(uniques, name=self.name)\r\n self._codes = codes\r\n self._group_index = uniques\r\n\r\n @cache_readonly\r\n def groups(self) -> Dict[Hashable, np.ndarray]:\r\n return self.index.groupby(Categorical.from_codes(self.codes, self.group_index))\r\n\r\n\r\ndef get_grouper(\r\n obj: FrameOrSeries,\r\n key=None,\r\n axis: int = 0,\r\n level=None,\r\n sort: bool = True,\r\n observed: bool = False,\r\n mutated: bool = False,\r\n validate: bool = True,\r\n) -> \"Tuple[ops.BaseGrouper, List[Hashable], FrameOrSeries]\":\r\n \"\"\"\r\n Create and return a BaseGrouper, which is an internal\r\n mapping of how to create the grouper indexers.\r\n This may be composed of multiple Grouping objects, indicating\r\n multiple groupers\r\n\r\n Groupers are ultimately index mappings. 
They can originate as:\r\n index mappings, keys to columns, functions, or Groupers\r\n\r\n Groupers enable local references to axis,level,sort, while\r\n the passed in axis, level, and sort are 'global'.\r\n\r\n This routine tries to figure out what the passing in references\r\n are and then creates a Grouping for each one, combined into\r\n a BaseGrouper.\r\n\r\n If observed & we have a categorical grouper, only show the observed\r\n values.\r\n\r\n If validate, then check for key/level overlaps.\r\n\r\n \"\"\"\r\n group_axis = obj._get_axis(axis)\r\n\r\n # validate that the passed single level is compatible with the passed\r\n # axis of the object\r\n if level is not None:\r\n # TODO: These if-block and else-block are almost same.\r\n # MultiIndex instance check is removable, but it seems that there are\r\n # some processes only for non-MultiIndex in else-block,\r\n # eg. `obj.index.name != level`. We have to consider carefully whether\r\n # these are applicable for MultiIndex. Even if these are applicable,\r\n # we need to check if it makes no side effect to subsequent processes\r\n # on the outside of this condition.\r\n # (GH 17621)\r\n if isinstance(group_axis, MultiIndex):\r\n if is_list_like(level) and len(level) == 1:\r\n level = level[0]\r\n\r\n if key is None and is_scalar(level):\r\n # Get the level values from group_axis\r\n key = group_axis.get_level_values(level)\r\n level = None\r\n\r\n else:\r\n # allow level to be a length-one list-like object\r\n # (e.g., level=[0])\r\n # GH 13901\r\n if is_list_like(level):\r\n nlevels = len(level)\r\n if nlevels == 1:\r\n level = level[0]\r\n elif nlevels == 0:\r\n raise ValueError(\"No group keys passed!\")\r\n else:\r\n raise ValueError(\"multiple levels only valid with MultiIndex\")\r\n\r\n if isinstance(level, str):\r\n if obj._get_axis(axis).name != level:\r\n raise ValueError(\r\n f\"level name {level} is not the name \"\r\n f\"of the {obj._get_axis_name(axis)}\"\r\n )\r\n elif level > 0 or level < -1:\r\n raise ValueError(\"level > 0 or level < -1 only valid with MultiIndex\")\r\n\r\n # NOTE: `group_axis` and `group_axis.get_level_values(level)`\r\n # are same in this section.\r\n level = None\r\n key = group_axis\r\n\r\n # a passed-in Grouper, directly convert\r\n if isinstance(key, Grouper):\r\n binner, grouper, obj = key._get_grouper(obj, validate=False)\r\n if key.key is None:\r\n return grouper, [], obj\r\n else:\r\n return grouper, [key.key], obj\r\n\r\n # already have a BaseGrouper, just return it\r\n elif isinstance(key, ops.BaseGrouper):\r\n return key, [], obj\r\n\r\n if not isinstance(key, list):\r\n keys = [key]\r\n match_axis_length = False\r\n else:\r\n keys = key\r\n match_axis_length = len(keys) == len(group_axis)\r\n\r\n # what are we after, exactly?\r\n any_callable = any(callable(g) or isinstance(g, dict) for g in keys)\r\n any_groupers = any(isinstance(g, Grouper) for g in keys)\r\n any_arraylike = any(\r\n isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys\r\n )\r\n\r\n # is this an index replacement?\r\n if (\r\n not any_callable\r\n and not any_arraylike\r\n and not any_groupers\r\n and match_axis_length\r\n and level is None\r\n ):\r\n if isinstance(obj, DataFrame):\r\n all_in_columns_index = all(\r\n g in obj.columns or g in obj.index.names for g in keys\r\n )\r\n else:\r\n assert isinstance(obj, Series)\r\n all_in_columns_index = all(g in obj.index.names for g in keys)\r\n\r\n if not all_in_columns_index:\r\n keys = [com.asarray_tuplesafe(keys)]\r\n\r\n if isinstance(level, (tuple, 
list)):\r\n if key is None:\r\n keys = [None] * len(level)\r\n levels = level\r\n else:\r\n levels = [level] * len(keys)\r\n\r\n groupings: List[Grouping] = []\r\n exclusions: List[Hashable] = []\r\n\r\n # if the actual grouper should be obj[key]\r\n def is_in_axis(key) -> bool:\r\n if not _is_label_like(key):\r\n items = obj._data.items\r\n try:\r\n items.get_loc(key)\r\n except (KeyError, TypeError):\r\n # TypeError shows up here if we pass e.g. Int64Index\r\n return False\r\n\r\n return True\r\n\r\n # if the grouper is obj[name]\r\n def is_in_obj(gpr) -> bool:\r\n if not hasattr(gpr, \"name\"):\r\n return False\r\n try:\r\n return gpr is obj[gpr.name]\r\n except (KeyError, IndexError):\r\n return False\r\n\r\n for i, (gpr, level) in enumerate(zip(keys, levels)):\r\n\r\n if is_in_obj(gpr): # df.groupby(df['name'])\r\n in_axis, name = True, gpr.name\r\n exclusions.append(name)\r\n\r\n elif is_in_axis(gpr): # df.groupby('name')\r\n if gpr in obj:\r\n if validate:\r\n obj._check_label_or_level_ambiguity(gpr, axis=axis)\r\n in_axis, name, gpr = True, gpr, obj[gpr]\r\n exclusions.append(name)\r\n elif obj._is_level_reference(gpr, axis=axis):\r\n in_axis, name, level, gpr = False, None, gpr, None\r\n else:\r\n raise KeyError(gpr)\r\n elif isinstance(gpr, Grouper) and gpr.key is not None:\r\n # Add key to exclusions\r\n exclusions.append(gpr.key)\r\n in_axis, name = False, None\r\n else:\r\n in_axis, name = False, None\r\n\r\n if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:\r\n raise ValueError(\r\n f\"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) \"\r\n \"must be same length\"\r\n )\r\n\r\n # create the Grouping\r\n # allow us to passing the actual Grouping as the gpr\r\n ping = (\r\n Grouping(\r\n group_axis,\r\n gpr,\r\n obj=obj,\r\n name=name,\r\n level=level,\r\n sort=sort,\r\n observed=observed,\r\n in_axis=in_axis,\r\n )\r\n if not isinstance(gpr, Grouping)\r\n else gpr\r\n )\r\n\r\n groupings.append(ping)\r\n\r\n if len(groupings) == 0 and len(obj):\r\n raise ValueError(\"No group keys passed!\")\r\n elif len(groupings) == 0:\r\n groupings.append(Grouping(Index([], dtype=\"int\"), np.array([], dtype=np.intp)))\r\n\r\n # create the internals grouper\r\n grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)\r\n return grouper, exclusions, obj\r\n\r\n\r\ndef _is_label_like(val) -> bool:\r\n return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))\r\n\r\n\r\ndef _convert_grouper(axis: Index, grouper):\r\n if isinstance(grouper, dict):\r\n return grouper.get\r\n elif isinstance(grouper, Series):\r\n if grouper.index.equals(axis):\r\n return grouper._values\r\n else:\r\n return grouper.reindex(axis)._values\r\n elif isinstance(grouper, (list, Series, Index, np.ndarray)):\r\n if len(grouper) != len(axis):\r\n raise ValueError(\"Grouper and axis must be same length\")\r\n return grouper\r\n else:\r\n return grouper\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nfrom pandas import Categorical, Series\r\nimport pandas._testing as tm\r\nfrom pandas.core.construction import create_series_with_explicit_dtype\r\n\r\n\r\ndef test_nunique():\r\n # basics.rst doc example\r\n series = Series(np.random.randn(500))\r\n series[20:500] = np.nan\r\n series[10:20] = 5000\r\n result = series.nunique()\r\n assert result == 11\r\n\r\n # GH 18051\r\n s = Series(Categorical([]))\r\n assert s.nunique() == 0\r\n s = Series(Categorical([np.nan]))\r\n assert s.nunique() == 0\r\n\r\n\r\ndef test_unique():\r\n # GH714 also, dtype=float\r\n s = Series([1.2345] * 100)\r\n s[::2] = np.nan\r\n result = s.unique()\r\n assert len(result) == 2\r\n\r\n s = Series([1.2345] * 100, dtype=\"f4\")\r\n s[::2] = np.nan\r\n result = s.unique()\r\n assert len(result) == 2\r\n\r\n # NAs in object arrays #714\r\n s = Series([\"foo\"] * 100, dtype=\"O\")\r\n s[::2] = np.nan\r\n result = s.unique()\r\n assert len(result) == 2\r\n\r\n # decision about None\r\n s = Series([1, 2, 3, None, None, None], dtype=object)\r\n result = s.unique()\r\n expected = np.array([1, 2, 3, None], dtype=object)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n # GH 18051\r\n s = Series(Categorical([]))\r\n tm.assert_categorical_equal(s.unique(), Categorical([]), check_dtype=False)\r\n s = Series(Categorical([np.nan]))\r\n tm.assert_categorical_equal(s.unique(), Categorical([np.nan]), check_dtype=False)\r\n\r\n\r\ndef test_unique_data_ownership():\r\n # it works! #1807\r\n Series(Series([\"a\", \"c\", \"b\"]).unique()).sort_values()\r\n\r\n\r\[email protected](\r\n \"data, expected\",\r\n [\r\n (np.random.randint(0, 10, size=1000), False),\r\n (np.arange(1000), True),\r\n ([], True),\r\n ([np.nan], True),\r\n ([\"foo\", \"bar\", np.nan], True),\r\n ([\"foo\", \"foo\", np.nan], False),\r\n ([\"foo\", \"bar\", np.nan, np.nan], False),\r\n ],\r\n)\r\ndef test_is_unique(data, expected):\r\n # GH11946 / GH25180\r\n s = create_series_with_explicit_dtype(data, dtype_if_empty=object)\r\n assert s.is_unique is expected\r\n\r\n\r\ndef test_is_unique_class_ne(capsys):\r\n # GH 20661\r\n class Foo:\r\n def __init__(self, val):\r\n self._value = val\r\n\r\n def __ne__(self, other):\r\n raise Exception(\"NEQ not supported\")\r\n\r\n with capsys.disabled():\r\n li = [Foo(i) for i in range(5)]\r\n s = Series(li, index=list(range(5)))\r\n s.is_unique\r\n captured = capsys.readouterr()\r\n assert len(captured.err) == 0\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Series, Timestamp\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestDataFrameAppend:\r\n def test_append_empty_list(self):\r\n # GH 28769\r\n df = DataFrame()\r\n result = df.append([])\r\n expected = df\r\n tm.assert_frame_equal(result, expected)\r\n assert result is not df\r\n\r\n df = DataFrame(np.random.randn(5, 4), columns=[\"foo\", \"bar\", \"baz\", \"qux\"])\r\n result = df.append([])\r\n expected = df\r\n tm.assert_frame_equal(result, expected)\r\n assert result is not df # .append() should return a new object\r\n\r\n def test_append_series_dict(self):\r\n df = DataFrame(np.random.randn(5, 4), columns=[\"foo\", \"bar\", \"baz\", \"qux\"])\r\n\r\n series = df.loc[4]\r\n msg = \"Indexes have overlapping values\"\r\n with pytest.raises(ValueError, match=msg):\r\n df.append(series, verify_integrity=True)\r\n\r\n series.name = None\r\n msg = \"Can only append a Series if ignore_index=True\"\r\n with pytest.raises(TypeError, match=msg):\r\n df.append(series, verify_integrity=True)\r\n\r\n result = df.append(series[::-1], ignore_index=True)\r\n expected = df.append(\r\n DataFrame({0: series[::-1]}, index=df.columns).T, ignore_index=True\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # dict\r\n result = df.append(series.to_dict(), ignore_index=True)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.append(series[::-1][:3], ignore_index=True)\r\n expected = df.append(\r\n DataFrame({0: series[::-1][:3]}).T, ignore_index=True, sort=True\r\n )\r\n tm.assert_frame_equal(result, expected.loc[:, result.columns])\r\n\r\n # can append when name set\r\n row = df.loc[4]\r\n row.name = 5\r\n result = df.append(row)\r\n expected = df.append(df[-1:], ignore_index=True)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_append_list_of_series_dicts(self):\r\n df = DataFrame(np.random.randn(5, 4), columns=[\"foo\", \"bar\", \"baz\", \"qux\"])\r\n\r\n dicts = [x.to_dict() for idx, x in df.iterrows()]\r\n\r\n result = df.append(dicts, ignore_index=True)\r\n expected = df.append(df, ignore_index=True)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # different columns\r\n dicts = [\r\n {\"foo\": 1, \"bar\": 2, \"baz\": 3, \"peekaboo\": 4},\r\n {\"foo\": 5, \"bar\": 6, \"baz\": 7, \"peekaboo\": 8},\r\n ]\r\n result = df.append(dicts, ignore_index=True, sort=True)\r\n expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_append_missing_cols(self):\r\n # GH22252\r\n # exercise the conditional branch in append method where the data\r\n # to be appended is a list and does not contain all columns that are in\r\n # the target DataFrame\r\n df = DataFrame(np.random.randn(5, 4), columns=[\"foo\", \"bar\", \"baz\", \"qux\"])\r\n\r\n dicts = [{\"foo\": 9}, {\"bar\": 10}]\r\n with tm.assert_produces_warning(None):\r\n result = df.append(dicts, ignore_index=True, sort=True)\r\n\r\n expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_append_empty_dataframe(self):\r\n\r\n # Empty df append empty df\r\n df1 = DataFrame()\r\n df2 = DataFrame()\r\n result = df1.append(df2)\r\n expected = df1.copy()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # Non-empty df append empty df\r\n df1 = DataFrame(np.random.randn(5, 2))\r\n df2 = DataFrame()\r\n result = df1.append(df2)\r\n expected = df1.copy()\r\n tm.assert_frame_equal(result, 
expected)\r\n\r\n # Empty df with columns append empty df\r\n df1 = DataFrame(columns=[\"bar\", \"foo\"])\r\n df2 = DataFrame()\r\n result = df1.append(df2)\r\n expected = df1.copy()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # Non-Empty df with columns append empty df\r\n df1 = DataFrame(np.random.randn(5, 2), columns=[\"bar\", \"foo\"])\r\n df2 = DataFrame()\r\n result = df1.append(df2)\r\n expected = df1.copy()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_append_dtypes(self):\r\n\r\n # GH 5754\r\n # row appends of different dtypes (so need to do by-item)\r\n # can sometimes infer the correct type\r\n\r\n df1 = DataFrame({\"bar\": Timestamp(\"20130101\")}, index=range(5))\r\n df2 = DataFrame()\r\n result = df1.append(df2)\r\n expected = df1.copy()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n df1 = DataFrame({\"bar\": Timestamp(\"20130101\")}, index=range(1))\r\n df2 = DataFrame({\"bar\": \"foo\"}, index=range(1, 2))\r\n result = df1.append(df2)\r\n expected = DataFrame({\"bar\": [Timestamp(\"20130101\"), \"foo\"]})\r\n tm.assert_frame_equal(result, expected)\r\n\r\n df1 = DataFrame({\"bar\": Timestamp(\"20130101\")}, index=range(1))\r\n df2 = DataFrame({\"bar\": np.nan}, index=range(1, 2))\r\n result = df1.append(df2)\r\n expected = DataFrame(\r\n {\"bar\": Series([Timestamp(\"20130101\"), np.nan], dtype=\"M8[ns]\")}\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n df1 = DataFrame({\"bar\": Timestamp(\"20130101\")}, index=range(1))\r\n df2 = DataFrame({\"bar\": np.nan}, index=range(1, 2), dtype=object)\r\n result = df1.append(df2)\r\n expected = DataFrame(\r\n {\"bar\": Series([Timestamp(\"20130101\"), np.nan], dtype=\"M8[ns]\")}\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n df1 = DataFrame({\"bar\": np.nan}, index=range(1))\r\n df2 = DataFrame({\"bar\": Timestamp(\"20130101\")}, index=range(1, 2))\r\n result = df1.append(df2)\r\n expected = DataFrame(\r\n {\"bar\": Series([np.nan, Timestamp(\"20130101\")], dtype=\"M8[ns]\")}\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n df1 = DataFrame({\"bar\": Timestamp(\"20130101\")}, index=range(1))\r\n df2 = DataFrame({\"bar\": 1}, index=range(1, 2), dtype=object)\r\n result = df1.append(df2)\r\n expected = DataFrame({\"bar\": Series([Timestamp(\"20130101\"), 1])})\r\n tm.assert_frame_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"timestamp\", [\"2019-07-19 07:04:57+0100\", \"2019-07-19 07:04:57\"]\r\n )\r\n def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp):\r\n # GH 30238\r\n tz = tz_naive_fixture\r\n df = pd.DataFrame([pd.Timestamp(timestamp, tz=tz)])\r\n result = df.append(df.iloc[0]).iloc[-1]\r\n expected = pd.Series(pd.Timestamp(timestamp, tz=tz), name=0)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"data, dtype\",\r\n [\r\n ([1], pd.Int64Dtype()),\r\n ([1], pd.CategoricalDtype()),\r\n ([pd.Interval(left=0, right=5)], pd.IntervalDtype()),\r\n ([pd.Period(\"2000-03\", freq=\"M\")], pd.PeriodDtype(\"M\")),\r\n ([1], pd.SparseDtype()),\r\n ],\r\n )\r\n def test_other_dtypes(self, data, dtype):\r\n df = pd.DataFrame(data, dtype=dtype)\r\n result = df.append(df.iloc[0]).iloc[-1]\r\n expected = pd.Series(data, name=0, dtype=dtype)\r\n tm.assert_series_equal(result, expected)\r\n",
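A small sketch of the DataFrame.append behaviour exercised by the test entry above, assuming the pandas version captured in this corpus (append was later deprecated in favour of pd.concat); the data values are made up.

import pandas as pd

df = pd.DataFrame({"foo": [1, 2], "bar": [3, 4]})

# Appending a dict requires ignore_index=True, mirroring test_append_series_dict.
out = df.append({"foo": 5, "bar": 6}, ignore_index=True)
print(out)

# Equivalent pd.concat form, which also works on newer pandas releases.
out2 = pd.concat([df, pd.DataFrame([{"foo": 5, "bar": 6}])], ignore_index=True)
print(out2)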
"\"\"\"\r\nGeneral tests for all estimators in sklearn.\r\n\"\"\"\r\n\r\n# Authors: Andreas Mueller <[email protected]>\r\n# Gael Varoquaux [email protected]\r\n# License: BSD 3 clause\r\n\r\nimport os\r\nimport warnings\r\nimport sys\r\nimport re\r\nimport pkgutil\r\nfrom inspect import isgenerator\r\nfrom functools import partial\r\n\r\nimport pytest\r\n\r\n\r\nfrom sklearn.utils import all_estimators\r\nfrom sklearn.utils._testing import ignore_warnings\r\nfrom sklearn.exceptions import ConvergenceWarning\r\nfrom sklearn.utils.estimator_checks import check_estimator\r\n\r\nimport sklearn\r\nfrom sklearn.base import RegressorMixin, BiclusterMixin\r\n\r\nfrom sklearn.linear_model._base import LinearClassifierMixin\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.utils import IS_PYPY\r\nfrom sklearn.utils._testing import SkipTest\r\nfrom sklearn.utils.estimator_checks import (\r\n _construct_instance,\r\n _set_checking_parameters,\r\n _set_check_estimator_ids,\r\n check_parameters_default_constructible,\r\n check_class_weight_balanced_linear_classifier,\r\n parametrize_with_checks)\r\n\r\n\r\ndef test_all_estimator_no_base_class():\r\n # test that all_estimators doesn't find abstract classes.\r\n for name, Estimator in all_estimators():\r\n msg = (\"Base estimators such as {0} should not be included\"\r\n \" in all_estimators\").format(name)\r\n assert not name.lower().startswith('base'), msg\r\n\r\n\r\[email protected](\r\n 'name, Estimator',\r\n all_estimators()\r\n)\r\ndef test_parameters_default_constructible(name, Estimator):\r\n # Test that estimators are default-constructible\r\n check_parameters_default_constructible(name, Estimator)\r\n\r\n\r\ndef _sample_func(x, y=1):\r\n pass\r\n\r\n\r\[email protected](\"val, expected\", [\r\n (partial(_sample_func, y=1), \"_sample_func(y=1)\"),\r\n (_sample_func, \"_sample_func\"),\r\n (partial(_sample_func, 'world'), \"_sample_func\"),\r\n (LogisticRegression(C=2.0), \"LogisticRegression(C=2.0)\"),\r\n (LogisticRegression(random_state=1, solver='newton-cg',\r\n class_weight='balanced', warm_start=True),\r\n \"LogisticRegression(class_weight='balanced',random_state=1,\"\r\n \"solver='newton-cg',warm_start=True)\")\r\n])\r\ndef test_set_check_estimator_ids(val, expected):\r\n assert _set_check_estimator_ids(val) == expected\r\n\r\n\r\ndef _tested_estimators():\r\n for name, Estimator in all_estimators():\r\n if issubclass(Estimator, BiclusterMixin):\r\n continue\r\n try:\r\n estimator = _construct_instance(Estimator)\r\n except SkipTest:\r\n continue\r\n\r\n yield estimator\r\n\r\n\r\n@parametrize_with_checks(_tested_estimators())\r\ndef test_estimators(estimator, check):\r\n # Common tests for estimator instances\r\n with ignore_warnings(category=(FutureWarning,\r\n ConvergenceWarning,\r\n UserWarning, FutureWarning)):\r\n _set_checking_parameters(estimator)\r\n check(estimator)\r\n\r\n\r\ndef test_check_estimator_generate_only():\r\n estimator_cls_gen_checks = check_estimator(LogisticRegression,\r\n generate_only=True)\r\n all_instance_gen_checks = check_estimator(LogisticRegression(),\r\n generate_only=True)\r\n assert isgenerator(estimator_cls_gen_checks)\r\n assert isgenerator(all_instance_gen_checks)\r\n\r\n estimator_cls_checks = list(estimator_cls_gen_checks)\r\n all_instance_checks = list(all_instance_gen_checks)\r\n\r\n # all classes checks include check_parameters_default_constructible\r\n assert len(estimator_cls_checks) == len(all_instance_checks) + 1\r\n\r\n 
# TODO: meta-estimators like GridSearchCV has required parameters\r\n # that do not have default values. This is expected to change in the future\r\n with pytest.raises(SkipTest):\r\n for estimator, check in check_estimator(GridSearchCV,\r\n generate_only=True):\r\n check(estimator)\r\n\r\n\r\n@ignore_warnings(category=(DeprecationWarning, FutureWarning))\r\n# ignore deprecated open(.., 'U') in numpy distutils\r\ndef test_configure():\r\n # Smoke test the 'configure' step of setup, this tests all the\r\n # 'configure' functions in the setup.pys in scikit-learn\r\n # This test requires Cython which is not necessarily there when running\r\n # the tests of an installed version of scikit-learn or when scikit-learn\r\n # is installed in editable mode by pip build isolation enabled.\r\n pytest.importorskip(\"Cython\")\r\n cwd = os.getcwd()\r\n setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))\r\n setup_filename = os.path.join(setup_path, 'setup.py')\r\n if not os.path.exists(setup_filename):\r\n return\r\n try:\r\n os.chdir(setup_path)\r\n old_argv = sys.argv\r\n sys.argv = ['setup.py', 'config']\r\n\r\n with warnings.catch_warnings():\r\n # The configuration spits out warnings when not finding\r\n # Blas/Atlas development headers\r\n warnings.simplefilter('ignore', UserWarning)\r\n with open('setup.py') as f:\r\n exec(f.read(), dict(__name__='__main__'))\r\n finally:\r\n sys.argv = old_argv\r\n os.chdir(cwd)\r\n\r\n\r\ndef _tested_linear_classifiers():\r\n classifiers = all_estimators(type_filter='classifier')\r\n\r\n with warnings.catch_warnings(record=True):\r\n for name, clazz in classifiers:\r\n required_parameters = getattr(clazz, \"_required_parameters\", [])\r\n if len(required_parameters):\r\n # FIXME\r\n continue\r\n\r\n if ('class_weight' in clazz().get_params().keys() and\r\n issubclass(clazz, LinearClassifierMixin)):\r\n yield name, clazz\r\n\r\n\r\[email protected](\"name, Classifier\",\r\n _tested_linear_classifiers())\r\ndef test_class_weight_balanced_linear_classifiers(name, Classifier):\r\n check_class_weight_balanced_linear_classifier(name, Classifier)\r\n\r\n\r\n@ignore_warnings\r\ndef test_import_all_consistency():\r\n # Smoke test to check that any name in a __all__ list is actually defined\r\n # in the namespace of the module or package.\r\n pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',\r\n onerror=lambda _: None)\r\n submods = [modname for _, modname, _ in pkgs]\r\n for modname in submods + ['sklearn']:\r\n if \".tests.\" in modname:\r\n continue\r\n if IS_PYPY and ('_svmlight_format_io' in modname or\r\n 'feature_extraction._hashing_fast' in modname):\r\n continue\r\n package = __import__(modname, fromlist=\"dummy\")\r\n for name in getattr(package, '__all__', ()):\r\n if getattr(package, name, None) is None:\r\n raise AttributeError(\r\n \"Module '{0}' has no attribute '{1}'\".format(\r\n modname, name))\r\n\r\n\r\ndef test_root_import_all_completeness():\r\n EXCEPTIONS = ('utils', 'tests', 'base', 'setup', 'conftest')\r\n for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,\r\n onerror=lambda _: None):\r\n if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:\r\n continue\r\n assert modname in sklearn.__all__\r\n\r\n\r\ndef test_all_tests_are_importable():\r\n # Ensure that for each contentful subpackage, there is a test directory\r\n # within it that is also a subpackage (i.e. 
a directory with __init__.py)\r\n\r\n HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)\r\n \\.externals(\\.|$)|\r\n \\.tests(\\.|$)|\r\n \\._\r\n ''')\r\n lookup = {name: ispkg\r\n for _, name, ispkg\r\n in pkgutil.walk_packages(sklearn.__path__, prefix='sklearn.')}\r\n missing_tests = [name for name, ispkg in lookup.items()\r\n if ispkg\r\n and not HAS_TESTS_EXCEPTIONS.search(name)\r\n and name + '.tests' not in lookup]\r\n assert missing_tests == [], ('{0} do not have `tests` subpackages. '\r\n 'Perhaps they require '\r\n '__init__.py or an add_subpackage directive '\r\n 'in the parent '\r\n 'setup.py'.format(missing_tests))\r\n",
"import copy\r\nfrom datetime import timedelta\r\nfrom textwrap import dedent\r\nfrom typing import Dict, no_type_check\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._libs import lib\r\nfrom pandas._libs.tslibs import NaT, Period, Timestamp\r\nfrom pandas._libs.tslibs.frequencies import is_subperiod, is_superperiod\r\nfrom pandas._libs.tslibs.period import IncompatibleFrequency\r\nfrom pandas.compat.numpy import function as nv\r\nfrom pandas.errors import AbstractMethodError\r\nfrom pandas.util._decorators import Appender, Substitution\r\n\r\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\r\n\r\nimport pandas.core.algorithms as algos\r\nfrom pandas.core.base import DataError, ShallowMixin\r\nfrom pandas.core.generic import _shared_docs\r\nfrom pandas.core.groupby.base import GroupByMixin\r\nfrom pandas.core.groupby.generic import SeriesGroupBy\r\nfrom pandas.core.groupby.groupby import GroupBy, _GroupBy, _pipe_template, get_groupby\r\nfrom pandas.core.groupby.grouper import Grouper\r\nfrom pandas.core.groupby.ops import BinGrouper\r\nfrom pandas.core.indexes.datetimes import DatetimeIndex, date_range\r\nfrom pandas.core.indexes.period import PeriodIndex, period_range\r\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range\r\n\r\nfrom pandas.tseries.frequencies import to_offset\r\nfrom pandas.tseries.offsets import DateOffset, Day, Nano, Tick\r\n\r\n_shared_docs_kwargs: Dict[str, str] = dict()\r\n\r\n\r\nclass Resampler(_GroupBy, ShallowMixin):\r\n \"\"\"\r\n Class for resampling datetimelike data, a groupby-like operation.\r\n See aggregate, transform, and apply functions on this object.\r\n\r\n It's easiest to use obj.resample(...) to use Resampler.\r\n\r\n Parameters\r\n ----------\r\n obj : pandas object\r\n groupby : a TimeGrouper object\r\n axis : int, default 0\r\n kind : str or None\r\n 'period', 'timestamp' to override default index treatment\r\n\r\n Returns\r\n -------\r\n a Resampler of the appropriate type\r\n\r\n Notes\r\n -----\r\n After resampling, see aggregate, apply, and transform functions.\r\n \"\"\"\r\n\r\n # to the groupby descriptor\r\n _attributes = [\r\n \"freq\",\r\n \"axis\",\r\n \"closed\",\r\n \"label\",\r\n \"convention\",\r\n \"loffset\",\r\n \"base\",\r\n \"kind\",\r\n ]\r\n\r\n def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):\r\n self.groupby = groupby\r\n self.keys = None\r\n self.sort = True\r\n self.axis = axis\r\n self.kind = kind\r\n self.squeeze = False\r\n self.group_keys = True\r\n self.as_index = True\r\n self.exclusions = set()\r\n self.binner = None\r\n self.grouper = None\r\n\r\n if self.groupby is not None:\r\n self.groupby._set_grouper(self._convert_obj(obj), sort=True)\r\n\r\n def __str__(self) -> str:\r\n \"\"\"\r\n Provide a nice str repr of our rolling object.\r\n \"\"\"\r\n attrs = (\r\n f\"{k}={getattr(self.groupby, k)}\"\r\n for k in self._attributes\r\n if getattr(self.groupby, k, None) is not None\r\n )\r\n return f\"{type(self).__name__} [{', '.join(attrs)}]\"\r\n\r\n def __getattr__(self, attr: str):\r\n if attr in self._internal_names_set:\r\n return object.__getattribute__(self, attr)\r\n if attr in self._attributes:\r\n return getattr(self.groupby, attr)\r\n if attr in self.obj:\r\n return self[attr]\r\n\r\n return object.__getattribute__(self, attr)\r\n\r\n def __iter__(self):\r\n \"\"\"\r\n Resampler iterator.\r\n\r\n Returns\r\n -------\r\n Generator yielding sequence of (name, subsetted object)\r\n for each group.\r\n\r\n See Also\r\n --------\r\n GroupBy.__iter__\r\n 
\"\"\"\r\n self._set_binner()\r\n return super().__iter__()\r\n\r\n @property\r\n def obj(self):\r\n return self.groupby.obj\r\n\r\n @property\r\n def ax(self):\r\n return self.groupby.ax\r\n\r\n @property\r\n def _typ(self) -> str:\r\n \"\"\"\r\n Masquerade for compat as a Series or a DataFrame.\r\n \"\"\"\r\n if isinstance(self._selected_obj, ABCSeries):\r\n return \"series\"\r\n return \"dataframe\"\r\n\r\n @property\r\n def _from_selection(self) -> bool:\r\n \"\"\"\r\n Is the resampling from a DataFrame column or MultiIndex level.\r\n \"\"\"\r\n # upsampling and PeriodIndex resampling do not work\r\n # with selection, this state used to catch and raise an error\r\n return self.groupby is not None and (\r\n self.groupby.key is not None or self.groupby.level is not None\r\n )\r\n\r\n def _convert_obj(self, obj):\r\n \"\"\"\r\n Provide any conversions for the object in order to correctly handle.\r\n\r\n Parameters\r\n ----------\r\n obj : the object to be resampled\r\n\r\n Returns\r\n -------\r\n obj : converted object\r\n \"\"\"\r\n obj = obj._consolidate()\r\n return obj\r\n\r\n def _get_binner_for_time(self):\r\n raise AbstractMethodError(self)\r\n\r\n def _set_binner(self):\r\n \"\"\"\r\n Setup our binners.\r\n\r\n Cache these as we are an immutable object\r\n \"\"\"\r\n if self.binner is None:\r\n self.binner, self.grouper = self._get_binner()\r\n\r\n def _get_binner(self):\r\n \"\"\"\r\n Create the BinGrouper, assume that self.set_grouper(obj)\r\n has already been called.\r\n \"\"\"\r\n\r\n binner, bins, binlabels = self._get_binner_for_time()\r\n assert len(bins) == len(binlabels)\r\n bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer)\r\n return binner, bin_grouper\r\n\r\n def _assure_grouper(self):\r\n \"\"\"\r\n Make sure that we are creating our binner & grouper.\r\n \"\"\"\r\n self._set_binner()\r\n\r\n @Substitution(\r\n klass=\"Resampler\",\r\n versionadded=\".. versionadded:: 0.23.0\",\r\n examples=\"\"\"\r\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4]},\r\n ... 
index=pd.date_range('2012-08-02', periods=4))\r\n >>> df\r\n A\r\n 2012-08-02 1\r\n 2012-08-03 2\r\n 2012-08-04 3\r\n 2012-08-05 4\r\n\r\n To get the difference between each 2-day period's maximum and minimum\r\n value in one pass, you can do\r\n\r\n >>> df.resample('2D').pipe(lambda x: x.max() - x.min())\r\n A\r\n 2012-08-02 1\r\n 2012-08-04 1\"\"\",\r\n )\r\n @Appender(_pipe_template)\r\n def pipe(self, func, *args, **kwargs):\r\n return super().pipe(func, *args, **kwargs)\r\n\r\n _agg_see_also_doc = dedent(\r\n \"\"\"\r\n See Also\r\n --------\r\n DataFrame.groupby.aggregate\r\n DataFrame.resample.transform\r\n DataFrame.aggregate\r\n \"\"\"\r\n )\r\n\r\n _agg_examples_doc = dedent(\r\n \"\"\"\r\n Examples\r\n --------\r\n >>> s = pd.Series([1,2,3,4,5],\r\n index=pd.date_range('20130101', periods=5,freq='s'))\r\n 2013-01-01 00:00:00 1\r\n 2013-01-01 00:00:01 2\r\n 2013-01-01 00:00:02 3\r\n 2013-01-01 00:00:03 4\r\n 2013-01-01 00:00:04 5\r\n Freq: S, dtype: int64\r\n\r\n >>> r = s.resample('2s')\r\n DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left,\r\n label=left, convention=start, base=0]\r\n\r\n >>> r.agg(np.sum)\r\n 2013-01-01 00:00:00 3\r\n 2013-01-01 00:00:02 7\r\n 2013-01-01 00:00:04 5\r\n Freq: 2S, dtype: int64\r\n\r\n >>> r.agg(['sum','mean','max'])\r\n sum mean max\r\n 2013-01-01 00:00:00 3 1.5 2\r\n 2013-01-01 00:00:02 7 3.5 4\r\n 2013-01-01 00:00:04 5 5.0 5\r\n\r\n >>> r.agg({'result' : lambda x: x.mean() / x.std(),\r\n 'total' : np.sum})\r\n total result\r\n 2013-01-01 00:00:00 3 2.121320\r\n 2013-01-01 00:00:02 7 4.949747\r\n 2013-01-01 00:00:04 5 NaN\r\n \"\"\"\r\n )\r\n\r\n @Substitution(\r\n see_also=_agg_see_also_doc,\r\n examples=_agg_examples_doc,\r\n versionadded=\"\",\r\n klass=\"DataFrame\",\r\n axis=\"\",\r\n )\r\n @Appender(_shared_docs[\"aggregate\"])\r\n def aggregate(self, func, *args, **kwargs):\r\n\r\n self._set_binner()\r\n result, how = self._aggregate(func, *args, **kwargs)\r\n if result is None:\r\n how = func\r\n grouper = None\r\n result = self._groupby_and_aggregate(how, grouper, *args, **kwargs)\r\n\r\n result = self._apply_loffset(result)\r\n return result\r\n\r\n agg = aggregate\r\n apply = aggregate\r\n\r\n def transform(self, arg, *args, **kwargs):\r\n \"\"\"\r\n Call function producing a like-indexed Series on each group and return\r\n a Series with the transformed values.\r\n\r\n Parameters\r\n ----------\r\n arg : function\r\n To apply to each group. Should return a Series with the same index.\r\n\r\n Returns\r\n -------\r\n transformed : Series\r\n\r\n Examples\r\n --------\r\n >>> resampled.transform(lambda x: (x - x.mean()) / x.std())\r\n \"\"\"\r\n return self._selected_obj.groupby(self.groupby).transform(arg, *args, **kwargs)\r\n\r\n def _downsample(self, f):\r\n raise AbstractMethodError(self)\r\n\r\n def _upsample(self, f, limit=None, fill_value=None):\r\n raise AbstractMethodError(self)\r\n\r\n def _gotitem(self, key, ndim: int, subset=None):\r\n \"\"\"\r\n Sub-classes to define. 
Return a sliced object.\r\n\r\n Parameters\r\n ----------\r\n key : string / list of selections\r\n ndim : 1,2\r\n requested ndim of result\r\n subset : object, default None\r\n subset to act on\r\n \"\"\"\r\n self._set_binner()\r\n grouper = self.grouper\r\n if subset is None:\r\n subset = self.obj\r\n grouped = get_groupby(subset, by=None, grouper=grouper, axis=self.axis)\r\n\r\n # try the key selection\r\n try:\r\n return grouped[key]\r\n except KeyError:\r\n return grouped\r\n\r\n def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):\r\n \"\"\"\r\n Re-evaluate the obj with a groupby aggregation.\r\n \"\"\"\r\n\r\n if grouper is None:\r\n self._set_binner()\r\n grouper = self.grouper\r\n\r\n obj = self._selected_obj\r\n\r\n grouped = get_groupby(obj, by=None, grouper=grouper, axis=self.axis)\r\n\r\n try:\r\n if isinstance(obj, ABCDataFrame) and callable(how):\r\n # Check if the function is reducing or not.\r\n result = grouped._aggregate_item_by_item(how, *args, **kwargs)\r\n else:\r\n result = grouped.aggregate(how, *args, **kwargs)\r\n except DataError:\r\n # we have a non-reducing function; try to evaluate\r\n result = grouped.apply(how, *args, **kwargs)\r\n except ValueError as err:\r\n if \"Must produce aggregated value\" in str(err):\r\n # raised in _aggregate_named\r\n pass\r\n elif \"len(index) != len(labels)\" in str(err):\r\n # raised in libgroupby validation\r\n pass\r\n elif \"No objects to concatenate\" in str(err):\r\n # raised in concat call\r\n # In tests this is reached via either\r\n # _apply_to_column_groupbys (ohlc) or DataFrameGroupBy.nunique\r\n pass\r\n else:\r\n raise\r\n\r\n # we have a non-reducing function\r\n # try to evaluate\r\n result = grouped.apply(how, *args, **kwargs)\r\n\r\n result = self._apply_loffset(result)\r\n return self._wrap_result(result)\r\n\r\n def _apply_loffset(self, result):\r\n \"\"\"\r\n If loffset is set, offset the result index.\r\n\r\n This is NOT an idempotent routine, it will be applied\r\n exactly once to the result.\r\n\r\n Parameters\r\n ----------\r\n result : Series or DataFrame\r\n the result of resample\r\n \"\"\"\r\n\r\n needs_offset = (\r\n isinstance(self.loffset, (DateOffset, timedelta, np.timedelta64))\r\n and isinstance(result.index, DatetimeIndex)\r\n and len(result.index) > 0\r\n )\r\n\r\n if needs_offset:\r\n result.index = result.index + self.loffset\r\n\r\n self.loffset = None\r\n return result\r\n\r\n def _get_resampler_for_grouping(self, groupby, **kwargs):\r\n \"\"\"\r\n Return the correct class for resampling with groupby.\r\n \"\"\"\r\n return self._resampler_for_grouping(self, groupby=groupby, **kwargs)\r\n\r\n def _wrap_result(self, result):\r\n \"\"\"\r\n Potentially wrap any results.\r\n \"\"\"\r\n if isinstance(result, ABCSeries) and self._selection is not None:\r\n result.name = self._selection\r\n\r\n if isinstance(result, ABCSeries) and result.empty:\r\n obj = self.obj\r\n if isinstance(obj.index, PeriodIndex):\r\n result.index = obj.index.asfreq(self.freq)\r\n else:\r\n result.index = obj.index._shallow_copy(freq=self.freq)\r\n result.name = getattr(obj, \"name\", None)\r\n\r\n return result\r\n\r\n def pad(self, limit=None):\r\n \"\"\"\r\n Forward fill the values.\r\n\r\n Parameters\r\n ----------\r\n limit : int, optional\r\n Limit of how many values to fill.\r\n\r\n Returns\r\n -------\r\n An upsampled Series.\r\n\r\n See Also\r\n --------\r\n Series.fillna\r\n DataFrame.fillna\r\n \"\"\"\r\n return self._upsample(\"pad\", limit=limit)\r\n\r\n ffill = pad\r\n\r\n def nearest(self, 
limit=None):\r\n \"\"\"\r\n Resample by using the nearest value.\r\n\r\n When resampling data, missing values may appear (e.g., when the\r\n resampling frequency is higher than the original frequency).\r\n The `nearest` method will replace ``NaN`` values that appeared in\r\n the resampled data with the value from the nearest member of the\r\n sequence, based on the index value.\r\n Missing values that existed in the original data will not be modified.\r\n If `limit` is given, fill only this many values in each direction for\r\n each of the original values.\r\n\r\n Parameters\r\n ----------\r\n limit : int, optional\r\n Limit of how many values to fill.\r\n\r\n .. versionadded:: 0.21.0\r\n\r\n Returns\r\n -------\r\n Series or DataFrame\r\n An upsampled Series or DataFrame with ``NaN`` values filled with\r\n their nearest value.\r\n\r\n See Also\r\n --------\r\n backfill : Backward fill the new missing values in the resampled data.\r\n pad : Forward fill ``NaN`` values.\r\n\r\n Examples\r\n --------\r\n >>> s = pd.Series([1, 2],\r\n ... index=pd.date_range('20180101',\r\n ... periods=2,\r\n ... freq='1h'))\r\n >>> s\r\n 2018-01-01 00:00:00 1\r\n 2018-01-01 01:00:00 2\r\n Freq: H, dtype: int64\r\n\r\n >>> s.resample('15min').nearest()\r\n 2018-01-01 00:00:00 1\r\n 2018-01-01 00:15:00 1\r\n 2018-01-01 00:30:00 2\r\n 2018-01-01 00:45:00 2\r\n 2018-01-01 01:00:00 2\r\n Freq: 15T, dtype: int64\r\n\r\n Limit the number of upsampled values imputed by the nearest:\r\n\r\n >>> s.resample('15min').nearest(limit=1)\r\n 2018-01-01 00:00:00 1.0\r\n 2018-01-01 00:15:00 1.0\r\n 2018-01-01 00:30:00 NaN\r\n 2018-01-01 00:45:00 2.0\r\n 2018-01-01 01:00:00 2.0\r\n Freq: 15T, dtype: float64\r\n \"\"\"\r\n return self._upsample(\"nearest\", limit=limit)\r\n\r\n def backfill(self, limit=None):\r\n \"\"\"\r\n Backward fill the new missing values in the resampled data.\r\n\r\n In statistics, imputation is the process of replacing missing data with\r\n substituted values [1]_. When resampling data, missing values may\r\n appear (e.g., when the resampling frequency is higher than the original\r\n frequency). The backward fill will replace NaN values that appeared in\r\n the resampled data with the next value in the original sequence.\r\n Missing values that existed in the original data will not be modified.\r\n\r\n Parameters\r\n ----------\r\n limit : int, optional\r\n Limit of how many values to fill.\r\n\r\n Returns\r\n -------\r\n Series, DataFrame\r\n An upsampled Series or DataFrame with backward filled NaN values.\r\n\r\n See Also\r\n --------\r\n bfill : Alias of backfill.\r\n fillna : Fill NaN values using the specified method, which can be\r\n 'backfill'.\r\n nearest : Fill NaN values with nearest neighbor starting from center.\r\n pad : Forward fill NaN values.\r\n Series.fillna : Fill NaN values in the Series using the\r\n specified method, which can be 'backfill'.\r\n DataFrame.fillna : Fill NaN values in the DataFrame using the\r\n specified method, which can be 'backfill'.\r\n\r\n References\r\n ----------\r\n .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)\r\n\r\n Examples\r\n --------\r\n\r\n Resampling a Series:\r\n\r\n >>> s = pd.Series([1, 2, 3],\r\n ... 
index=pd.date_range('20180101', periods=3, freq='h'))\r\n >>> s\r\n 2018-01-01 00:00:00 1\r\n 2018-01-01 01:00:00 2\r\n 2018-01-01 02:00:00 3\r\n Freq: H, dtype: int64\r\n\r\n >>> s.resample('30min').backfill()\r\n 2018-01-01 00:00:00 1\r\n 2018-01-01 00:30:00 2\r\n 2018-01-01 01:00:00 2\r\n 2018-01-01 01:30:00 3\r\n 2018-01-01 02:00:00 3\r\n Freq: 30T, dtype: int64\r\n\r\n >>> s.resample('15min').backfill(limit=2)\r\n 2018-01-01 00:00:00 1.0\r\n 2018-01-01 00:15:00 NaN\r\n 2018-01-01 00:30:00 2.0\r\n 2018-01-01 00:45:00 2.0\r\n 2018-01-01 01:00:00 2.0\r\n 2018-01-01 01:15:00 NaN\r\n 2018-01-01 01:30:00 3.0\r\n 2018-01-01 01:45:00 3.0\r\n 2018-01-01 02:00:00 3.0\r\n Freq: 15T, dtype: float64\r\n\r\n Resampling a DataFrame that has missing values:\r\n\r\n >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},\r\n ... index=pd.date_range('20180101', periods=3,\r\n ... freq='h'))\r\n >>> df\r\n a b\r\n 2018-01-01 00:00:00 2.0 1\r\n 2018-01-01 01:00:00 NaN 3\r\n 2018-01-01 02:00:00 6.0 5\r\n\r\n >>> df.resample('30min').backfill()\r\n a b\r\n 2018-01-01 00:00:00 2.0 1\r\n 2018-01-01 00:30:00 NaN 3\r\n 2018-01-01 01:00:00 NaN 3\r\n 2018-01-01 01:30:00 6.0 5\r\n 2018-01-01 02:00:00 6.0 5\r\n\r\n >>> df.resample('15min').backfill(limit=2)\r\n a b\r\n 2018-01-01 00:00:00 2.0 1.0\r\n 2018-01-01 00:15:00 NaN NaN\r\n 2018-01-01 00:30:00 NaN 3.0\r\n 2018-01-01 00:45:00 NaN 3.0\r\n 2018-01-01 01:00:00 NaN 3.0\r\n 2018-01-01 01:15:00 NaN NaN\r\n 2018-01-01 01:30:00 6.0 5.0\r\n 2018-01-01 01:45:00 6.0 5.0\r\n 2018-01-01 02:00:00 6.0 5.0\r\n \"\"\"\r\n return self._upsample(\"backfill\", limit=limit)\r\n\r\n bfill = backfill\r\n\r\n def fillna(self, method, limit=None):\r\n \"\"\"\r\n Fill missing values introduced by upsampling.\r\n\r\n In statistics, imputation is the process of replacing missing data with\r\n substituted values [1]_. When resampling data, missing values may\r\n appear (e.g., when the resampling frequency is higher than the original\r\n frequency).\r\n\r\n Missing values that existed in the original data will\r\n not be modified.\r\n\r\n Parameters\r\n ----------\r\n method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}\r\n Method to use for filling holes in resampled data\r\n\r\n * 'pad' or 'ffill': use previous valid observation to fill gap\r\n (forward fill).\r\n * 'backfill' or 'bfill': use next valid observation to fill gap.\r\n * 'nearest': use nearest valid observation to fill gap.\r\n\r\n limit : int, optional\r\n Limit of how many consecutive missing values to fill.\r\n\r\n Returns\r\n -------\r\n Series or DataFrame\r\n An upsampled Series or DataFrame with missing values filled.\r\n\r\n See Also\r\n --------\r\n backfill : Backward fill NaN values in the resampled data.\r\n pad : Forward fill NaN values in the resampled data.\r\n nearest : Fill NaN values in the resampled data\r\n with nearest neighbor starting from center.\r\n interpolate : Fill NaN values using interpolation.\r\n Series.fillna : Fill NaN values in the Series using the\r\n specified method, which can be 'bfill' and 'ffill'.\r\n DataFrame.fillna : Fill NaN values in the DataFrame using the\r\n specified method, which can be 'bfill' and 'ffill'.\r\n\r\n References\r\n ----------\r\n .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)\r\n\r\n Examples\r\n --------\r\n Resampling a Series:\r\n\r\n >>> s = pd.Series([1, 2, 3],\r\n ... 
index=pd.date_range('20180101', periods=3, freq='h'))\r\n >>> s\r\n 2018-01-01 00:00:00 1\r\n 2018-01-01 01:00:00 2\r\n 2018-01-01 02:00:00 3\r\n Freq: H, dtype: int64\r\n\r\n Without filling the missing values you get:\r\n\r\n >>> s.resample(\"30min\").asfreq()\r\n 2018-01-01 00:00:00 1.0\r\n 2018-01-01 00:30:00 NaN\r\n 2018-01-01 01:00:00 2.0\r\n 2018-01-01 01:30:00 NaN\r\n 2018-01-01 02:00:00 3.0\r\n Freq: 30T, dtype: float64\r\n\r\n >>> s.resample('30min').fillna(\"backfill\")\r\n 2018-01-01 00:00:00 1\r\n 2018-01-01 00:30:00 2\r\n 2018-01-01 01:00:00 2\r\n 2018-01-01 01:30:00 3\r\n 2018-01-01 02:00:00 3\r\n Freq: 30T, dtype: int64\r\n\r\n >>> s.resample('15min').fillna(\"backfill\", limit=2)\r\n 2018-01-01 00:00:00 1.0\r\n 2018-01-01 00:15:00 NaN\r\n 2018-01-01 00:30:00 2.0\r\n 2018-01-01 00:45:00 2.0\r\n 2018-01-01 01:00:00 2.0\r\n 2018-01-01 01:15:00 NaN\r\n 2018-01-01 01:30:00 3.0\r\n 2018-01-01 01:45:00 3.0\r\n 2018-01-01 02:00:00 3.0\r\n Freq: 15T, dtype: float64\r\n\r\n >>> s.resample('30min').fillna(\"pad\")\r\n 2018-01-01 00:00:00 1\r\n 2018-01-01 00:30:00 1\r\n 2018-01-01 01:00:00 2\r\n 2018-01-01 01:30:00 2\r\n 2018-01-01 02:00:00 3\r\n Freq: 30T, dtype: int64\r\n\r\n >>> s.resample('30min').fillna(\"nearest\")\r\n 2018-01-01 00:00:00 1\r\n 2018-01-01 00:30:00 2\r\n 2018-01-01 01:00:00 2\r\n 2018-01-01 01:30:00 3\r\n 2018-01-01 02:00:00 3\r\n Freq: 30T, dtype: int64\r\n\r\n Missing values present before the upsampling are not affected.\r\n\r\n >>> sm = pd.Series([1, None, 3],\r\n ... index=pd.date_range('20180101', periods=3, freq='h'))\r\n >>> sm\r\n 2018-01-01 00:00:00 1.0\r\n 2018-01-01 01:00:00 NaN\r\n 2018-01-01 02:00:00 3.0\r\n Freq: H, dtype: float64\r\n\r\n >>> sm.resample('30min').fillna('backfill')\r\n 2018-01-01 00:00:00 1.0\r\n 2018-01-01 00:30:00 NaN\r\n 2018-01-01 01:00:00 NaN\r\n 2018-01-01 01:30:00 3.0\r\n 2018-01-01 02:00:00 3.0\r\n Freq: 30T, dtype: float64\r\n\r\n >>> sm.resample('30min').fillna('pad')\r\n 2018-01-01 00:00:00 1.0\r\n 2018-01-01 00:30:00 1.0\r\n 2018-01-01 01:00:00 NaN\r\n 2018-01-01 01:30:00 NaN\r\n 2018-01-01 02:00:00 3.0\r\n Freq: 30T, dtype: float64\r\n\r\n >>> sm.resample('30min').fillna('nearest')\r\n 2018-01-01 00:00:00 1.0\r\n 2018-01-01 00:30:00 NaN\r\n 2018-01-01 01:00:00 NaN\r\n 2018-01-01 01:30:00 3.0\r\n 2018-01-01 02:00:00 3.0\r\n Freq: 30T, dtype: float64\r\n\r\n DataFrame resampling is done column-wise. All the same options are\r\n available.\r\n\r\n >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},\r\n ... index=pd.date_range('20180101', periods=3,\r\n ... 
freq='h'))\r\n >>> df\r\n a b\r\n 2018-01-01 00:00:00 2.0 1\r\n 2018-01-01 01:00:00 NaN 3\r\n 2018-01-01 02:00:00 6.0 5\r\n\r\n >>> df.resample('30min').fillna(\"bfill\")\r\n a b\r\n 2018-01-01 00:00:00 2.0 1\r\n 2018-01-01 00:30:00 NaN 3\r\n 2018-01-01 01:00:00 NaN 3\r\n 2018-01-01 01:30:00 6.0 5\r\n 2018-01-01 02:00:00 6.0 5\r\n \"\"\"\r\n return self._upsample(method, limit=limit)\r\n\r\n @Appender(_shared_docs[\"interpolate\"] % _shared_docs_kwargs)\r\n def interpolate(\r\n self,\r\n method=\"linear\",\r\n axis=0,\r\n limit=None,\r\n inplace=False,\r\n limit_direction=\"forward\",\r\n limit_area=None,\r\n downcast=None,\r\n **kwargs,\r\n ):\r\n \"\"\"\r\n Interpolate values according to different methods.\r\n \"\"\"\r\n result = self._upsample(None)\r\n return result.interpolate(\r\n method=method,\r\n axis=axis,\r\n limit=limit,\r\n inplace=inplace,\r\n limit_direction=limit_direction,\r\n limit_area=limit_area,\r\n downcast=downcast,\r\n **kwargs,\r\n )\r\n\r\n def asfreq(self, fill_value=None):\r\n \"\"\"\r\n Return the values at the new freq, essentially a reindex.\r\n\r\n Parameters\r\n ----------\r\n fill_value : scalar, optional\r\n Value to use for missing values, applied during upsampling (note\r\n this does not fill NaNs that already were present).\r\n\r\n Returns\r\n -------\r\n DataFrame or Series\r\n Values at the specified freq.\r\n\r\n See Also\r\n --------\r\n Series.asfreq\r\n DataFrame.asfreq\r\n \"\"\"\r\n return self._upsample(\"asfreq\", fill_value=fill_value)\r\n\r\n def std(self, ddof=1, *args, **kwargs):\r\n \"\"\"\r\n Compute standard deviation of groups, excluding missing values.\r\n\r\n Parameters\r\n ----------\r\n ddof : int, default 1\r\n Degrees of freedom.\r\n\r\n Returns\r\n -------\r\n DataFrame or Series\r\n Standard deviation of values within each group.\r\n \"\"\"\r\n nv.validate_resampler_func(\"std\", args, kwargs)\r\n return self._downsample(\"std\", ddof=ddof)\r\n\r\n def var(self, ddof=1, *args, **kwargs):\r\n \"\"\"\r\n Compute variance of groups, excluding missing values.\r\n\r\n Parameters\r\n ----------\r\n ddof : int, default 1\r\n Degrees of freedom.\r\n\r\n Returns\r\n -------\r\n DataFrame or Series\r\n Variance of values within each group.\r\n \"\"\"\r\n nv.validate_resampler_func(\"var\", args, kwargs)\r\n return self._downsample(\"var\", ddof=ddof)\r\n\r\n @Appender(GroupBy.size.__doc__)\r\n def size(self):\r\n result = self._downsample(\"size\")\r\n if not len(self.ax):\r\n from pandas import Series\r\n\r\n if self._selected_obj.ndim == 1:\r\n name = self._selected_obj.name\r\n else:\r\n name = None\r\n result = Series([], index=result.index, dtype=\"int64\", name=name)\r\n return result\r\n\r\n @Appender(GroupBy.count.__doc__)\r\n def count(self):\r\n result = self._downsample(\"count\")\r\n if not len(self.ax):\r\n if self._selected_obj.ndim == 1:\r\n result = type(self._selected_obj)(\r\n [], index=result.index, dtype=\"int64\", name=self._selected_obj.name\r\n )\r\n else:\r\n from pandas import DataFrame\r\n\r\n result = DataFrame(\r\n [], index=result.index, columns=result.columns, dtype=\"int64\"\r\n )\r\n\r\n return result\r\n\r\n def quantile(self, q=0.5, **kwargs):\r\n \"\"\"\r\n Return value at the given quantile.\r\n\r\n .. 
versionadded:: 0.24.0\r\n\r\n Parameters\r\n ----------\r\n q : float or array-like, default 0.5 (50% quantile)\r\n\r\n Returns\r\n -------\r\n DataFrame or Series\r\n Quantile of values within each group.\r\n\r\n See Also\r\n --------\r\n Series.quantile\r\n DataFrame.quantile\r\n DataFrameGroupBy.quantile\r\n \"\"\"\r\n return self._downsample(\"quantile\", q=q, **kwargs)\r\n\r\n\r\n# downsample methods\r\nfor method in [\"sum\", \"prod\"]:\r\n\r\n def f(self, _method=method, min_count=0, *args, **kwargs):\r\n nv.validate_resampler_func(_method, args, kwargs)\r\n return self._downsample(_method, min_count=min_count)\r\n\r\n f.__doc__ = getattr(GroupBy, method).__doc__\r\n setattr(Resampler, method, f)\r\n\r\n\r\n# downsample methods\r\nfor method in [\"min\", \"max\", \"first\", \"last\", \"mean\", \"sem\", \"median\", \"ohlc\"]:\r\n\r\n def g(self, _method=method, *args, **kwargs):\r\n nv.validate_resampler_func(_method, args, kwargs)\r\n return self._downsample(_method)\r\n\r\n g.__doc__ = getattr(GroupBy, method).__doc__\r\n setattr(Resampler, method, g)\r\n\r\n\r\n# series only methods\r\nfor method in [\"nunique\"]:\r\n\r\n def h(self, _method=method):\r\n return self._downsample(_method)\r\n\r\n h.__doc__ = getattr(SeriesGroupBy, method).__doc__\r\n setattr(Resampler, method, h)\r\n\r\n\r\nclass _GroupByMixin(GroupByMixin):\r\n \"\"\"\r\n Provide the groupby facilities.\r\n \"\"\"\r\n\r\n def __init__(self, obj, *args, **kwargs):\r\n\r\n parent = kwargs.pop(\"parent\", None)\r\n groupby = kwargs.pop(\"groupby\", None)\r\n if parent is None:\r\n parent = obj\r\n\r\n # initialize our GroupByMixin object with\r\n # the resampler attributes\r\n for attr in self._attributes:\r\n setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))\r\n\r\n super().__init__(None)\r\n self._groupby = groupby\r\n self._groupby.mutated = True\r\n self._groupby.grouper.mutated = True\r\n self.groupby = copy.copy(parent.groupby)\r\n\r\n @no_type_check\r\n def _apply(self, f, grouper=None, *args, **kwargs):\r\n \"\"\"\r\n Dispatch to _upsample; we are stripping all of the _upsample kwargs and\r\n performing the original function call on the grouped object.\r\n \"\"\"\r\n\r\n def func(x):\r\n x = self._shallow_copy(x, groupby=self.groupby)\r\n\r\n if isinstance(f, str):\r\n return getattr(x, f)(**kwargs)\r\n\r\n return x.apply(f, *args, **kwargs)\r\n\r\n result = self._groupby.apply(func)\r\n return self._wrap_result(result)\r\n\r\n _upsample = _apply\r\n _downsample = _apply\r\n _groupby_and_aggregate = _apply\r\n\r\n\r\nclass DatetimeIndexResampler(Resampler):\r\n @property\r\n def _resampler_for_grouping(self):\r\n return DatetimeIndexResamplerGroupby\r\n\r\n def _get_binner_for_time(self):\r\n\r\n # this is how we are actually creating the bins\r\n if self.kind == \"period\":\r\n return self.groupby._get_time_period_bins(self.ax)\r\n return self.groupby._get_time_bins(self.ax)\r\n\r\n def _downsample(self, how, **kwargs):\r\n \"\"\"\r\n Downsample the cython defined function.\r\n\r\n Parameters\r\n ----------\r\n how : string / cython mapped function\r\n **kwargs : kw args passed to how function\r\n \"\"\"\r\n self._set_binner()\r\n how = self._get_cython_func(how) or how\r\n ax = self.ax\r\n obj = self._selected_obj\r\n\r\n if not len(ax):\r\n # reset to the new freq\r\n obj = obj.copy()\r\n obj.index._set_freq(self.freq)\r\n return obj\r\n\r\n # do we have a regular frequency\r\n if ax.freq is not None or ax.inferred_freq is not None:\r\n\r\n if len(self.grouper.binlabels) > len(ax) and how is 
None:\r\n\r\n # let's do an asfreq\r\n return self.asfreq()\r\n\r\n # we are downsampling\r\n # we want to call the actual grouper method here\r\n result = obj.groupby(self.grouper, axis=self.axis).aggregate(how, **kwargs)\r\n\r\n result = self._apply_loffset(result)\r\n return self._wrap_result(result)\r\n\r\n def _adjust_binner_for_upsample(self, binner):\r\n \"\"\"\r\n Adjust our binner when upsampling.\r\n\r\n The range of a new index should not be outside specified range\r\n \"\"\"\r\n if self.closed == \"right\":\r\n binner = binner[1:]\r\n else:\r\n binner = binner[:-1]\r\n return binner\r\n\r\n def _upsample(self, method, limit=None, fill_value=None):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n method : string {'backfill', 'bfill', 'pad',\r\n 'ffill', 'asfreq'} method for upsampling\r\n limit : int, default None\r\n Maximum size gap to fill when reindexing\r\n fill_value : scalar, default None\r\n Value to use for missing values\r\n\r\n See Also\r\n --------\r\n .fillna\r\n\r\n \"\"\"\r\n self._set_binner()\r\n if self.axis:\r\n raise AssertionError(\"axis must be 0\")\r\n if self._from_selection:\r\n raise ValueError(\r\n \"Upsampling from level= or on= selection \"\r\n \"is not supported, use .set_index(...) \"\r\n \"to explicitly set index to datetime-like\"\r\n )\r\n\r\n ax = self.ax\r\n obj = self._selected_obj\r\n binner = self.binner\r\n res_index = self._adjust_binner_for_upsample(binner)\r\n\r\n # if we have the same frequency as our axis, then we are equal sampling\r\n if limit is None and to_offset(ax.inferred_freq) == self.freq:\r\n result = obj.copy()\r\n result.index = res_index\r\n else:\r\n result = obj.reindex(\r\n res_index, method=method, limit=limit, fill_value=fill_value\r\n )\r\n\r\n result = self._apply_loffset(result)\r\n return self._wrap_result(result)\r\n\r\n def _wrap_result(self, result):\r\n result = super()._wrap_result(result)\r\n\r\n # we may have a different kind that we were asked originally\r\n # convert if needed\r\n if self.kind == \"period\" and not isinstance(result.index, PeriodIndex):\r\n result.index = result.index.to_period(self.freq)\r\n return result\r\n\r\n\r\nclass DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):\r\n \"\"\"\r\n Provides a resample of a groupby implementation\r\n \"\"\"\r\n\r\n @property\r\n def _constructor(self):\r\n return DatetimeIndexResampler\r\n\r\n\r\nclass PeriodIndexResampler(DatetimeIndexResampler):\r\n @property\r\n def _resampler_for_grouping(self):\r\n return PeriodIndexResamplerGroupby\r\n\r\n def _get_binner_for_time(self):\r\n if self.kind == \"timestamp\":\r\n return super()._get_binner_for_time()\r\n return self.groupby._get_period_bins(self.ax)\r\n\r\n def _convert_obj(self, obj):\r\n obj = super()._convert_obj(obj)\r\n\r\n if self._from_selection:\r\n # see GH 14008, GH 12871\r\n msg = (\r\n \"Resampling from level= or on= selection \"\r\n \"with a PeriodIndex is not currently supported, \"\r\n \"use .set_index(...) 
to explicitly set index\"\r\n )\r\n raise NotImplementedError(msg)\r\n\r\n if self.loffset is not None:\r\n # Cannot apply loffset/timedelta to PeriodIndex -> convert to\r\n # timestamps\r\n self.kind = \"timestamp\"\r\n\r\n # convert to timestamp\r\n if self.kind == \"timestamp\":\r\n obj = obj.to_timestamp(how=self.convention)\r\n\r\n return obj\r\n\r\n def _downsample(self, how, **kwargs):\r\n \"\"\"\r\n Downsample the cython defined function.\r\n\r\n Parameters\r\n ----------\r\n how : string / cython mapped function\r\n **kwargs : kw args passed to how function\r\n \"\"\"\r\n\r\n # we may need to actually resample as if we are timestamps\r\n if self.kind == \"timestamp\":\r\n return super()._downsample(how, **kwargs)\r\n\r\n how = self._get_cython_func(how) or how\r\n ax = self.ax\r\n\r\n if is_subperiod(ax.freq, self.freq):\r\n # Downsampling\r\n return self._groupby_and_aggregate(how, grouper=self.grouper, **kwargs)\r\n elif is_superperiod(ax.freq, self.freq):\r\n if how == \"ohlc\":\r\n # GH #13083\r\n # upsampling to subperiods is handled as an asfreq, which works\r\n # for pure aggregating/reducing methods\r\n # OHLC reduces along the time dimension, but creates multiple\r\n # values for each period -> handle by _groupby_and_aggregate()\r\n return self._groupby_and_aggregate(how, grouper=self.grouper)\r\n return self.asfreq()\r\n elif ax.freq == self.freq:\r\n return self.asfreq()\r\n\r\n raise IncompatibleFrequency(\r\n f\"Frequency {ax.freq} cannot be resampled to {self.freq}, \"\r\n \"as they are not sub or super periods\"\r\n )\r\n\r\n def _upsample(self, method, limit=None, fill_value=None):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n method : string {'backfill', 'bfill', 'pad', 'ffill'}\r\n Method for upsampling.\r\n limit : int, default None\r\n Maximum size gap to fill when reindexing.\r\n fill_value : scalar, default None\r\n Value to use for missing values.\r\n\r\n See Also\r\n --------\r\n .fillna\r\n\r\n \"\"\"\r\n\r\n # we may need to actually resample as if we are timestamps\r\n if self.kind == \"timestamp\":\r\n return super()._upsample(method, limit=limit, fill_value=fill_value)\r\n\r\n self._set_binner()\r\n ax = self.ax\r\n obj = self.obj\r\n new_index = self.binner\r\n\r\n # Start vs. 
end of period\r\n memb = ax.asfreq(self.freq, how=self.convention)\r\n\r\n # Get the fill indexer\r\n indexer = memb.get_indexer(new_index, method=method, limit=limit)\r\n return self._wrap_result(\r\n _take_new_index(obj, indexer, new_index, axis=self.axis)\r\n )\r\n\r\n\r\nclass PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):\r\n \"\"\"\r\n Provides a resample of a groupby implementation.\r\n \"\"\"\r\n\r\n @property\r\n def _constructor(self):\r\n return PeriodIndexResampler\r\n\r\n\r\nclass TimedeltaIndexResampler(DatetimeIndexResampler):\r\n @property\r\n def _resampler_for_grouping(self):\r\n return TimedeltaIndexResamplerGroupby\r\n\r\n def _get_binner_for_time(self):\r\n return self.groupby._get_time_delta_bins(self.ax)\r\n\r\n def _adjust_binner_for_upsample(self, binner):\r\n \"\"\"\r\n Adjust our binner when upsampling.\r\n\r\n The range of a new index is allowed to be greater than original range\r\n so we don't need to change the length of a binner, GH 13022\r\n \"\"\"\r\n return binner\r\n\r\n\r\nclass TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):\r\n \"\"\"\r\n Provides a resample of a groupby implementation.\r\n \"\"\"\r\n\r\n @property\r\n def _constructor(self):\r\n return TimedeltaIndexResampler\r\n\r\n\r\ndef resample(obj, kind=None, **kwds):\r\n \"\"\"\r\n Create a TimeGrouper and return our resampler.\r\n \"\"\"\r\n tg = TimeGrouper(**kwds)\r\n return tg._get_resampler(obj, kind=kind)\r\n\r\n\r\nresample.__doc__ = Resampler.__doc__\r\n\r\n\r\ndef get_resampler_for_grouping(\r\n groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs\r\n):\r\n \"\"\"\r\n Return our appropriate resampler when grouping as well.\r\n \"\"\"\r\n\r\n # .resample uses 'on' similar to how .groupby uses 'key'\r\n kwargs[\"key\"] = kwargs.pop(\"on\", None)\r\n\r\n tg = TimeGrouper(freq=rule, **kwargs)\r\n resampler = tg._get_resampler(groupby.obj, kind=kind)\r\n return resampler._get_resampler_for_grouping(groupby=groupby)\r\n\r\n\r\nclass TimeGrouper(Grouper):\r\n \"\"\"\r\n Custom groupby class for time-interval grouping.\r\n\r\n Parameters\r\n ----------\r\n freq : pandas date offset or offset alias for identifying bin edges\r\n closed : closed end of interval; 'left' or 'right'\r\n label : interval boundary to use for labeling; 'left' or 'right'\r\n convention : {'start', 'end', 'e', 's'}\r\n If axis is PeriodIndex\r\n \"\"\"\r\n\r\n _attributes = Grouper._attributes + (\r\n \"closed\",\r\n \"label\",\r\n \"how\",\r\n \"loffset\",\r\n \"kind\",\r\n \"convention\",\r\n \"base\",\r\n )\r\n\r\n def __init__(\r\n self,\r\n freq=\"Min\",\r\n closed=None,\r\n label=None,\r\n how=\"mean\",\r\n axis=0,\r\n fill_method=None,\r\n limit=None,\r\n loffset=None,\r\n kind=None,\r\n convention=None,\r\n base=0,\r\n **kwargs,\r\n ):\r\n # Check for correctness of the keyword arguments which would\r\n # otherwise silently use the default if misspelled\r\n if label not in {None, \"left\", \"right\"}:\r\n raise ValueError(f\"Unsupported value {label} for `label`\")\r\n if closed not in {None, \"left\", \"right\"}:\r\n raise ValueError(f\"Unsupported value {closed} for `closed`\")\r\n if convention not in {None, \"start\", \"end\", \"e\", \"s\"}:\r\n raise ValueError(f\"Unsupported value {convention} for `convention`\")\r\n\r\n freq = to_offset(freq)\r\n\r\n end_types = {\"M\", \"A\", \"Q\", \"BM\", \"BA\", \"BQ\", \"W\"}\r\n rule = freq.rule_code\r\n if rule in end_types or (\"-\" in rule and rule[: rule.find(\"-\")] in end_types):\r\n if closed is 
None:\r\n closed = \"right\"\r\n if label is None:\r\n label = \"right\"\r\n else:\r\n if closed is None:\r\n closed = \"left\"\r\n if label is None:\r\n label = \"left\"\r\n\r\n self.closed = closed\r\n self.label = label\r\n self.kind = kind\r\n\r\n self.convention = convention or \"E\"\r\n self.convention = self.convention.lower()\r\n\r\n if isinstance(loffset, str):\r\n loffset = to_offset(loffset)\r\n self.loffset = loffset\r\n\r\n self.how = how\r\n self.fill_method = fill_method\r\n self.limit = limit\r\n self.base = base\r\n\r\n # always sort time groupers\r\n kwargs[\"sort\"] = True\r\n\r\n super().__init__(freq=freq, axis=axis, **kwargs)\r\n\r\n def _get_resampler(self, obj, kind=None):\r\n \"\"\"\r\n Return my resampler or raise if we have an invalid axis.\r\n\r\n Parameters\r\n ----------\r\n obj : input object\r\n kind : string, optional\r\n 'period','timestamp','timedelta' are valid\r\n\r\n Returns\r\n -------\r\n a Resampler\r\n\r\n Raises\r\n ------\r\n TypeError if incompatible axis\r\n\r\n \"\"\"\r\n self._set_grouper(obj)\r\n\r\n ax = self.ax\r\n if isinstance(ax, DatetimeIndex):\r\n return DatetimeIndexResampler(obj, groupby=self, kind=kind, axis=self.axis)\r\n elif isinstance(ax, PeriodIndex) or kind == \"period\":\r\n return PeriodIndexResampler(obj, groupby=self, kind=kind, axis=self.axis)\r\n elif isinstance(ax, TimedeltaIndex):\r\n return TimedeltaIndexResampler(obj, groupby=self, axis=self.axis)\r\n\r\n raise TypeError(\r\n \"Only valid with DatetimeIndex, \"\r\n \"TimedeltaIndex or PeriodIndex, \"\r\n f\"but got an instance of '{type(ax).__name__}'\"\r\n )\r\n\r\n def _get_grouper(self, obj, validate: bool = True):\r\n # create the resampler and return our binner\r\n r = self._get_resampler(obj)\r\n r._set_binner()\r\n return r.binner, r.grouper, r.obj\r\n\r\n def _get_time_bins(self, ax):\r\n if not isinstance(ax, DatetimeIndex):\r\n raise TypeError(\r\n \"axis must be a DatetimeIndex, but got \"\r\n f\"an instance of {type(ax).__name__}\"\r\n )\r\n\r\n if len(ax) == 0:\r\n binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name)\r\n return binner, [], labels\r\n\r\n first, last = _get_timestamp_range_edges(\r\n ax.min(), ax.max(), self.freq, closed=self.closed, base=self.base\r\n )\r\n # GH #12037\r\n # use first/last directly instead of call replace() on them\r\n # because replace() will swallow the nanosecond part\r\n # thus last bin maybe slightly before the end if the end contains\r\n # nanosecond part and lead to `Values falls after last bin` error\r\n binner = labels = date_range(\r\n freq=self.freq,\r\n start=first,\r\n end=last,\r\n tz=ax.tz,\r\n name=ax.name,\r\n ambiguous=\"infer\",\r\n nonexistent=\"shift_forward\",\r\n )\r\n\r\n ax_values = ax.asi8\r\n binner, bin_edges = self._adjust_bin_edges(binner, ax_values)\r\n\r\n # general version, knowing nothing about relative frequencies\r\n bins = lib.generate_bins_dt64(\r\n ax_values, bin_edges, self.closed, hasnans=ax.hasnans\r\n )\r\n\r\n if self.closed == \"right\":\r\n labels = binner\r\n if self.label == \"right\":\r\n labels = labels[1:]\r\n elif self.label == \"right\":\r\n labels = labels[1:]\r\n\r\n if ax.hasnans:\r\n binner = binner.insert(0, NaT)\r\n labels = labels.insert(0, NaT)\r\n\r\n # if we end up with more labels than bins\r\n # adjust the labels\r\n # GH4076\r\n if len(bins) < len(labels):\r\n labels = labels[: len(bins)]\r\n\r\n return binner, bins, labels\r\n\r\n def _adjust_bin_edges(self, binner, ax_values):\r\n # Some hacks for > daily data, see #1471, #1458, 
#1483\r\n\r\n if self.freq != \"D\" and is_superperiod(self.freq, \"D\"):\r\n if self.closed == \"right\":\r\n # GH 21459, GH 9119: Adjust the bins relative to the wall time\r\n bin_edges = binner.tz_localize(None)\r\n bin_edges = bin_edges + timedelta(1) - Nano(1)\r\n bin_edges = bin_edges.tz_localize(binner.tz).asi8\r\n else:\r\n bin_edges = binner.asi8\r\n\r\n # intraday values on last day\r\n if bin_edges[-2] > ax_values.max():\r\n bin_edges = bin_edges[:-1]\r\n binner = binner[:-1]\r\n else:\r\n bin_edges = binner.asi8\r\n return binner, bin_edges\r\n\r\n def _get_time_delta_bins(self, ax):\r\n if not isinstance(ax, TimedeltaIndex):\r\n raise TypeError(\r\n \"axis must be a TimedeltaIndex, but got \"\r\n f\"an instance of {type(ax).__name__}\"\r\n )\r\n\r\n if not len(ax):\r\n binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)\r\n return binner, [], labels\r\n\r\n start, end = ax.min(), ax.max()\r\n labels = binner = timedelta_range(\r\n start=start, end=end, freq=self.freq, name=ax.name\r\n )\r\n\r\n end_stamps = labels + self.freq\r\n bins = ax.searchsorted(end_stamps, side=\"left\")\r\n\r\n # Addresses GH #10530\r\n if self.base > 0:\r\n labels += type(self.freq)(self.base)\r\n\r\n return binner, bins, labels\r\n\r\n def _get_time_period_bins(self, ax):\r\n if not isinstance(ax, DatetimeIndex):\r\n raise TypeError(\r\n \"axis must be a DatetimeIndex, but got \"\r\n f\"an instance of {type(ax).__name__}\"\r\n )\r\n\r\n freq = self.freq\r\n\r\n if not len(ax):\r\n binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name)\r\n return binner, [], labels\r\n\r\n labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)\r\n\r\n end_stamps = (labels + freq).asfreq(freq, \"s\").to_timestamp()\r\n if ax.tzinfo:\r\n end_stamps = end_stamps.tz_localize(ax.tzinfo)\r\n bins = ax.searchsorted(end_stamps, side=\"left\")\r\n\r\n return binner, bins, labels\r\n\r\n def _get_period_bins(self, ax):\r\n if not isinstance(ax, PeriodIndex):\r\n raise TypeError(\r\n \"axis must be a PeriodIndex, but got \"\r\n f\"an instance of {type(ax).__name__}\"\r\n )\r\n\r\n memb = ax.asfreq(self.freq, how=self.convention)\r\n\r\n # NaT handling as in pandas._lib.lib.generate_bins_dt64()\r\n nat_count = 0\r\n if memb.hasnans:\r\n nat_count = np.sum(memb._isnan)\r\n memb = memb[~memb._isnan]\r\n\r\n # if index contains no valid (non-NaT) values, return empty index\r\n if not len(memb):\r\n binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)\r\n return binner, [], labels\r\n\r\n freq_mult = self.freq.n\r\n\r\n start = ax.min().asfreq(self.freq, how=self.convention)\r\n end = ax.max().asfreq(self.freq, how=\"end\")\r\n bin_shift = 0\r\n\r\n # GH 23882\r\n if self.base:\r\n # get base adjusted bin edge labels\r\n p_start, end = _get_period_range_edges(\r\n start, end, self.freq, closed=self.closed, base=self.base\r\n )\r\n\r\n # Get offset for bin edge (not label edge) adjustment\r\n start_offset = Period(start, self.freq) - Period(p_start, self.freq)\r\n bin_shift = start_offset.n % freq_mult\r\n start = p_start\r\n\r\n labels = binner = period_range(\r\n start=start, end=end, freq=self.freq, name=ax.name\r\n )\r\n\r\n i8 = memb.asi8\r\n\r\n # when upsampling to subperiods, we need to generate enough bins\r\n expected_bins_count = len(binner) * freq_mult\r\n i8_extend = expected_bins_count - (i8[-1] - i8[0])\r\n rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)\r\n rng += freq_mult\r\n # adjust bin edge indexes to account for base\r\n rng -= 
bin_shift\r\n\r\n # Wrap in PeriodArray for PeriodArray.searchsorted\r\n prng = type(memb._data)(rng, dtype=memb.dtype)\r\n bins = memb.searchsorted(prng, side=\"left\")\r\n\r\n if nat_count > 0:\r\n # NaT handling as in pandas._lib.lib.generate_bins_dt64()\r\n # shift bins by the number of NaT\r\n bins += nat_count\r\n bins = np.insert(bins, 0, nat_count)\r\n binner = binner.insert(0, NaT)\r\n labels = labels.insert(0, NaT)\r\n\r\n return binner, bins, labels\r\n\r\n\r\ndef _take_new_index(obj, indexer, new_index, axis=0):\r\n\r\n if isinstance(obj, ABCSeries):\r\n new_values = algos.take_1d(obj.values, indexer)\r\n return obj._constructor(new_values, index=new_index, name=obj.name)\r\n elif isinstance(obj, ABCDataFrame):\r\n if axis == 1:\r\n raise NotImplementedError(\"axis 1 is not supported\")\r\n return obj._constructor(\r\n obj._data.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1)\r\n )\r\n else:\r\n raise ValueError(\"'obj' should be either a Series or a DataFrame\")\r\n\r\n\r\ndef _get_timestamp_range_edges(first, last, offset, closed=\"left\", base=0):\r\n \"\"\"\r\n Adjust the `first` Timestamp to the preceding Timestamp that resides on\r\n the provided offset. Adjust the `last` Timestamp to the following\r\n Timestamp that resides on the provided offset. Input Timestamps that\r\n already reside on the offset will be adjusted depending on the type of\r\n offset and the `closed` parameter.\r\n\r\n Parameters\r\n ----------\r\n first : pd.Timestamp\r\n The beginning Timestamp of the range to be adjusted.\r\n last : pd.Timestamp\r\n The ending Timestamp of the range to be adjusted.\r\n offset : pd.DateOffset\r\n The dateoffset to which the Timestamps will be adjusted.\r\n closed : {'right', 'left'}, default None\r\n Which side of bin interval is closed.\r\n base : int, default 0\r\n The \"origin\" of the adjusted Timestamps.\r\n\r\n Returns\r\n -------\r\n A tuple of length 2, containing the adjusted pd.Timestamp objects.\r\n \"\"\"\r\n if isinstance(offset, Tick):\r\n if isinstance(offset, Day):\r\n # _adjust_dates_anchored assumes 'D' means 24H, but first/last\r\n # might contain a DST transition (23H, 24H, or 25H).\r\n # So \"pretend\" the dates are naive when adjusting the endpoints\r\n tz = first.tz\r\n first = first.tz_localize(None)\r\n last = last.tz_localize(None)\r\n\r\n first, last = _adjust_dates_anchored(\r\n first, last, offset, closed=closed, base=base\r\n )\r\n if isinstance(offset, Day):\r\n first = first.tz_localize(tz)\r\n last = last.tz_localize(tz)\r\n return first, last\r\n\r\n else:\r\n first = first.normalize()\r\n last = last.normalize()\r\n\r\n if closed == \"left\":\r\n first = Timestamp(offset.rollback(first))\r\n else:\r\n first = Timestamp(first - offset)\r\n\r\n last = Timestamp(last + offset)\r\n\r\n return first, last\r\n\r\n\r\ndef _get_period_range_edges(first, last, offset, closed=\"left\", base=0):\r\n \"\"\"\r\n Adjust the provided `first` and `last` Periods to the respective Period of\r\n the given offset that encompasses them.\r\n\r\n Parameters\r\n ----------\r\n first : pd.Period\r\n The beginning Period of the range to be adjusted.\r\n last : pd.Period\r\n The ending Period of the range to be adjusted.\r\n offset : pd.DateOffset\r\n The dateoffset to which the Periods will be adjusted.\r\n closed : {'right', 'left'}, default None\r\n Which side of bin interval is closed.\r\n base : int, default 0\r\n The \"origin\" of the adjusted Periods.\r\n\r\n Returns\r\n -------\r\n A tuple of length 2, containing the adjusted pd.Period 
objects.\r\n \"\"\"\r\n if not all(isinstance(obj, Period) for obj in [first, last]):\r\n raise TypeError(\"'first' and 'last' must be instances of type Period\")\r\n\r\n # GH 23882\r\n first = first.to_timestamp()\r\n last = last.to_timestamp()\r\n adjust_first = not offset.is_on_offset(first)\r\n adjust_last = offset.is_on_offset(last)\r\n\r\n first, last = _get_timestamp_range_edges(\r\n first, last, offset, closed=closed, base=base\r\n )\r\n\r\n first = (first + adjust_first * offset).to_period(offset)\r\n last = (last - adjust_last * offset).to_period(offset)\r\n return first, last\r\n\r\n\r\ndef _adjust_dates_anchored(first, last, offset, closed=\"right\", base=0):\r\n # First and last offsets should be calculated from the start day to fix an\r\n # error cause by resampling across multiple days when a one day period is\r\n # not a multiple of the frequency.\r\n #\r\n # See https://github.com/pandas-dev/pandas/issues/8683\r\n\r\n # GH 10117 & GH 19375. If first and last contain timezone information,\r\n # Perform the calculation in UTC in order to avoid localizing on an\r\n # Ambiguous or Nonexistent time.\r\n first_tzinfo = first.tzinfo\r\n last_tzinfo = last.tzinfo\r\n start_day_nanos = first.normalize().value\r\n if first_tzinfo is not None:\r\n first = first.tz_convert(\"UTC\")\r\n if last_tzinfo is not None:\r\n last = last.tz_convert(\"UTC\")\r\n\r\n base_nanos = (base % offset.n) * offset.nanos // offset.n\r\n start_day_nanos += base_nanos\r\n\r\n foffset = (first.value - start_day_nanos) % offset.nanos\r\n loffset = (last.value - start_day_nanos) % offset.nanos\r\n\r\n if closed == \"right\":\r\n if foffset > 0:\r\n # roll back\r\n fresult = first.value - foffset\r\n else:\r\n fresult = first.value - offset.nanos\r\n\r\n if loffset > 0:\r\n # roll forward\r\n lresult = last.value + (offset.nanos - loffset)\r\n else:\r\n # already the end of the road\r\n lresult = last.value\r\n else: # closed == 'left'\r\n if foffset > 0:\r\n fresult = first.value - foffset\r\n else:\r\n # start of the road\r\n fresult = first.value\r\n\r\n if loffset > 0:\r\n # roll forward\r\n lresult = last.value + (offset.nanos - loffset)\r\n else:\r\n lresult = last.value + offset.nanos\r\n fresult = Timestamp(fresult)\r\n lresult = Timestamp(lresult)\r\n if first_tzinfo is not None:\r\n fresult = fresult.tz_localize(\"UTC\").tz_convert(first_tzinfo)\r\n if last_tzinfo is not None:\r\n lresult = lresult.tz_localize(\"UTC\").tz_convert(last_tzinfo)\r\n return fresult, lresult\r\n\r\n\r\ndef asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):\r\n \"\"\"\r\n Utility frequency conversion method for Series/DataFrame.\r\n \"\"\"\r\n if isinstance(obj.index, PeriodIndex):\r\n if method is not None:\r\n raise NotImplementedError(\"'method' argument is not supported\")\r\n\r\n if how is None:\r\n how = \"E\"\r\n\r\n new_obj = obj.copy()\r\n new_obj.index = obj.index.asfreq(freq, how=how)\r\n\r\n elif len(obj.index) == 0:\r\n new_obj = obj.copy()\r\n new_obj.index = obj.index._shallow_copy(freq=to_offset(freq))\r\n\r\n else:\r\n dti = date_range(obj.index[0], obj.index[-1], freq=freq)\r\n dti.name = obj.index.name\r\n new_obj = obj.reindex(dti, method=method, fill_value=fill_value)\r\n if normalize:\r\n new_obj.index = new_obj.index.normalize()\r\n\r\n return new_obj\r\n",
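A minimal usage sketch (not part of the dumped source) of the downsample/upsample split implemented by the Resampler classes above: aggregations dispatch through _downsample, while asfreq/fillna-style reindexing dispatches through _upsample. The series, values and frequencies are illustrative assumptions.

import pandas as pd

# Hourly toy series; the data and index here are assumptions for illustration.
s = pd.Series([1.0, 2.0, 3.0],
              index=pd.date_range("2018-01-01", periods=3, freq="h"))

# Downsampling: coarser bins are aggregated (goes through _downsample("mean")).
print(s.resample("2h").mean())

# Upsampling: reindex onto a finer grid; asfreq() leaves NaNs,
# ffill() pads them (both go through _upsample above).
print(s.resample("30min").asfreq())
print(s.resample("30min").ffill())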
"\"\"\" manage PyTables query interface via Expressions \"\"\"\r\n\r\nimport ast\r\nfrom functools import partial\r\nfrom typing import Any, Dict, Optional, Tuple\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._libs.tslibs import Timedelta, Timestamp\r\nfrom pandas.compat.chainmap import DeepChainMap\r\n\r\nfrom pandas.core.dtypes.common import is_list_like\r\n\r\nimport pandas as pd\r\nimport pandas.core.common as com\r\nfrom pandas.core.computation import expr, ops, scope as _scope\r\nfrom pandas.core.computation.common import _ensure_decoded\r\nfrom pandas.core.computation.expr import BaseExprVisitor\r\nfrom pandas.core.computation.ops import UndefinedVariableError, is_term\r\n\r\nfrom pandas.io.formats.printing import pprint_thing, pprint_thing_encoded\r\n\r\n\r\nclass PyTablesScope(_scope.Scope):\r\n __slots__ = (\"queryables\",)\r\n\r\n queryables: Dict[str, Any]\r\n\r\n def __init__(\r\n self,\r\n level: int,\r\n global_dict=None,\r\n local_dict=None,\r\n queryables: Optional[Dict[str, Any]] = None,\r\n ):\r\n super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)\r\n self.queryables = queryables or dict()\r\n\r\n\r\nclass Term(ops.Term):\r\n env: PyTablesScope\r\n\r\n def __new__(cls, name, env, side=None, encoding=None):\r\n klass = Constant if not isinstance(name, str) else cls\r\n return object.__new__(klass)\r\n\r\n def __init__(self, name, env: PyTablesScope, side=None, encoding=None):\r\n super().__init__(name, env, side=side, encoding=encoding)\r\n\r\n def _resolve_name(self):\r\n # must be a queryables\r\n if self.side == \"left\":\r\n # Note: The behavior of __new__ ensures that self.name is a str here\r\n if self.name not in self.env.queryables:\r\n raise NameError(f\"name {repr(self.name)} is not defined\")\r\n return self.name\r\n\r\n # resolve the rhs (and allow it to be None)\r\n try:\r\n return self.env.resolve(self.name, is_local=False)\r\n except UndefinedVariableError:\r\n return self.name\r\n\r\n # read-only property overwriting read/write property\r\n @property # type: ignore\r\n def value(self):\r\n return self._value\r\n\r\n\r\nclass Constant(Term):\r\n def __init__(self, value, env: PyTablesScope, side=None, encoding=None):\r\n assert isinstance(env, PyTablesScope), type(env)\r\n super().__init__(value, env, side=side, encoding=encoding)\r\n\r\n def _resolve_name(self):\r\n return self._name\r\n\r\n\r\nclass BinOp(ops.BinOp):\r\n\r\n _max_selectors = 31\r\n\r\n op: str\r\n queryables: Dict[str, Any]\r\n\r\n def __init__(self, op: str, lhs, rhs, queryables: Dict[str, Any], encoding):\r\n super().__init__(op, lhs, rhs)\r\n self.queryables = queryables\r\n self.encoding = encoding\r\n self.condition = None\r\n\r\n def _disallow_scalar_only_bool_ops(self):\r\n pass\r\n\r\n def prune(self, klass):\r\n def pr(left, right):\r\n \"\"\" create and return a new specialized BinOp from myself \"\"\"\r\n\r\n if left is None:\r\n return right\r\n elif right is None:\r\n return left\r\n\r\n k = klass\r\n if isinstance(left, ConditionBinOp):\r\n if isinstance(right, ConditionBinOp):\r\n k = JointConditionBinOp\r\n elif isinstance(left, k):\r\n return left\r\n elif isinstance(right, k):\r\n return right\r\n\r\n elif isinstance(left, FilterBinOp):\r\n if isinstance(right, FilterBinOp):\r\n k = JointFilterBinOp\r\n elif isinstance(left, k):\r\n return left\r\n elif isinstance(right, k):\r\n return right\r\n\r\n return k(\r\n self.op, left, right, queryables=self.queryables, encoding=self.encoding\r\n ).evaluate()\r\n\r\n left, right = self.lhs, 
self.rhs\r\n\r\n if is_term(left) and is_term(right):\r\n res = pr(left.value, right.value)\r\n elif not is_term(left) and is_term(right):\r\n res = pr(left.prune(klass), right.value)\r\n elif is_term(left) and not is_term(right):\r\n res = pr(left.value, right.prune(klass))\r\n elif not (is_term(left) or is_term(right)):\r\n res = pr(left.prune(klass), right.prune(klass))\r\n\r\n return res\r\n\r\n def conform(self, rhs):\r\n \"\"\" inplace conform rhs \"\"\"\r\n if not is_list_like(rhs):\r\n rhs = [rhs]\r\n if isinstance(rhs, np.ndarray):\r\n rhs = rhs.ravel()\r\n return rhs\r\n\r\n @property\r\n def is_valid(self) -> bool:\r\n \"\"\" return True if this is a valid field \"\"\"\r\n return self.lhs in self.queryables\r\n\r\n @property\r\n def is_in_table(self) -> bool:\r\n \"\"\" return True if this is a valid column name for generation (e.g. an\r\n actual column in the table) \"\"\"\r\n return self.queryables.get(self.lhs) is not None\r\n\r\n @property\r\n def kind(self):\r\n \"\"\" the kind of my field \"\"\"\r\n return getattr(self.queryables.get(self.lhs), \"kind\", None)\r\n\r\n @property\r\n def meta(self):\r\n \"\"\" the meta of my field \"\"\"\r\n return getattr(self.queryables.get(self.lhs), \"meta\", None)\r\n\r\n @property\r\n def metadata(self):\r\n \"\"\" the metadata of my field \"\"\"\r\n return getattr(self.queryables.get(self.lhs), \"metadata\", None)\r\n\r\n def generate(self, v) -> str:\r\n \"\"\" create and return the op string for this TermValue \"\"\"\r\n val = v.tostring(self.encoding)\r\n return f\"({self.lhs} {self.op} {val})\"\r\n\r\n def convert_value(self, v) -> \"TermValue\":\r\n \"\"\" convert the expression that is in the term to something that is\r\n accepted by pytables \"\"\"\r\n\r\n def stringify(value):\r\n if self.encoding is not None:\r\n encoder = partial(pprint_thing_encoded, encoding=self.encoding)\r\n else:\r\n encoder = pprint_thing\r\n return encoder(value)\r\n\r\n kind = _ensure_decoded(self.kind)\r\n meta = _ensure_decoded(self.meta)\r\n if kind == \"datetime64\" or kind == \"datetime\":\r\n if isinstance(v, (int, float)):\r\n v = stringify(v)\r\n v = _ensure_decoded(v)\r\n v = Timestamp(v)\r\n if v.tz is not None:\r\n v = v.tz_convert(\"UTC\")\r\n return TermValue(v, v.value, kind)\r\n elif kind == \"timedelta64\" or kind == \"timedelta\":\r\n v = Timedelta(v, unit=\"s\").value\r\n return TermValue(int(v), v, kind)\r\n elif meta == \"category\":\r\n metadata = com.values_from_object(self.metadata)\r\n result = metadata.searchsorted(v, side=\"left\")\r\n\r\n # result returns 0 if v is first element or if v is not in metadata\r\n # check that metadata contains v\r\n if not result and v not in metadata:\r\n result = -1\r\n return TermValue(result, result, \"integer\")\r\n elif kind == \"integer\":\r\n v = int(float(v))\r\n return TermValue(v, v, kind)\r\n elif kind == \"float\":\r\n v = float(v)\r\n return TermValue(v, v, kind)\r\n elif kind == \"bool\":\r\n if isinstance(v, str):\r\n v = not v.strip().lower() in [\r\n \"false\",\r\n \"f\",\r\n \"no\",\r\n \"n\",\r\n \"none\",\r\n \"0\",\r\n \"[]\",\r\n \"{}\",\r\n \"\",\r\n ]\r\n else:\r\n v = bool(v)\r\n return TermValue(v, v, kind)\r\n elif isinstance(v, str):\r\n # string quoting\r\n return TermValue(v, stringify(v), \"string\")\r\n else:\r\n raise TypeError(f\"Cannot compare {v} of type {type(v)} to {kind} column\")\r\n\r\n def convert_values(self):\r\n pass\r\n\r\n\r\nclass FilterBinOp(BinOp):\r\n filter: Optional[Tuple[Any, Any, pd.Index]] = None\r\n\r\n def __repr__(self) -> str:\r\n if 
self.filter is None:\r\n return \"Filter: Not Initialized\"\r\n return pprint_thing(f\"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]\")\r\n\r\n def invert(self):\r\n \"\"\" invert the filter \"\"\"\r\n if self.filter is not None:\r\n f = list(self.filter)\r\n f[1] = self.generate_filter_op(invert=True)\r\n self.filter = tuple(f)\r\n return self\r\n\r\n def format(self):\r\n \"\"\" return the actual filter format \"\"\"\r\n return [self.filter]\r\n\r\n def evaluate(self):\r\n\r\n if not self.is_valid:\r\n raise ValueError(f\"query term is not valid [{self}]\")\r\n\r\n rhs = self.conform(self.rhs)\r\n values = list(rhs)\r\n\r\n if self.is_in_table:\r\n\r\n # if too many values to create the expression, use a filter instead\r\n if self.op in [\"==\", \"!=\"] and len(values) > self._max_selectors:\r\n\r\n filter_op = self.generate_filter_op()\r\n self.filter = (self.lhs, filter_op, pd.Index(values))\r\n\r\n return self\r\n return None\r\n\r\n # equality conditions\r\n if self.op in [\"==\", \"!=\"]:\r\n\r\n filter_op = self.generate_filter_op()\r\n self.filter = (self.lhs, filter_op, pd.Index(values))\r\n\r\n else:\r\n raise TypeError(\r\n f\"passing a filterable condition to a non-table indexer [{self}]\"\r\n )\r\n\r\n return self\r\n\r\n def generate_filter_op(self, invert: bool = False):\r\n if (self.op == \"!=\" and not invert) or (self.op == \"==\" and invert):\r\n return lambda axis, vals: ~axis.isin(vals)\r\n else:\r\n return lambda axis, vals: axis.isin(vals)\r\n\r\n\r\nclass JointFilterBinOp(FilterBinOp):\r\n def format(self):\r\n raise NotImplementedError(\"unable to collapse Joint Filters\")\r\n\r\n def evaluate(self):\r\n return self\r\n\r\n\r\nclass ConditionBinOp(BinOp):\r\n def __repr__(self) -> str:\r\n return pprint_thing(f\"[Condition : [{self.condition}]]\")\r\n\r\n def invert(self):\r\n \"\"\" invert the condition \"\"\"\r\n # if self.condition is not None:\r\n # self.condition = \"~(%s)\" % self.condition\r\n # return self\r\n raise NotImplementedError(\r\n \"cannot use an invert condition when passing to numexpr\"\r\n )\r\n\r\n def format(self):\r\n \"\"\" return the actual ne format \"\"\"\r\n return self.condition\r\n\r\n def evaluate(self):\r\n\r\n if not self.is_valid:\r\n raise ValueError(f\"query term is not valid [{self}]\")\r\n\r\n # convert values if we are in the table\r\n if not self.is_in_table:\r\n return None\r\n\r\n rhs = self.conform(self.rhs)\r\n values = [self.convert_value(v) for v in rhs]\r\n\r\n # equality conditions\r\n if self.op in [\"==\", \"!=\"]:\r\n\r\n # too many values to create the expression?\r\n if len(values) <= self._max_selectors:\r\n vs = [self.generate(v) for v in values]\r\n self.condition = f\"({' | '.join(vs)})\"\r\n\r\n # use a filter after reading\r\n else:\r\n return None\r\n else:\r\n self.condition = self.generate(values[0])\r\n\r\n return self\r\n\r\n\r\nclass JointConditionBinOp(ConditionBinOp):\r\n def evaluate(self):\r\n self.condition = f\"({self.lhs.condition} {self.op} {self.rhs.condition})\"\r\n return self\r\n\r\n\r\nclass UnaryOp(ops.UnaryOp):\r\n def prune(self, klass):\r\n\r\n if self.op != \"~\":\r\n raise NotImplementedError(\"UnaryOp only support invert type ops\")\r\n\r\n operand = self.operand\r\n operand = operand.prune(klass)\r\n\r\n if operand is not None:\r\n if issubclass(klass, ConditionBinOp):\r\n if operand.condition is not None:\r\n return operand.invert()\r\n elif issubclass(klass, FilterBinOp):\r\n if operand.filter is not None:\r\n return operand.invert()\r\n\r\n return None\r\n\r\n\r\nclass 
PyTablesExprVisitor(BaseExprVisitor):\r\n const_type = Constant\r\n term_type = Term\r\n\r\n def __init__(self, env, engine, parser, **kwargs):\r\n super().__init__(env, engine, parser)\r\n for bin_op in self.binary_ops:\r\n bin_node = self.binary_op_nodes_map[bin_op]\r\n setattr(\r\n self,\r\n f\"visit_{bin_node}\",\r\n lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),\r\n )\r\n\r\n def visit_UnaryOp(self, node, **kwargs):\r\n if isinstance(node.op, (ast.Not, ast.Invert)):\r\n return UnaryOp(\"~\", self.visit(node.operand))\r\n elif isinstance(node.op, ast.USub):\r\n return self.const_type(-self.visit(node.operand).value, self.env)\r\n elif isinstance(node.op, ast.UAdd):\r\n raise NotImplementedError(\"Unary addition not supported\")\r\n\r\n def visit_Index(self, node, **kwargs):\r\n return self.visit(node.value).value\r\n\r\n def visit_Assign(self, node, **kwargs):\r\n cmpr = ast.Compare(\r\n ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]\r\n )\r\n return self.visit(cmpr)\r\n\r\n def visit_Subscript(self, node, **kwargs):\r\n # only allow simple subscripts\r\n\r\n value = self.visit(node.value)\r\n slobj = self.visit(node.slice)\r\n try:\r\n value = value.value\r\n except AttributeError:\r\n pass\r\n\r\n try:\r\n return self.const_type(value[slobj], self.env)\r\n except TypeError:\r\n raise ValueError(f\"cannot subscript {repr(value)} with {repr(slobj)}\")\r\n\r\n def visit_Attribute(self, node, **kwargs):\r\n attr = node.attr\r\n value = node.value\r\n\r\n ctx = type(node.ctx)\r\n if ctx == ast.Load:\r\n # resolve the value\r\n resolved = self.visit(value)\r\n\r\n # try to get the value to see if we are another expression\r\n try:\r\n resolved = resolved.value\r\n except (AttributeError):\r\n pass\r\n\r\n try:\r\n return self.term_type(getattr(resolved, attr), self.env)\r\n except AttributeError:\r\n\r\n # something like datetime.datetime where scope is overridden\r\n if isinstance(value, ast.Name) and value.id == attr:\r\n return resolved\r\n\r\n raise ValueError(f\"Invalid Attribute context {ctx.__name__}\")\r\n\r\n def translate_In(self, op):\r\n return ast.Eq() if isinstance(op, ast.In) else op\r\n\r\n def _rewrite_membership_op(self, node, left, right):\r\n return self.visit(node.op), node.op, left, right\r\n\r\n\r\ndef _validate_where(w):\r\n \"\"\"\r\n Validate that the where statement is of the right type.\r\n\r\n The type may either be String, Expr, or list-like of Exprs.\r\n\r\n Parameters\r\n ----------\r\n w : String term expression, Expr, or list-like of Exprs.\r\n\r\n Returns\r\n -------\r\n where : The original where clause if the check was successful.\r\n\r\n Raises\r\n ------\r\n TypeError : An invalid data type was passed in for w (e.g. 
dict).\r\n \"\"\"\r\n\r\n if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):\r\n raise TypeError(\r\n \"where must be passed as a string, PyTablesExpr, \"\r\n \"or list-like of PyTablesExpr\"\r\n )\r\n\r\n return w\r\n\r\n\r\nclass PyTablesExpr(expr.Expr):\r\n \"\"\"\r\n Hold a pytables-like expression, comprised of possibly multiple 'terms'.\r\n\r\n Parameters\r\n ----------\r\n where : string term expression, PyTablesExpr, or list-like of PyTablesExprs\r\n queryables : a \"kinds\" map (dict of column name -> kind), or None if column\r\n is non-indexable\r\n encoding : an encoding that will encode the query terms\r\n\r\n Returns\r\n -------\r\n a PyTablesExpr object\r\n\r\n Examples\r\n --------\r\n\r\n 'index>=date'\r\n \"columns=['A', 'D']\"\r\n 'columns=A'\r\n 'columns==A'\r\n \"~(columns=['A','B'])\"\r\n 'index>df.index[3] & string=\"bar\"'\r\n '(index>df.index[3] & index<=df.index[6]) | string=\"bar\"'\r\n \"ts>=Timestamp('2012-02-01')\"\r\n \"major_axis>=20130101\"\r\n \"\"\"\r\n\r\n _visitor: Optional[PyTablesExprVisitor]\r\n env: PyTablesScope\r\n\r\n def __init__(\r\n self,\r\n where,\r\n queryables: Optional[Dict[str, Any]] = None,\r\n encoding=None,\r\n scope_level: int = 0,\r\n ):\r\n\r\n where = _validate_where(where)\r\n\r\n self.encoding = encoding\r\n self.condition = None\r\n self.filter = None\r\n self.terms = None\r\n self._visitor = None\r\n\r\n # capture the environment if needed\r\n local_dict: DeepChainMap[Any, Any] = DeepChainMap()\r\n\r\n if isinstance(where, PyTablesExpr):\r\n local_dict = where.env.scope\r\n _where = where.expr\r\n\r\n elif isinstance(where, (list, tuple)):\r\n where = list(where)\r\n for idx, w in enumerate(where):\r\n if isinstance(w, PyTablesExpr):\r\n local_dict = w.env.scope\r\n else:\r\n w = _validate_where(w)\r\n where[idx] = w\r\n _where = \" & \".join((f\"({w})\" for w in com.flatten(where)))\r\n else:\r\n _where = where\r\n\r\n self.expr = _where\r\n self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)\r\n\r\n if queryables is not None and isinstance(self.expr, str):\r\n self.env.queryables.update(queryables)\r\n self._visitor = PyTablesExprVisitor(\r\n self.env,\r\n queryables=queryables,\r\n parser=\"pytables\",\r\n engine=\"pytables\",\r\n encoding=encoding,\r\n )\r\n self.terms = self.parse()\r\n\r\n def __repr__(self) -> str:\r\n if self.terms is not None:\r\n return pprint_thing(self.terms)\r\n return pprint_thing(self.expr)\r\n\r\n def evaluate(self):\r\n \"\"\" create and return the numexpr condition and filter \"\"\"\r\n\r\n try:\r\n self.condition = self.terms.prune(ConditionBinOp)\r\n except AttributeError:\r\n raise ValueError(\r\n f\"cannot process expression [{self.expr}], [{self}] \"\r\n \"is not a valid condition\"\r\n )\r\n try:\r\n self.filter = self.terms.prune(FilterBinOp)\r\n except AttributeError:\r\n raise ValueError(\r\n f\"cannot process expression [{self.expr}], [{self}] \"\r\n \"is not a valid filter\"\r\n )\r\n\r\n return self.condition, self.filter\r\n\r\n\r\nclass TermValue:\r\n \"\"\" hold a term value the we use to construct a condition/filter \"\"\"\r\n\r\n def __init__(self, value, converted, kind: str):\r\n assert isinstance(kind, str), kind\r\n self.value = value\r\n self.converted = converted\r\n self.kind = kind\r\n\r\n def tostring(self, encoding) -> str:\r\n \"\"\" quote the string if not encoded\r\n else encode and return \"\"\"\r\n if self.kind == \"string\":\r\n if encoding is not None:\r\n return str(self.converted)\r\n return f'\"{self.converted}\"'\r\n elif 
self.kind == \"float\":\r\n # python 2 str(float) is not always\r\n # round-trippable so use repr()\r\n return repr(self.converted)\r\n return str(self.converted)\r\n\r\n\r\ndef maybe_expression(s) -> bool:\r\n \"\"\" loose checking if s is a pytables-acceptable expression \"\"\"\r\n if not isinstance(s, str):\r\n return False\r\n ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + (\"=\",)\r\n\r\n # make sure we have an op at least\r\n return any(op in s for op in ops)\r\n",
"\"\"\"This module is designed for community supported date conversion functions\"\"\"\r\nimport numpy as np\r\n\r\nfrom pandas._libs.tslibs import parsing\r\n\r\n\r\ndef parse_date_time(date_col, time_col):\r\n date_col = _maybe_cast(date_col)\r\n time_col = _maybe_cast(time_col)\r\n return parsing.try_parse_date_and_time(date_col, time_col)\r\n\r\n\r\ndef parse_date_fields(year_col, month_col, day_col):\r\n year_col = _maybe_cast(year_col)\r\n month_col = _maybe_cast(month_col)\r\n day_col = _maybe_cast(day_col)\r\n return parsing.try_parse_year_month_day(year_col, month_col, day_col)\r\n\r\n\r\ndef parse_all_fields(year_col, month_col, day_col, hour_col, minute_col, second_col):\r\n year_col = _maybe_cast(year_col)\r\n month_col = _maybe_cast(month_col)\r\n day_col = _maybe_cast(day_col)\r\n hour_col = _maybe_cast(hour_col)\r\n minute_col = _maybe_cast(minute_col)\r\n second_col = _maybe_cast(second_col)\r\n return parsing.try_parse_datetime_components(\r\n year_col, month_col, day_col, hour_col, minute_col, second_col\r\n )\r\n\r\n\r\ndef generic_parser(parse_func, *cols):\r\n N = _check_columns(cols)\r\n results = np.empty(N, dtype=object)\r\n\r\n for i in range(N):\r\n args = [c[i] for c in cols]\r\n results[i] = parse_func(*args)\r\n\r\n return results\r\n\r\n\r\ndef _maybe_cast(arr):\r\n if not arr.dtype.type == np.object_:\r\n arr = np.array(arr, dtype=object)\r\n return arr\r\n\r\n\r\ndef _check_columns(cols):\r\n if not len(cols):\r\n raise AssertionError(\"There must be at least 1 column\")\r\n\r\n head, tail = cols[0], cols[1:]\r\n\r\n N = len(head)\r\n\r\n for i, n in enumerate(map(len, tail)):\r\n if n != N:\r\n raise AssertionError(\r\n f\"All columns must have the same length: {N}; \"\r\n f\"column {i} has length {n}\"\r\n )\r\n\r\n return N\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nfrom pandas._libs.tslibs.timedeltas import delta_to_nanoseconds\r\n\r\nfrom pandas import Timedelta, offsets\r\n\r\n\r\[email protected](\r\n \"obj,expected\",\r\n [\r\n (np.timedelta64(14, \"D\"), 14 * 24 * 3600 * 1e9),\r\n (Timedelta(minutes=-7), -7 * 60 * 1e9),\r\n (Timedelta(minutes=-7).to_pytimedelta(), -7 * 60 * 1e9),\r\n (offsets.Nano(125), 125),\r\n (1, 1),\r\n (np.int64(2), 2),\r\n (np.int32(3), 3),\r\n ],\r\n)\r\ndef test_delta_to_nanoseconds(obj, expected):\r\n result = delta_to_nanoseconds(obj)\r\n assert result == expected\r\n\r\n\r\ndef test_delta_to_nanoseconds_error():\r\n obj = np.array([123456789], dtype=\"m8[ns]\")\r\n\r\n with pytest.raises(TypeError, match=\"<class 'numpy.ndarray'>\"):\r\n delta_to_nanoseconds(obj)\r\n",
"from .. import auc\r\nfrom .. import roc_curve\r\n\r\nfrom .base import _check_classifer_response_method\r\nfrom ...utils import check_matplotlib_support\r\nfrom ...base import is_classifier\r\n\r\n\r\nclass RocCurveDisplay:\r\n \"\"\"ROC Curve visualization.\r\n\r\n It is recommend to use :func:`~sklearn.metrics.plot_roc_curve` to create a\r\n visualizer. All parameters are stored as attributes.\r\n\r\n Read more in the :ref:`User Guide <visualizations>`.\r\n\r\n Parameters\r\n ----------\r\n fpr : ndarray\r\n False positive rate.\r\n\r\n tpr : ndarray\r\n True positive rate.\r\n\r\n roc_auc : float\r\n Area under ROC curve.\r\n\r\n estimator_name : str\r\n Name of estimator.\r\n\r\n Attributes\r\n ----------\r\n line_ : matplotlib Artist\r\n ROC Curve.\r\n\r\n ax_ : matplotlib Axes\r\n Axes with ROC Curve.\r\n\r\n figure_ : matplotlib Figure\r\n Figure containing the curve.\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt # doctest: +SKIP\r\n >>> import numpy as np\r\n >>> from sklearn import metrics\r\n >>> y = np.array([0, 0, 1, 1])\r\n >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\r\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)\r\n >>> roc_auc = metrics.auc(fpr, tpr)\r\n >>> display = metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc,\\\r\n estimator_name='example estimator')\r\n >>> display.plot() # doctest: +SKIP\r\n >>> plt.show() # doctest: +SKIP\r\n \"\"\"\r\n\r\n def __init__(self, fpr, tpr, roc_auc, estimator_name):\r\n self.fpr = fpr\r\n self.tpr = tpr\r\n self.roc_auc = roc_auc\r\n self.estimator_name = estimator_name\r\n\r\n def plot(self, ax=None, name=None, **kwargs):\r\n \"\"\"Plot visualization\r\n\r\n Extra keyword arguments will be passed to matplotlib's ``plot``.\r\n\r\n Parameters\r\n ----------\r\n ax : matplotlib axes, default=None\r\n Axes object to plot on. If `None`, a new figure and axes is\r\n created.\r\n\r\n name : str, default=None\r\n Name of ROC Curve for labeling. 
If `None`, use the name of the\r\n estimator.\r\n\r\n Returns\r\n -------\r\n display : :class:`~sklearn.metrics.plot.RocCurveDisplay`\r\n Object that stores computed values.\r\n \"\"\"\r\n check_matplotlib_support('RocCurveDisplay.plot')\r\n import matplotlib.pyplot as plt\r\n\r\n if ax is None:\r\n fig, ax = plt.subplots()\r\n\r\n name = self.estimator_name if name is None else name\r\n\r\n line_kwargs = {\r\n 'label': \"{} (AUC = {:0.2f})\".format(name, self.roc_auc)\r\n }\r\n line_kwargs.update(**kwargs)\r\n\r\n self.line_ = ax.plot(self.fpr, self.tpr, **line_kwargs)[0]\r\n ax.set_xlabel(\"False Positive Rate\")\r\n ax.set_ylabel(\"True Positive Rate\")\r\n ax.legend(loc='lower right')\r\n\r\n self.ax_ = ax\r\n self.figure_ = ax.figure\r\n return self\r\n\r\n\r\ndef plot_roc_curve(estimator, X, y, sample_weight=None,\r\n drop_intermediate=True, response_method=\"auto\",\r\n name=None, ax=None, **kwargs):\r\n \"\"\"Plot Receiver operating characteristic (ROC) curve.\r\n\r\n Extra keyword arguments will be passed to matplotlib's `plot`.\r\n\r\n Read more in the :ref:`User Guide <visualizations>`.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator instance\r\n Trained classifier.\r\n\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Input values.\r\n\r\n y : array-like of shape (n_samples,)\r\n Target values.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n drop_intermediate : boolean, default=True\r\n Whether to drop some suboptimal thresholds which would not appear\r\n on a plotted ROC curve. This is useful in order to create lighter\r\n ROC curves.\r\n\r\n response_method : {'predict_proba', 'decision_function', 'auto'} \\\r\n default='auto'\r\n Specifies whether to use :term:`predict_proba` or\r\n :term:`decision_function` as the target response. If set to 'auto',\r\n :term:`predict_proba` is tried first and if it does not exist\r\n :term:`decision_function` is tried next.\r\n\r\n name : str, default=None\r\n Name of ROC Curve for labeling. If `None`, use the name of the\r\n estimator.\r\n\r\n ax : matplotlib axes, default=None\r\n Axes object to plot on. 
If `None`, a new figure and axes is created.\r\n\r\n Returns\r\n -------\r\n display : :class:`~sklearn.metrics.RocCurveDisplay`\r\n Object that stores computed values.\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt # doctest: +SKIP\r\n >>> from sklearn import datasets, metrics, model_selection, svm\r\n >>> X, y = datasets.make_classification(random_state=0)\r\n >>> X_train, X_test, y_train, y_test = model_selection.train_test_split(\\\r\n X, y, random_state=0)\r\n >>> clf = svm.SVC(random_state=0)\r\n >>> clf.fit(X_train, y_train)\r\n SVC(random_state=0)\r\n >>> metrics.plot_roc_curve(clf, X_test, y_test) # doctest: +SKIP\r\n >>> plt.show() # doctest: +SKIP\r\n \"\"\"\r\n check_matplotlib_support('plot_roc_curve')\r\n\r\n classification_error = (\r\n \"{} should be a binary classifier\".format(estimator.__class__.__name__)\r\n )\r\n if not is_classifier(estimator):\r\n raise ValueError(classification_error)\r\n\r\n prediction_method = _check_classifer_response_method(estimator,\r\n response_method)\r\n y_pred = prediction_method(X)\r\n\r\n if y_pred.ndim != 1:\r\n if y_pred.shape[1] != 2:\r\n raise ValueError(classification_error)\r\n else:\r\n y_pred = y_pred[:, 1]\r\n\r\n pos_label = estimator.classes_[1]\r\n fpr, tpr, _ = roc_curve(y, y_pred, pos_label=pos_label,\r\n sample_weight=sample_weight,\r\n drop_intermediate=drop_intermediate)\r\n roc_auc = auc(fpr, tpr)\r\n name = estimator.__class__.__name__ if name is None else name\r\n viz = RocCurveDisplay(\r\n fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name=name\r\n )\r\n return viz.plot(ax=ax, name=name, **kwargs)\r\n",
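A hedged usage sketch (not part of the dumped source) of the visualization API defined above: passing the same Axes via ax= overlays several ROC curves, which is the main reason plot_roc_curve returns a RocCurveDisplay. It assumes matplotlib is available and a scikit-learn version where plot_roc_curve is exported from sklearn.metrics; the estimators and parameters are illustrative assumptions.

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_roc_curve
from sklearn.model_selection import train_test_split

# Toy binary classification problem.
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

lr = LogisticRegression(max_iter=1000).fit(X_train, y_train)
rf = RandomForestClassifier(random_state=0).fit(X_train, y_train)

# Reusing the same Axes overlays both curves on a single plot.
fig, ax = plt.subplots()
plot_roc_curve(lr, X_test, y_test, ax=ax, name="LogisticRegression")
plot_roc_curve(rf, X_test, y_test, ax=ax, name="RandomForest")
plt.show()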
"import numpy as np\r\n\r\nfrom pandas._libs import index as libindex, lib\r\nfrom pandas._typing import Dtype\r\nfrom pandas.util._decorators import Appender, cache_readonly\r\n\r\nfrom pandas.core.dtypes.cast import astype_nansafe\r\nfrom pandas.core.dtypes.common import (\r\n is_bool,\r\n is_bool_dtype,\r\n is_dtype_equal,\r\n is_extension_array_dtype,\r\n is_float,\r\n is_float_dtype,\r\n is_integer_dtype,\r\n is_scalar,\r\n is_signed_integer_dtype,\r\n is_unsigned_integer_dtype,\r\n needs_i8_conversion,\r\n pandas_dtype,\r\n)\r\nfrom pandas.core.dtypes.generic import (\r\n ABCFloat64Index,\r\n ABCInt64Index,\r\n ABCRangeIndex,\r\n ABCSeries,\r\n ABCUInt64Index,\r\n)\r\nfrom pandas.core.dtypes.missing import isna\r\n\r\nfrom pandas.core import algorithms\r\nimport pandas.core.common as com\r\nfrom pandas.core.indexes.base import (\r\n Index,\r\n InvalidIndexError,\r\n _index_shared_docs,\r\n maybe_extract_name,\r\n)\r\nfrom pandas.core.ops import get_op_result_name\r\n\r\n_num_index_shared_docs = dict()\r\n\r\n\r\nclass NumericIndex(Index):\r\n \"\"\"\r\n Provide numeric type operations.\r\n\r\n This is an abstract class.\r\n \"\"\"\r\n\r\n _is_numeric_dtype = True\r\n\r\n def __new__(cls, data=None, dtype=None, copy=False, name=None):\r\n cls._validate_dtype(dtype)\r\n\r\n # Coerce to ndarray if not already ndarray or Index\r\n if not isinstance(data, (np.ndarray, Index)):\r\n if is_scalar(data):\r\n raise cls._scalar_data_error(data)\r\n\r\n # other iterable of some kind\r\n if not isinstance(data, (ABCSeries, list, tuple)):\r\n data = list(data)\r\n\r\n data = np.asarray(data, dtype=dtype)\r\n\r\n if issubclass(data.dtype.type, str):\r\n cls._string_data_error(data)\r\n\r\n if copy or not is_dtype_equal(data.dtype, cls._default_dtype):\r\n subarr = np.array(data, dtype=cls._default_dtype, copy=copy)\r\n cls._assert_safe_casting(data, subarr)\r\n else:\r\n subarr = data\r\n\r\n if subarr.ndim > 1:\r\n # GH#13601, GH#20285, GH#27125\r\n raise ValueError(\"Index data must be 1-dimensional\")\r\n\r\n name = maybe_extract_name(name, data, cls)\r\n return cls._simple_new(subarr, name=name)\r\n\r\n @classmethod\r\n def _validate_dtype(cls, dtype: Dtype) -> None:\r\n if dtype is None:\r\n return\r\n validation_metadata = {\r\n \"int64index\": (is_signed_integer_dtype, \"signed integer\"),\r\n \"uint64index\": (is_unsigned_integer_dtype, \"unsigned integer\"),\r\n \"float64index\": (is_float_dtype, \"float\"),\r\n \"rangeindex\": (is_signed_integer_dtype, \"signed integer\"),\r\n }\r\n\r\n validation_func, expected = validation_metadata[cls._typ]\r\n if not validation_func(dtype):\r\n raise ValueError(\r\n f\"Incorrect `dtype` passed: expected {expected}, received {dtype}\"\r\n )\r\n\r\n @Appender(_index_shared_docs[\"_maybe_cast_slice_bound\"])\r\n def _maybe_cast_slice_bound(self, label, side, kind):\r\n assert kind in [\"ix\", \"loc\", \"getitem\", None]\r\n\r\n # we will try to coerce to integers\r\n return self._maybe_cast_indexer(label)\r\n\r\n @Appender(_index_shared_docs[\"_shallow_copy\"])\r\n def _shallow_copy(self, values=None, **kwargs):\r\n if values is not None and not self._can_hold_na:\r\n # Ensure we are not returning an Int64Index with float data:\r\n return self._shallow_copy_with_infer(values=values, **kwargs)\r\n return super()._shallow_copy(values=values, **kwargs)\r\n\r\n def _convert_for_op(self, value):\r\n \"\"\"\r\n Convert value to be insertable to ndarray.\r\n \"\"\"\r\n if is_bool(value) or is_bool_dtype(value):\r\n # force conversion to object\r\n # so we don't 
lose the bools\r\n raise TypeError\r\n\r\n return value\r\n\r\n def _convert_tolerance(self, tolerance, target):\r\n tolerance = np.asarray(tolerance)\r\n if target.size != tolerance.size and tolerance.size > 1:\r\n raise ValueError(\"list-like tolerance size must match target index size\")\r\n if not np.issubdtype(tolerance.dtype, np.number):\r\n if tolerance.ndim > 0:\r\n raise ValueError(\r\n f\"tolerance argument for {type(self).__name__} must contain \"\r\n \"numeric elements if it is list type\"\r\n )\r\n else:\r\n raise ValueError(\r\n f\"tolerance argument for {type(self).__name__} must be numeric \"\r\n f\"if it is a scalar: {repr(tolerance)}\"\r\n )\r\n return tolerance\r\n\r\n @classmethod\r\n def _assert_safe_casting(cls, data, subarr):\r\n \"\"\"\r\n Subclasses need to override this only if the process of casting data\r\n from some accepted dtype to the internal dtype(s) bears the risk of\r\n truncation (e.g. float to int).\r\n \"\"\"\r\n pass\r\n\r\n def _concat_same_dtype(self, indexes, name):\r\n result = type(indexes[0])(np.concatenate([x._values for x in indexes]))\r\n return result.rename(name)\r\n\r\n @property\r\n def is_all_dates(self) -> bool:\r\n \"\"\"\r\n Checks that all the labels are datetime objects.\r\n \"\"\"\r\n return False\r\n\r\n @Appender(Index.insert.__doc__)\r\n def insert(self, loc, item):\r\n # treat NA values as nans:\r\n if is_scalar(item) and isna(item):\r\n item = self._na_value\r\n return super().insert(loc, item)\r\n\r\n def _union(self, other, sort):\r\n # Right now, we treat union(int, float) a bit special.\r\n # See https://github.com/pandas-dev/pandas/issues/26778 for discussion\r\n # We may change union(int, float) to go to object.\r\n # float | [u]int -> float (the special case)\r\n # <T> | <T> -> T\r\n # <T> | <U> -> object\r\n needs_cast = (is_integer_dtype(self.dtype) and is_float_dtype(other.dtype)) or (\r\n is_integer_dtype(other.dtype) and is_float_dtype(self.dtype)\r\n )\r\n if needs_cast:\r\n first = self.astype(\"float\")\r\n second = other.astype(\"float\")\r\n return first._union(second, sort)\r\n else:\r\n return super()._union(other, sort)\r\n\r\n\r\n_num_index_shared_docs[\r\n \"class_descr\"\r\n] = \"\"\"\r\n Immutable ndarray implementing an ordered, sliceable set. The basic object\r\n storing axis labels for all pandas objects. %(klass)s is a special case\r\n of `Index` with purely %(ltype)s labels. %(extra)s.\r\n\r\n Parameters\r\n ----------\r\n data : array-like (1-dimensional)\r\n dtype : NumPy dtype (default: %(dtype)s)\r\n copy : bool\r\n Make a copy of input ndarray.\r\n name : object\r\n Name to be stored in the index.\r\n\r\n Attributes\r\n ----------\r\n None\r\n\r\n Methods\r\n -------\r\n None\r\n\r\n See Also\r\n --------\r\n Index : The base pandas Index type.\r\n\r\n Notes\r\n -----\r\n An Index instance can **only** contain hashable objects.\r\n\"\"\"\r\n\r\n_int64_descr_args = dict(klass=\"Int64Index\", ltype=\"integer\", dtype=\"int64\", extra=\"\")\r\n\r\n\r\nclass IntegerIndex(NumericIndex):\r\n \"\"\"\r\n This is an abstract class for Int64Index, UInt64Index.\r\n \"\"\"\r\n\r\n def __contains__(self, key) -> bool:\r\n \"\"\"\r\n Check if key is a float and has a decimal. 
If it has, return False.\r\n \"\"\"\r\n hash(key)\r\n try:\r\n if is_float(key) and int(key) != key:\r\n return False\r\n return key in self._engine\r\n except (OverflowError, TypeError, ValueError):\r\n return False\r\n\r\n\r\nclass Int64Index(IntegerIndex):\r\n __doc__ = _num_index_shared_docs[\"class_descr\"] % _int64_descr_args\r\n\r\n _typ = \"int64index\"\r\n _can_hold_na = False\r\n _engine_type = libindex.Int64Engine\r\n _default_dtype = np.int64\r\n\r\n @property\r\n def inferred_type(self) -> str:\r\n \"\"\"\r\n Always 'integer' for ``Int64Index``\r\n \"\"\"\r\n return \"integer\"\r\n\r\n @property\r\n def asi8(self) -> np.ndarray:\r\n # do not cache or you'll create a memory leak\r\n return self.values.view(\"i8\")\r\n\r\n @Appender(_index_shared_docs[\"_convert_scalar_indexer\"])\r\n def _convert_scalar_indexer(self, key, kind=None):\r\n assert kind in [\"ix\", \"loc\", \"getitem\", \"iloc\", None]\r\n\r\n # don't coerce ilocs to integers\r\n if kind != \"iloc\":\r\n key = self._maybe_cast_indexer(key)\r\n return super()._convert_scalar_indexer(key, kind=kind)\r\n\r\n def _wrap_joined_index(self, joined, other):\r\n name = get_op_result_name(self, other)\r\n return Int64Index(joined, name=name)\r\n\r\n @classmethod\r\n def _assert_safe_casting(cls, data, subarr):\r\n \"\"\"\r\n Ensure incoming data can be represented as ints.\r\n \"\"\"\r\n if not issubclass(data.dtype.type, np.signedinteger):\r\n if not np.array_equal(data, subarr):\r\n raise TypeError(\"Unsafe NumPy casting, you must explicitly cast\")\r\n\r\n def _is_compatible_with_other(self, other):\r\n return super()._is_compatible_with_other(other) or all(\r\n isinstance(type(obj), (ABCInt64Index, ABCFloat64Index, ABCRangeIndex))\r\n for obj in [self, other]\r\n )\r\n\r\n\r\nInt64Index._add_numeric_methods()\r\nInt64Index._add_logical_methods()\r\n\r\n_uint64_descr_args = dict(\r\n klass=\"UInt64Index\", ltype=\"unsigned integer\", dtype=\"uint64\", extra=\"\"\r\n)\r\n\r\n\r\nclass UInt64Index(IntegerIndex):\r\n __doc__ = _num_index_shared_docs[\"class_descr\"] % _uint64_descr_args\r\n\r\n _typ = \"uint64index\"\r\n _can_hold_na = False\r\n _engine_type = libindex.UInt64Engine\r\n _default_dtype = np.uint64\r\n\r\n @property\r\n def inferred_type(self) -> str:\r\n \"\"\"\r\n Always 'integer' for ``UInt64Index``\r\n \"\"\"\r\n return \"integer\"\r\n\r\n @property\r\n def asi8(self) -> np.ndarray:\r\n # do not cache or you'll create a memory leak\r\n return self.values.view(\"u8\")\r\n\r\n @Appender(_index_shared_docs[\"_convert_scalar_indexer\"])\r\n def _convert_scalar_indexer(self, key, kind=None):\r\n assert kind in [\"ix\", \"loc\", \"getitem\", \"iloc\", None]\r\n\r\n # don't coerce ilocs to integers\r\n if kind != \"iloc\":\r\n key = self._maybe_cast_indexer(key)\r\n return super()._convert_scalar_indexer(key, kind=kind)\r\n\r\n @Appender(_index_shared_docs[\"_convert_arr_indexer\"])\r\n def _convert_arr_indexer(self, keyarr):\r\n # Cast the indexer to uint64 if possible so that the values returned\r\n # from indexing are also uint64.\r\n dtype = None\r\n if is_integer_dtype(keyarr) or (\r\n lib.infer_dtype(keyarr, skipna=False) == \"integer\"\r\n ):\r\n dtype = np.uint64\r\n\r\n return com.asarray_tuplesafe(keyarr, dtype=dtype)\r\n\r\n @Appender(_index_shared_docs[\"_convert_index_indexer\"])\r\n def _convert_index_indexer(self, keyarr):\r\n # Cast the indexer to uint64 if possible so\r\n # that the values returned from indexing are\r\n # also uint64.\r\n if keyarr.is_integer():\r\n return 
keyarr.astype(np.uint64)\r\n return keyarr\r\n\r\n def _wrap_joined_index(self, joined, other):\r\n name = get_op_result_name(self, other)\r\n return UInt64Index(joined, name=name)\r\n\r\n @classmethod\r\n def _assert_safe_casting(cls, data, subarr):\r\n \"\"\"\r\n Ensure incoming data can be represented as uints.\r\n \"\"\"\r\n if not issubclass(data.dtype.type, np.unsignedinteger):\r\n if not np.array_equal(data, subarr):\r\n raise TypeError(\"Unsafe NumPy casting, you must explicitly cast\")\r\n\r\n def _is_compatible_with_other(self, other):\r\n return super()._is_compatible_with_other(other) or all(\r\n isinstance(type(obj), (ABCUInt64Index, ABCFloat64Index))\r\n for obj in [self, other]\r\n )\r\n\r\n\r\nUInt64Index._add_numeric_methods()\r\nUInt64Index._add_logical_methods()\r\n\r\n_float64_descr_args = dict(\r\n klass=\"Float64Index\", dtype=\"float64\", ltype=\"float\", extra=\"\"\r\n)\r\n\r\n\r\nclass Float64Index(NumericIndex):\r\n __doc__ = _num_index_shared_docs[\"class_descr\"] % _float64_descr_args\r\n\r\n _typ = \"float64index\"\r\n _engine_type = libindex.Float64Engine\r\n _default_dtype = np.float64\r\n\r\n @property\r\n def inferred_type(self) -> str:\r\n \"\"\"\r\n Always 'floating' for ``Float64Index``\r\n \"\"\"\r\n return \"floating\"\r\n\r\n @Appender(_index_shared_docs[\"astype\"])\r\n def astype(self, dtype, copy=True):\r\n dtype = pandas_dtype(dtype)\r\n if needs_i8_conversion(dtype):\r\n raise TypeError(\r\n f\"Cannot convert Float64Index to dtype {dtype}; integer \"\r\n \"values are required for conversion\"\r\n )\r\n elif is_integer_dtype(dtype) and not is_extension_array_dtype(dtype):\r\n # TODO(jreback); this can change once we have an EA Index type\r\n # GH 13149\r\n arr = astype_nansafe(self.values, dtype=dtype)\r\n return Int64Index(arr)\r\n return super().astype(dtype, copy=copy)\r\n\r\n @Appender(_index_shared_docs[\"_convert_scalar_indexer\"])\r\n def _convert_scalar_indexer(self, key, kind=None):\r\n assert kind in [\"ix\", \"loc\", \"getitem\", \"iloc\", None]\r\n\r\n if kind == \"iloc\":\r\n return self._validate_indexer(\"positional\", key, kind)\r\n\r\n return key\r\n\r\n @Appender(_index_shared_docs[\"_convert_slice_indexer\"])\r\n def _convert_slice_indexer(self, key, kind=None):\r\n # if we are not a slice, then we are done\r\n if not isinstance(key, slice):\r\n return key\r\n\r\n if kind == \"iloc\":\r\n return super()._convert_slice_indexer(key, kind=kind)\r\n\r\n # translate to locations\r\n return self.slice_indexer(key.start, key.stop, key.step, kind=kind)\r\n\r\n def _format_native_types(\r\n self, na_rep=\"\", float_format=None, decimal=\".\", quoting=None, **kwargs\r\n ):\r\n from pandas.io.formats.format import FloatArrayFormatter\r\n\r\n formatter = FloatArrayFormatter(\r\n self.values,\r\n na_rep=na_rep,\r\n float_format=float_format,\r\n decimal=decimal,\r\n quoting=quoting,\r\n fixed_width=False,\r\n )\r\n return formatter.get_result_as_array()\r\n\r\n def get_value(self, series, key):\r\n \"\"\"\r\n We always want to get an index value, never a value.\r\n \"\"\"\r\n if not is_scalar(key):\r\n raise InvalidIndexError\r\n\r\n k = com.values_from_object(key)\r\n loc = self.get_loc(k)\r\n new_values = com.values_from_object(series)[loc]\r\n\r\n return new_values\r\n\r\n def equals(self, other) -> bool:\r\n \"\"\"\r\n Determines if two Index objects contain the same elements.\r\n \"\"\"\r\n if self is other:\r\n return True\r\n\r\n if not isinstance(other, Index):\r\n return False\r\n\r\n # need to compare nans locations and make sure 
that they are the same\r\n # since nans don't compare equal this is a bit tricky\r\n try:\r\n if not isinstance(other, Float64Index):\r\n other = self._constructor(other)\r\n if not is_dtype_equal(self.dtype, other.dtype) or self.shape != other.shape:\r\n return False\r\n left, right = self._ndarray_values, other._ndarray_values\r\n return ((left == right) | (self._isnan & other._isnan)).all()\r\n except (TypeError, ValueError):\r\n return False\r\n\r\n def __contains__(self, other) -> bool:\r\n if super().__contains__(other):\r\n return True\r\n\r\n try:\r\n # if other is a sequence this throws a ValueError\r\n return np.isnan(other) and self.hasnans\r\n except ValueError:\r\n try:\r\n return len(other) <= 1 and other.item() in self\r\n except AttributeError:\r\n return len(other) <= 1 and other in self\r\n except TypeError:\r\n pass\r\n except TypeError:\r\n pass\r\n\r\n return False\r\n\r\n @Appender(_index_shared_docs[\"get_loc\"])\r\n def get_loc(self, key, method=None, tolerance=None):\r\n try:\r\n if np.all(np.isnan(key)) or is_bool(key):\r\n nan_idxs = self._nan_idxs\r\n try:\r\n return nan_idxs.item()\r\n except ValueError:\r\n if not len(nan_idxs):\r\n raise KeyError(key)\r\n return nan_idxs\r\n except (TypeError, NotImplementedError):\r\n pass\r\n return super().get_loc(key, method=method, tolerance=tolerance)\r\n\r\n @cache_readonly\r\n def is_unique(self) -> bool:\r\n return super().is_unique and self._nan_idxs.size < 2\r\n\r\n @Appender(Index.isin.__doc__)\r\n def isin(self, values, level=None):\r\n if level is not None:\r\n self._validate_index_level(level)\r\n return algorithms.isin(np.array(self), values)\r\n\r\n def _is_compatible_with_other(self, other):\r\n return super()._is_compatible_with_other(other) or all(\r\n isinstance(\r\n type(obj),\r\n (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex),\r\n )\r\n for obj in [self, other]\r\n )\r\n\r\n\r\nFloat64Index._add_numeric_methods()\r\nFloat64Index._add_logical_methods_disabled()\r\n",
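(Editor's illustration, not part of the dataset row above.) The numeric-index module stored in this row implements a special union rule in NumericIndex._union: unioning an integer index with a float index casts both operands to float before combining. A minimal sketch of that behaviour, assuming a pandas build in which integer/float index unions still follow this code path (the concrete Int64Index/Float64Index classes shown here were later retired, but the resulting dtype is the same):

import pandas as pd

int_idx = pd.Index([1, 2, 3])       # integer-dtype index (Int64Index in this code)
float_idx = pd.Index([2.0, 3.5])    # float-dtype index (Float64Index in this code)

result = int_idx.union(float_idx)   # both operands are cast to float first
print(result.dtype)                 # float64
print(list(result))                 # [1.0, 2.0, 3.0, 3.5]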
"from operator import le, lt\r\nimport textwrap\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._config import get_option\r\n\r\nfrom pandas._libs.interval import Interval, IntervalMixin, intervals_to_interval_bounds\r\nfrom pandas.compat.numpy import function as nv\r\nfrom pandas.util._decorators import Appender\r\n\r\nfrom pandas.core.dtypes.cast import maybe_convert_platform\r\nfrom pandas.core.dtypes.common import (\r\n is_categorical_dtype,\r\n is_datetime64_any_dtype,\r\n is_float_dtype,\r\n is_integer_dtype,\r\n is_interval,\r\n is_interval_dtype,\r\n is_list_like,\r\n is_object_dtype,\r\n is_scalar,\r\n is_string_dtype,\r\n is_timedelta64_dtype,\r\n pandas_dtype,\r\n)\r\nfrom pandas.core.dtypes.dtypes import IntervalDtype\r\nfrom pandas.core.dtypes.generic import (\r\n ABCDatetimeIndex,\r\n ABCExtensionArray,\r\n ABCIndexClass,\r\n ABCInterval,\r\n ABCIntervalIndex,\r\n ABCPeriodIndex,\r\n ABCSeries,\r\n)\r\nfrom pandas.core.dtypes.missing import isna, notna\r\n\r\nfrom pandas.core.algorithms import take, value_counts\r\nfrom pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs\r\nfrom pandas.core.arrays.categorical import Categorical\r\nimport pandas.core.common as com\r\nfrom pandas.core.construction import array\r\nfrom pandas.core.indexers import check_array_indexer\r\nfrom pandas.core.indexes.base import ensure_index\r\n\r\n_VALID_CLOSED = {\"left\", \"right\", \"both\", \"neither\"}\r\n_interval_shared_docs = {}\r\n\r\n_shared_docs_kwargs = dict(\r\n klass=\"IntervalArray\", qualname=\"arrays.IntervalArray\", name=\"\"\r\n)\r\n\r\n\r\n_interval_shared_docs[\r\n \"class\"\r\n] = \"\"\"\r\n%(summary)s\r\n\r\n.. versionadded:: %(versionadded)s\r\n\r\nParameters\r\n----------\r\ndata : array-like (1-dimensional)\r\n Array-like containing Interval objects from which to build the\r\n %(klass)s.\r\nclosed : {'left', 'right', 'both', 'neither'}, default 'right'\r\n Whether the intervals are closed on the left-side, right-side, both or\r\n neither.\r\ndtype : dtype or None, default None\r\n If None, dtype will be inferred.\r\n\r\n .. 
versionadded:: 0.23.0\r\ncopy : bool, default False\r\n Copy the input data.\r\n%(name)s\\\r\nverify_integrity : bool, default True\r\n Verify that the %(klass)s is valid.\r\n\r\nAttributes\r\n----------\r\nleft\r\nright\r\nclosed\r\nmid\r\nlength\r\nis_empty\r\nis_non_overlapping_monotonic\r\n%(extra_attributes)s\\\r\n\r\nMethods\r\n-------\r\nfrom_arrays\r\nfrom_tuples\r\nfrom_breaks\r\ncontains\r\noverlaps\r\nset_closed\r\nto_tuples\r\n%(extra_methods)s\\\r\n\r\nSee Also\r\n--------\r\nIndex : The base pandas Index type.\r\nInterval : A bounded slice-like interval; the elements of an %(klass)s.\r\ninterval_range : Function to create a fixed frequency IntervalIndex.\r\ncut : Bin values into discrete Intervals.\r\nqcut : Bin values into equal-sized Intervals based on rank or sample quantiles.\r\n\r\nNotes\r\n-----\r\nSee the `user guide\r\n<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`_\r\nfor more.\r\n\r\n%(examples)s\\\r\n\"\"\"\r\n\r\n\r\n@Appender(\r\n _interval_shared_docs[\"class\"]\r\n % dict(\r\n klass=\"IntervalArray\",\r\n summary=\"Pandas array for interval data that are closed on the same side.\",\r\n versionadded=\"0.24.0\",\r\n name=\"\",\r\n extra_attributes=\"\",\r\n extra_methods=\"\",\r\n examples=textwrap.dedent(\r\n \"\"\"\\\r\n Examples\r\n --------\r\n A new ``IntervalArray`` can be constructed directly from an array-like of\r\n ``Interval`` objects:\r\n\r\n >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])\r\n <IntervalArray>\r\n [(0, 1], (1, 5]]\r\n Length: 2, closed: right, dtype: interval[int64]\r\n\r\n It may also be constructed using one of the constructor\r\n methods: :meth:`IntervalArray.from_arrays`,\r\n :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.\r\n \"\"\"\r\n ),\r\n )\r\n)\r\nclass IntervalArray(IntervalMixin, ExtensionArray):\r\n ndim = 1\r\n can_hold_na = True\r\n _na_value = _fill_value = np.nan\r\n\r\n def __new__(cls, data, closed=None, dtype=None, copy=False, verify_integrity=True):\r\n\r\n if isinstance(data, ABCSeries) and is_interval_dtype(data):\r\n data = data.values\r\n\r\n if isinstance(data, (cls, ABCIntervalIndex)):\r\n left = data.left\r\n right = data.right\r\n closed = closed or data.closed\r\n else:\r\n\r\n # don't allow scalars\r\n if is_scalar(data):\r\n msg = (\r\n f\"{cls.__name__}(...) 
must be called with a collection \"\r\n f\"of some kind, {data} was passed\"\r\n )\r\n raise TypeError(msg)\r\n\r\n # might need to convert empty or purely na data\r\n data = maybe_convert_platform_interval(data)\r\n left, right, infer_closed = intervals_to_interval_bounds(\r\n data, validate_closed=closed is None\r\n )\r\n closed = closed or infer_closed\r\n\r\n return cls._simple_new(\r\n left,\r\n right,\r\n closed,\r\n copy=copy,\r\n dtype=dtype,\r\n verify_integrity=verify_integrity,\r\n )\r\n\r\n @classmethod\r\n def _simple_new(\r\n cls, left, right, closed=None, copy=False, dtype=None, verify_integrity=True\r\n ):\r\n result = IntervalMixin.__new__(cls)\r\n\r\n closed = closed or \"right\"\r\n left = ensure_index(left, copy=copy)\r\n right = ensure_index(right, copy=copy)\r\n\r\n if dtype is not None:\r\n # GH 19262: dtype must be an IntervalDtype to override inferred\r\n dtype = pandas_dtype(dtype)\r\n if not is_interval_dtype(dtype):\r\n msg = f\"dtype must be an IntervalDtype, got {dtype}\"\r\n raise TypeError(msg)\r\n elif dtype.subtype is not None:\r\n left = left.astype(dtype.subtype)\r\n right = right.astype(dtype.subtype)\r\n\r\n # coerce dtypes to match if needed\r\n if is_float_dtype(left) and is_integer_dtype(right):\r\n right = right.astype(left.dtype)\r\n elif is_float_dtype(right) and is_integer_dtype(left):\r\n left = left.astype(right.dtype)\r\n\r\n if type(left) != type(right):\r\n msg = (\r\n f\"must not have differing left [{type(left).__name__}] and \"\r\n f\"right [{type(right).__name__}] types\"\r\n )\r\n raise ValueError(msg)\r\n elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):\r\n # GH 19016\r\n msg = (\r\n \"category, object, and string subtypes are not supported \"\r\n \"for IntervalArray\"\r\n )\r\n raise TypeError(msg)\r\n elif isinstance(left, ABCPeriodIndex):\r\n msg = \"Period dtypes are not supported, use a PeriodIndex instead\"\r\n raise ValueError(msg)\r\n elif isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):\r\n msg = (\r\n \"left and right must have the same time zone, got \"\r\n f\"'{left.tz}' and '{right.tz}'\"\r\n )\r\n raise ValueError(msg)\r\n\r\n result._left = left\r\n result._right = right\r\n result._closed = closed\r\n if verify_integrity:\r\n result._validate()\r\n return result\r\n\r\n @classmethod\r\n def _from_sequence(cls, scalars, dtype=None, copy=False):\r\n return cls(scalars, dtype=dtype, copy=copy)\r\n\r\n @classmethod\r\n def _from_factorized(cls, values, original):\r\n if len(values) == 0:\r\n # An empty array returns object-dtype here. We can't create\r\n # a new IA from an (empty) object-dtype array, so turn it into the\r\n # correct dtype.\r\n values = values.astype(original.dtype.subtype)\r\n return cls(values, closed=original.closed)\r\n\r\n _interval_shared_docs[\"from_breaks\"] = textwrap.dedent(\r\n \"\"\"\r\n Construct an %(klass)s from an array of splits.\r\n\r\n Parameters\r\n ----------\r\n breaks : array-like (1-dimensional)\r\n Left and right bounds for each interval.\r\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\r\n Whether the intervals are closed on the left-side, right-side, both\r\n or neither.\r\n copy : bool, default False\r\n Copy the data.\r\n dtype : dtype or None, default None\r\n If None, dtype will be inferred.\r\n\r\n .. 
versionadded:: 0.23.0\r\n\r\n Returns\r\n -------\r\n %(klass)s\r\n\r\n See Also\r\n --------\r\n interval_range : Function to create a fixed frequency IntervalIndex.\r\n %(klass)s.from_arrays : Construct from a left and right array.\r\n %(klass)s.from_tuples : Construct from a sequence of tuples.\r\n\r\n %(examples)s\\\r\n \"\"\"\r\n )\r\n\r\n @classmethod\r\n @Appender(\r\n _interval_shared_docs[\"from_breaks\"]\r\n % dict(\r\n klass=\"IntervalArray\",\r\n examples=textwrap.dedent(\r\n \"\"\"\\\r\n Examples\r\n --------\r\n >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])\r\n <IntervalArray>\r\n [(0, 1], (1, 2], (2, 3]]\r\n Length: 3, closed: right, dtype: interval[int64]\r\n \"\"\"\r\n ),\r\n )\r\n )\r\n def from_breaks(cls, breaks, closed=\"right\", copy=False, dtype=None):\r\n breaks = maybe_convert_platform_interval(breaks)\r\n\r\n return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)\r\n\r\n _interval_shared_docs[\"from_arrays\"] = textwrap.dedent(\r\n \"\"\"\r\n Construct from two arrays defining the left and right bounds.\r\n\r\n Parameters\r\n ----------\r\n left : array-like (1-dimensional)\r\n Left bounds for each interval.\r\n right : array-like (1-dimensional)\r\n Right bounds for each interval.\r\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\r\n Whether the intervals are closed on the left-side, right-side, both\r\n or neither.\r\n copy : bool, default False\r\n Copy the data.\r\n dtype : dtype, optional\r\n If None, dtype will be inferred.\r\n\r\n .. versionadded:: 0.23.0\r\n\r\n Returns\r\n -------\r\n %(klass)s\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When a value is missing in only one of `left` or `right`.\r\n When a value in `left` is greater than the corresponding value\r\n in `right`.\r\n\r\n See Also\r\n --------\r\n interval_range : Function to create a fixed frequency IntervalIndex.\r\n %(klass)s.from_breaks : Construct an %(klass)s from an array of\r\n splits.\r\n %(klass)s.from_tuples : Construct an %(klass)s from an\r\n array-like of tuples.\r\n\r\n Notes\r\n -----\r\n Each element of `left` must be less than or equal to the `right`\r\n element at the same position. If an element is missing, it must be\r\n missing in both `left` and `right`. A TypeError is raised when\r\n using an unsupported type for `left` or `right`. 
At the moment,\r\n 'category', 'object', and 'string' subtypes are not supported.\r\n\r\n %(examples)s\\\r\n \"\"\"\r\n )\r\n\r\n @classmethod\r\n @Appender(\r\n _interval_shared_docs[\"from_arrays\"]\r\n % dict(\r\n klass=\"IntervalArray\",\r\n examples=textwrap.dedent(\r\n \"\"\"\\\r\n >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])\r\n <IntervalArray>\r\n [(0, 1], (1, 2], (2, 3]]\r\n Length: 3, closed: right, dtype: interval[int64]\r\n \"\"\"\r\n ),\r\n )\r\n )\r\n def from_arrays(cls, left, right, closed=\"right\", copy=False, dtype=None):\r\n left = maybe_convert_platform_interval(left)\r\n right = maybe_convert_platform_interval(right)\r\n\r\n return cls._simple_new(\r\n left, right, closed, copy=copy, dtype=dtype, verify_integrity=True\r\n )\r\n\r\n _interval_shared_docs[\"from_tuples\"] = textwrap.dedent(\r\n \"\"\"\r\n Construct an %(klass)s from an array-like of tuples.\r\n\r\n Parameters\r\n ----------\r\n data : array-like (1-dimensional)\r\n Array of tuples.\r\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\r\n Whether the intervals are closed on the left-side, right-side, both\r\n or neither.\r\n copy : bool, default False\r\n By-default copy the data, this is compat only and ignored.\r\n dtype : dtype or None, default None\r\n If None, dtype will be inferred.\r\n\r\n .. versionadded:: 0.23.0\r\n\r\n Returns\r\n -------\r\n %(klass)s\r\n\r\n See Also\r\n --------\r\n interval_range : Function to create a fixed frequency IntervalIndex.\r\n %(klass)s.from_arrays : Construct an %(klass)s from a left and\r\n right array.\r\n %(klass)s.from_breaks : Construct an %(klass)s from an array of\r\n splits.\r\n\r\n %(examples)s\\\r\n \"\"\"\r\n )\r\n\r\n @classmethod\r\n @Appender(\r\n _interval_shared_docs[\"from_tuples\"]\r\n % dict(\r\n klass=\"IntervalArray\",\r\n examples=textwrap.dedent(\r\n \"\"\"\\\r\n Examples\r\n --------\r\n >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])\r\n <IntervalArray>\r\n [(0, 1], (1, 2]]\r\n Length: 2, closed: right, dtype: interval[int64]\r\n \"\"\"\r\n ),\r\n )\r\n )\r\n def from_tuples(cls, data, closed=\"right\", copy=False, dtype=None):\r\n if len(data):\r\n left, right = [], []\r\n else:\r\n # ensure that empty data keeps input dtype\r\n left = right = data\r\n\r\n for d in data:\r\n if isna(d):\r\n lhs = rhs = np.nan\r\n else:\r\n name = cls.__name__\r\n try:\r\n # need list of length 2 tuples, e.g. 
[(0, 1), (1, 2), ...]\r\n lhs, rhs = d\r\n except ValueError:\r\n msg = f\"{name}.from_tuples requires tuples of length 2, got {d}\"\r\n raise ValueError(msg)\r\n except TypeError:\r\n msg = f\"{name}.from_tuples received an invalid item, {d}\"\r\n raise TypeError(msg)\r\n left.append(lhs)\r\n right.append(rhs)\r\n\r\n return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)\r\n\r\n def _validate(self):\r\n \"\"\"Verify that the IntervalArray is valid.\r\n\r\n Checks that\r\n\r\n * closed is valid\r\n * left and right match lengths\r\n * left and right have the same missing values\r\n * left is always below right\r\n \"\"\"\r\n if self.closed not in _VALID_CLOSED:\r\n msg = f\"invalid option for 'closed': {self.closed}\"\r\n raise ValueError(msg)\r\n if len(self.left) != len(self.right):\r\n msg = \"left and right must have the same length\"\r\n raise ValueError(msg)\r\n left_mask = notna(self.left)\r\n right_mask = notna(self.right)\r\n if not (left_mask == right_mask).all():\r\n msg = (\r\n \"missing values must be missing in the same \"\r\n \"location both left and right sides\"\r\n )\r\n raise ValueError(msg)\r\n if not (self.left[left_mask] <= self.right[left_mask]).all():\r\n msg = \"left side of interval must be <= right side\"\r\n raise ValueError(msg)\r\n\r\n # ---------\r\n # Interface\r\n # ---------\r\n def __iter__(self):\r\n return iter(np.asarray(self))\r\n\r\n def __len__(self) -> int:\r\n return len(self.left)\r\n\r\n def __getitem__(self, value):\r\n value = check_array_indexer(self, value)\r\n left = self.left[value]\r\n right = self.right[value]\r\n\r\n # scalar\r\n if not isinstance(left, ABCIndexClass):\r\n if is_scalar(left) and isna(left):\r\n return self._fill_value\r\n if np.ndim(left) > 1:\r\n # GH#30588 multi-dimensional indexer disallowed\r\n raise ValueError(\"multi-dimensional indexing not allowed\")\r\n return Interval(left, right, self.closed)\r\n\r\n return self._shallow_copy(left, right)\r\n\r\n def __setitem__(self, key, value):\r\n # na value: need special casing to set directly on numpy arrays\r\n needs_float_conversion = False\r\n if is_scalar(value) and isna(value):\r\n if is_integer_dtype(self.dtype.subtype):\r\n # can't set NaN on a numpy integer array\r\n needs_float_conversion = True\r\n elif is_datetime64_any_dtype(self.dtype.subtype):\r\n # need proper NaT to set directly on the numpy array\r\n value = np.datetime64(\"NaT\")\r\n elif is_timedelta64_dtype(self.dtype.subtype):\r\n # need proper NaT to set directly on the numpy array\r\n value = np.timedelta64(\"NaT\")\r\n value_left, value_right = value, value\r\n\r\n # scalar interval\r\n elif is_interval_dtype(value) or isinstance(value, ABCInterval):\r\n self._check_closed_matches(value, name=\"value\")\r\n value_left, value_right = value.left, value.right\r\n\r\n else:\r\n # list-like of intervals\r\n try:\r\n array = IntervalArray(value)\r\n value_left, value_right = array.left, array.right\r\n except TypeError:\r\n # wrong type: not interval or NA\r\n msg = f\"'value' should be an interval type, got {type(value)} instead.\"\r\n raise TypeError(msg)\r\n\r\n key = check_array_indexer(self, key)\r\n # Need to ensure that left and right are updated atomically, so we're\r\n # forced to copy, update the copy, and swap in the new values.\r\n left = self.left.copy(deep=True)\r\n if needs_float_conversion:\r\n left = left.astype(\"float\")\r\n left.values[key] = value_left\r\n self._left = left\r\n\r\n right = self.right.copy(deep=True)\r\n if needs_float_conversion:\r\n right = 
right.astype(\"float\")\r\n right.values[key] = value_right\r\n self._right = right\r\n\r\n def __eq__(self, other):\r\n # ensure pandas array for list-like and eliminate non-interval scalars\r\n if is_list_like(other):\r\n if len(self) != len(other):\r\n raise ValueError(\"Lengths must match to compare\")\r\n other = array(other)\r\n elif not isinstance(other, Interval):\r\n # non-interval scalar -> no matches\r\n return np.zeros(len(self), dtype=bool)\r\n\r\n # determine the dtype of the elements we want to compare\r\n if isinstance(other, Interval):\r\n other_dtype = \"interval\"\r\n elif not is_categorical_dtype(other):\r\n other_dtype = other.dtype\r\n else:\r\n # for categorical defer to categories for dtype\r\n other_dtype = other.categories.dtype\r\n\r\n # extract intervals if we have interval categories with matching closed\r\n if is_interval_dtype(other_dtype):\r\n if self.closed != other.categories.closed:\r\n return np.zeros(len(self), dtype=bool)\r\n other = other.categories.take(other.codes)\r\n\r\n # interval-like -> need same closed and matching endpoints\r\n if is_interval_dtype(other_dtype):\r\n if self.closed != other.closed:\r\n return np.zeros(len(self), dtype=bool)\r\n return (self.left == other.left) & (self.right == other.right)\r\n\r\n # non-interval/non-object dtype -> no matches\r\n if not is_object_dtype(other_dtype):\r\n return np.zeros(len(self), dtype=bool)\r\n\r\n # object dtype -> iteratively check for intervals\r\n result = np.zeros(len(self), dtype=bool)\r\n for i, obj in enumerate(other):\r\n # need object to be an Interval with same closed and endpoints\r\n if (\r\n isinstance(obj, Interval)\r\n and self.closed == obj.closed\r\n and self.left[i] == obj.left\r\n and self.right[i] == obj.right\r\n ):\r\n result[i] = True\r\n\r\n return result\r\n\r\n def __ne__(self, other):\r\n return ~self.__eq__(other)\r\n\r\n def fillna(self, value=None, method=None, limit=None):\r\n \"\"\"\r\n Fill NA/NaN values using the specified method.\r\n\r\n Parameters\r\n ----------\r\n value : scalar, dict, Series\r\n If a scalar value is passed it is used to fill all missing values.\r\n Alternatively, a Series or dict can be used to fill in different\r\n values for each index. The value should not be a list. The\r\n value(s) passed should be either Interval objects or NA/NaN.\r\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\r\n (Not implemented yet for IntervalArray)\r\n Method to use for filling holes in reindexed Series\r\n limit : int, default None\r\n (Not implemented yet for IntervalArray)\r\n If method is specified, this is the maximum number of consecutive\r\n NaN values to forward/backward fill. In other words, if there is\r\n a gap with more than this number of consecutive NaNs, it will only\r\n be partially filled. If method is not specified, this is the\r\n maximum number of entries along the entire axis where NaNs will be\r\n filled.\r\n\r\n Returns\r\n -------\r\n filled : IntervalArray with NA/NaN filled\r\n \"\"\"\r\n if method is not None:\r\n raise TypeError(\"Filling by method is not supported for IntervalArray.\")\r\n if limit is not None:\r\n raise TypeError(\"limit is not supported for IntervalArray.\")\r\n\r\n if not isinstance(value, ABCInterval):\r\n msg = (\r\n \"'IntervalArray.fillna' only supports filling with a \"\r\n f\"scalar 'pandas.Interval'. 
Got a '{type(value).__name__}' instead.\"\r\n )\r\n raise TypeError(msg)\r\n\r\n value = getattr(value, \"_values\", value)\r\n self._check_closed_matches(value, name=\"value\")\r\n\r\n left = self.left.fillna(value=value.left)\r\n right = self.right.fillna(value=value.right)\r\n return self._shallow_copy(left, right)\r\n\r\n @property\r\n def dtype(self):\r\n return IntervalDtype(self.left.dtype)\r\n\r\n def astype(self, dtype, copy=True):\r\n \"\"\"\r\n Cast to an ExtensionArray or NumPy array with dtype 'dtype'.\r\n\r\n Parameters\r\n ----------\r\n dtype : str or dtype\r\n Typecode or data-type to which the array is cast.\r\n\r\n copy : bool, default True\r\n Whether to copy the data, even if not necessary. If False,\r\n a copy is made only if the old dtype does not match the\r\n new dtype.\r\n\r\n Returns\r\n -------\r\n array : ExtensionArray or ndarray\r\n ExtensionArray or NumPy ndarray with 'dtype' for its dtype.\r\n \"\"\"\r\n dtype = pandas_dtype(dtype)\r\n if is_interval_dtype(dtype):\r\n if dtype == self.dtype:\r\n return self.copy() if copy else self\r\n\r\n # need to cast to different subtype\r\n try:\r\n new_left = self.left.astype(dtype.subtype)\r\n new_right = self.right.astype(dtype.subtype)\r\n except TypeError:\r\n msg = (\r\n f\"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible\"\r\n )\r\n raise TypeError(msg)\r\n return self._shallow_copy(new_left, new_right)\r\n elif is_categorical_dtype(dtype):\r\n return Categorical(np.asarray(self))\r\n # TODO: This try/except will be repeated.\r\n try:\r\n return np.asarray(self).astype(dtype, copy=copy)\r\n except (TypeError, ValueError):\r\n msg = f\"Cannot cast {type(self).__name__} to dtype {dtype}\"\r\n raise TypeError(msg)\r\n\r\n @classmethod\r\n def _concat_same_type(cls, to_concat):\r\n \"\"\"\r\n Concatenate multiple IntervalArray\r\n\r\n Parameters\r\n ----------\r\n to_concat : sequence of IntervalArray\r\n\r\n Returns\r\n -------\r\n IntervalArray\r\n \"\"\"\r\n closed = {interval.closed for interval in to_concat}\r\n if len(closed) != 1:\r\n raise ValueError(\"Intervals must all be closed on the same side.\")\r\n closed = closed.pop()\r\n\r\n left = np.concatenate([interval.left for interval in to_concat])\r\n right = np.concatenate([interval.right for interval in to_concat])\r\n return cls._simple_new(left, right, closed=closed, copy=False)\r\n\r\n def _shallow_copy(self, left=None, right=None, closed=None):\r\n \"\"\"\r\n Return a new IntervalArray with the replacement attributes\r\n\r\n Parameters\r\n ----------\r\n left : array-like\r\n Values to be used for the left-side of the the intervals.\r\n If None, the existing left and right values will be used.\r\n\r\n right : array-like\r\n Values to be used for the right-side of the the intervals.\r\n If None and left is IntervalArray-like, the left and right\r\n of the IntervalArray-like will be used.\r\n\r\n closed : {'left', 'right', 'both', 'neither'}, optional\r\n Whether the intervals are closed on the left-side, right-side, both\r\n or neither. 
If None, the existing closed will be used.\r\n \"\"\"\r\n if left is None:\r\n\r\n # no values passed\r\n left, right = self.left, self.right\r\n\r\n elif right is None:\r\n\r\n # only single value passed, could be an IntervalArray\r\n # or array of Intervals\r\n if not isinstance(left, (type(self), ABCIntervalIndex)):\r\n left = type(self)(left)\r\n\r\n left, right = left.left, left.right\r\n else:\r\n\r\n # both left and right are values\r\n pass\r\n\r\n closed = closed or self.closed\r\n return self._simple_new(left, right, closed=closed, verify_integrity=False)\r\n\r\n def copy(self):\r\n \"\"\"\r\n Return a copy of the array.\r\n\r\n Returns\r\n -------\r\n IntervalArray\r\n \"\"\"\r\n left = self.left.copy(deep=True)\r\n right = self.right.copy(deep=True)\r\n closed = self.closed\r\n # TODO: Could skip verify_integrity here.\r\n return type(self).from_arrays(left, right, closed=closed)\r\n\r\n def isna(self):\r\n return isna(self.left)\r\n\r\n @property\r\n def nbytes(self) -> int:\r\n return self.left.nbytes + self.right.nbytes\r\n\r\n @property\r\n def size(self) -> int:\r\n # Avoid materializing self.values\r\n return self.left.size\r\n\r\n def shift(self, periods: int = 1, fill_value: object = None) -> ABCExtensionArray:\r\n if not len(self) or periods == 0:\r\n return self.copy()\r\n\r\n if isna(fill_value):\r\n fill_value = self.dtype.na_value\r\n\r\n # ExtensionArray.shift doesn't work for two reasons\r\n # 1. IntervalArray.dtype.na_value may not be correct for the dtype.\r\n # 2. IntervalArray._from_sequence only accepts NaN for missing values,\r\n # not other values like NaT\r\n\r\n empty_len = min(abs(periods), len(self))\r\n if isna(fill_value):\r\n fill_value = self.left._na_value\r\n empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))\r\n else:\r\n empty = self._from_sequence([fill_value] * empty_len)\r\n\r\n if periods > 0:\r\n a = empty\r\n b = self[:-periods]\r\n else:\r\n a = self[abs(periods) :]\r\n b = empty\r\n return self._concat_same_type([a, b])\r\n\r\n def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):\r\n \"\"\"\r\n Take elements from the IntervalArray.\r\n\r\n Parameters\r\n ----------\r\n indices : sequence of integers\r\n Indices to be taken.\r\n\r\n allow_fill : bool, default False\r\n How to handle negative values in `indices`.\r\n\r\n * False: negative values in `indices` indicate positional indices\r\n from the right (the default). This is similar to\r\n :func:`numpy.take`.\r\n\r\n * True: negative values in `indices` indicate\r\n missing values. These values are set to `fill_value`. Any other\r\n other negative values raise a ``ValueError``.\r\n\r\n fill_value : Interval or NA, optional\r\n Fill value to use for NA-indices when `allow_fill` is True.\r\n This may be ``None``, in which case the default NA value for\r\n the type, ``self.dtype.na_value``, is used.\r\n\r\n For many ExtensionArrays, there will be two representations of\r\n `fill_value`: a user-facing \"boxed\" scalar, and a low-level\r\n physical NA value. 
`fill_value` should be the user-facing version,\r\n and the implementation should handle translating that to the\r\n physical version for processing the take if necessary.\r\n\r\n axis : any, default None\r\n Present for compat with IntervalIndex; does nothing.\r\n\r\n Returns\r\n -------\r\n IntervalArray\r\n\r\n Raises\r\n ------\r\n IndexError\r\n When the indices are out of bounds for the array.\r\n ValueError\r\n When `indices` contains negative values other than ``-1``\r\n and `allow_fill` is True.\r\n \"\"\"\r\n nv.validate_take(tuple(), kwargs)\r\n\r\n fill_left = fill_right = fill_value\r\n if allow_fill:\r\n if fill_value is None:\r\n fill_left = fill_right = self.left._na_value\r\n elif is_interval(fill_value):\r\n self._check_closed_matches(fill_value, name=\"fill_value\")\r\n fill_left, fill_right = fill_value.left, fill_value.right\r\n elif not is_scalar(fill_value) and notna(fill_value):\r\n msg = (\r\n \"'IntervalArray.fillna' only supports filling with a \"\r\n \"'scalar pandas.Interval or NA'. \"\r\n f\"Got a '{type(fill_value).__name__}' instead.\"\r\n )\r\n raise ValueError(msg)\r\n\r\n left_take = take(\r\n self.left, indices, allow_fill=allow_fill, fill_value=fill_left\r\n )\r\n right_take = take(\r\n self.right, indices, allow_fill=allow_fill, fill_value=fill_right\r\n )\r\n\r\n return self._shallow_copy(left_take, right_take)\r\n\r\n def value_counts(self, dropna=True):\r\n \"\"\"\r\n Returns a Series containing counts of each interval.\r\n\r\n Parameters\r\n ----------\r\n dropna : bool, default True\r\n Don't include counts of NaN.\r\n\r\n Returns\r\n -------\r\n counts : Series\r\n\r\n See Also\r\n --------\r\n Series.value_counts\r\n \"\"\"\r\n # TODO: implement this is a non-naive way!\r\n return value_counts(np.asarray(self), dropna=dropna)\r\n\r\n # Formatting\r\n\r\n def _format_data(self):\r\n\r\n # TODO: integrate with categorical and make generic\r\n # name argument is unused here; just for compat with base / categorical\r\n n = len(self)\r\n max_seq_items = min((get_option(\"display.max_seq_items\") or n) // 10, 10)\r\n\r\n formatter = str\r\n\r\n if n == 0:\r\n summary = \"[]\"\r\n elif n == 1:\r\n first = formatter(self[0])\r\n summary = f\"[{first}]\"\r\n elif n == 2:\r\n first = formatter(self[0])\r\n last = formatter(self[-1])\r\n summary = f\"[{first}, {last}]\"\r\n else:\r\n\r\n if n > max_seq_items:\r\n n = min(max_seq_items // 2, 10)\r\n head = [formatter(x) for x in self[:n]]\r\n tail = [formatter(x) for x in self[-n:]]\r\n head_str = \", \".join(head)\r\n tail_str = \", \".join(tail)\r\n summary = f\"[{head_str} ... {tail_str}]\"\r\n else:\r\n tail = [formatter(x) for x in self]\r\n tail_str = \", \".join(tail)\r\n summary = f\"[{tail_str}]\"\r\n\r\n return summary\r\n\r\n def __repr__(self) -> str:\r\n # the short repr has no trailing newline, while the truncated\r\n # repr does. 
So we include a newline in our template, and strip\r\n # any trailing newlines from format_object_summary\r\n data = self._format_data()\r\n class_name = f\"<{type(self).__name__}>\\n\"\r\n\r\n template = (\r\n f\"{class_name}\"\r\n f\"{data}\\n\"\r\n f\"Length: {len(self)}, closed: {self.closed}, dtype: {self.dtype}\"\r\n )\r\n return template\r\n\r\n def _format_space(self):\r\n space = \" \" * (len(type(self).__name__) + 1)\r\n return f\"\\n{space}\"\r\n\r\n @property\r\n def left(self):\r\n \"\"\"\r\n Return the left endpoints of each Interval in the IntervalArray as\r\n an Index.\r\n \"\"\"\r\n return self._left\r\n\r\n @property\r\n def right(self):\r\n \"\"\"\r\n Return the right endpoints of each Interval in the IntervalArray as\r\n an Index.\r\n \"\"\"\r\n return self._right\r\n\r\n @property\r\n def closed(self):\r\n \"\"\"\r\n Whether the intervals are closed on the left-side, right-side, both or\r\n neither.\r\n \"\"\"\r\n return self._closed\r\n\r\n _interval_shared_docs[\"set_closed\"] = textwrap.dedent(\r\n \"\"\"\r\n Return an %(klass)s identical to the current one, but closed on the\r\n specified side.\r\n\r\n .. versionadded:: 0.24.0\r\n\r\n Parameters\r\n ----------\r\n closed : {'left', 'right', 'both', 'neither'}\r\n Whether the intervals are closed on the left-side, right-side, both\r\n or neither.\r\n\r\n Returns\r\n -------\r\n new_index : %(klass)s\r\n\r\n %(examples)s\\\r\n \"\"\"\r\n )\r\n\r\n @Appender(\r\n _interval_shared_docs[\"set_closed\"]\r\n % dict(\r\n klass=\"IntervalArray\",\r\n examples=textwrap.dedent(\r\n \"\"\"\\\r\n Examples\r\n --------\r\n >>> index = pd.arrays.IntervalArray.from_breaks(range(4))\r\n >>> index\r\n <IntervalArray>\r\n [(0, 1], (1, 2], (2, 3]]\r\n Length: 3, closed: right, dtype: interval[int64]\r\n >>> index.set_closed('both')\r\n <IntervalArray>\r\n [[0, 1], [1, 2], [2, 3]]\r\n Length: 3, closed: both, dtype: interval[int64]\r\n \"\"\"\r\n ),\r\n )\r\n )\r\n def set_closed(self, closed):\r\n if closed not in _VALID_CLOSED:\r\n msg = f\"invalid option for 'closed': {closed}\"\r\n raise ValueError(msg)\r\n\r\n return self._shallow_copy(closed=closed)\r\n\r\n @property\r\n def length(self):\r\n \"\"\"\r\n Return an Index with entries denoting the length of each Interval in\r\n the IntervalArray.\r\n \"\"\"\r\n try:\r\n return self.right - self.left\r\n except TypeError:\r\n # length not defined for some types, e.g. string\r\n msg = (\r\n \"IntervalArray contains Intervals without defined length, \"\r\n \"e.g. Intervals with string endpoints\"\r\n )\r\n raise TypeError(msg)\r\n\r\n @property\r\n def mid(self):\r\n \"\"\"\r\n Return the midpoint of each Interval in the IntervalArray as an Index.\r\n \"\"\"\r\n try:\r\n return 0.5 * (self.left + self.right)\r\n except TypeError:\r\n # datetime safe version\r\n return self.left + 0.5 * self.length\r\n\r\n _interval_shared_docs[\r\n \"is_non_overlapping_monotonic\"\r\n ] = \"\"\"\r\n Return True if the %(klass)s is non-overlapping (no Intervals share\r\n points) and is either monotonic increasing or monotonic decreasing,\r\n else False.\r\n \"\"\"\r\n # https://github.com/python/mypy/issues/1362\r\n # Mypy does not support decorated properties\r\n @property # type: ignore\r\n @Appender(\r\n _interval_shared_docs[\"is_non_overlapping_monotonic\"] % _shared_docs_kwargs\r\n )\r\n def is_non_overlapping_monotonic(self):\r\n # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... 
)\r\n # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)\r\n # we already require left <= right\r\n\r\n # strict inequality for closed == 'both'; equality implies overlapping\r\n # at a point when both sides of intervals are included\r\n if self.closed == \"both\":\r\n return bool(\r\n (self.right[:-1] < self.left[1:]).all()\r\n or (self.left[:-1] > self.right[1:]).all()\r\n )\r\n\r\n # non-strict inequality when closed != 'both'; at least one side is\r\n # not included in the intervals, so equality does not imply overlapping\r\n return bool(\r\n (self.right[:-1] <= self.left[1:]).all()\r\n or (self.left[:-1] >= self.right[1:]).all()\r\n )\r\n\r\n # Conversion\r\n def __array__(self, dtype=None) -> np.ndarray:\r\n \"\"\"\r\n Return the IntervalArray's data as a numpy array of Interval\r\n objects (with dtype='object')\r\n \"\"\"\r\n left = self.left\r\n right = self.right\r\n mask = self.isna()\r\n closed = self._closed\r\n\r\n result = np.empty(len(left), dtype=object)\r\n for i in range(len(left)):\r\n if mask[i]:\r\n result[i] = np.nan\r\n else:\r\n result[i] = Interval(left[i], right[i], closed)\r\n return result\r\n\r\n def __arrow_array__(self, type=None):\r\n \"\"\"\r\n Convert myself into a pyarrow Array.\r\n \"\"\"\r\n import pyarrow\r\n from pandas.core.arrays._arrow_utils import ArrowIntervalType\r\n\r\n try:\r\n subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)\r\n except TypeError:\r\n raise TypeError(\r\n \"Conversion to arrow with subtype '{}' \"\r\n \"is not supported\".format(self.dtype.subtype)\r\n )\r\n interval_type = ArrowIntervalType(subtype, self.closed)\r\n storage_array = pyarrow.StructArray.from_arrays(\r\n [\r\n pyarrow.array(self.left, type=subtype, from_pandas=True),\r\n pyarrow.array(self.right, type=subtype, from_pandas=True),\r\n ],\r\n names=[\"left\", \"right\"],\r\n )\r\n mask = self.isna()\r\n if mask.any():\r\n # if there are missing values, set validity bitmap also on the array level\r\n null_bitmap = pyarrow.array(~mask).buffers()[1]\r\n storage_array = pyarrow.StructArray.from_buffers(\r\n storage_array.type,\r\n len(storage_array),\r\n [null_bitmap],\r\n children=[storage_array.field(0), storage_array.field(1)],\r\n )\r\n\r\n if type is not None:\r\n if type.equals(interval_type.storage_type):\r\n return storage_array\r\n elif isinstance(type, ArrowIntervalType):\r\n # ensure we have the same subtype and closed attributes\r\n if not type.equals(interval_type):\r\n raise TypeError(\r\n \"Not supported to convert IntervalArray to type with \"\r\n \"different 'subtype' ({0} vs {1}) and 'closed' ({2} vs {3}) \"\r\n \"attributes\".format(\r\n self.dtype.subtype, type.subtype, self.closed, type.closed\r\n )\r\n )\r\n else:\r\n raise TypeError(\r\n \"Not supported to convert IntervalArray to '{0}' type\".format(type)\r\n )\r\n\r\n return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)\r\n\r\n _interval_shared_docs[\r\n \"to_tuples\"\r\n ] = \"\"\"\r\n Return an %(return_type)s of tuples of the form (left, right).\r\n\r\n Parameters\r\n ----------\r\n na_tuple : boolean, default True\r\n Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA\r\n value itself if False, ``nan``.\r\n\r\n .. 
versionadded:: 0.23.0\r\n\r\n Returns\r\n -------\r\n tuples: %(return_type)s\r\n %(examples)s\\\r\n \"\"\"\r\n\r\n @Appender(\r\n _interval_shared_docs[\"to_tuples\"] % dict(return_type=\"ndarray\", examples=\"\")\r\n )\r\n def to_tuples(self, na_tuple=True):\r\n tuples = com.asarray_tuplesafe(zip(self.left, self.right))\r\n if not na_tuple:\r\n # GH 18756\r\n tuples = np.where(~self.isna(), tuples, np.nan)\r\n return tuples\r\n\r\n @Appender(_extension_array_shared_docs[\"repeat\"] % _shared_docs_kwargs)\r\n def repeat(self, repeats, axis=None):\r\n nv.validate_repeat(tuple(), dict(axis=axis))\r\n left_repeat = self.left.repeat(repeats)\r\n right_repeat = self.right.repeat(repeats)\r\n return self._shallow_copy(left=left_repeat, right=right_repeat)\r\n\r\n _interval_shared_docs[\"contains\"] = textwrap.dedent(\r\n \"\"\"\r\n Check elementwise if the Intervals contain the value.\r\n\r\n Return a boolean mask whether the value is contained in the Intervals\r\n of the %(klass)s.\r\n\r\n .. versionadded:: 0.25.0\r\n\r\n Parameters\r\n ----------\r\n other : scalar\r\n The value to check whether it is contained in the Intervals.\r\n\r\n Returns\r\n -------\r\n boolean array\r\n\r\n See Also\r\n --------\r\n Interval.contains : Check whether Interval object contains value.\r\n %(klass)s.overlaps : Check if an Interval overlaps the values in the\r\n %(klass)s.\r\n\r\n Examples\r\n --------\r\n %(examples)s\r\n >>> intervals.contains(0.5)\r\n array([ True, False, False])\r\n \"\"\"\r\n )\r\n\r\n @Appender(\r\n _interval_shared_docs[\"contains\"]\r\n % dict(\r\n klass=\"IntervalArray\",\r\n examples=textwrap.dedent(\r\n \"\"\"\\\r\n >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])\r\n >>> intervals\r\n <IntervalArray>\r\n [(0, 1], (1, 3], (2, 4]]\r\n Length: 3, closed: right, dtype: interval[int64]\r\n \"\"\"\r\n ),\r\n )\r\n )\r\n def contains(self, other):\r\n if isinstance(other, Interval):\r\n raise NotImplementedError(\"contains not implemented for two intervals\")\r\n\r\n return (self.left < other if self.open_left else self.left <= other) & (\r\n other < self.right if self.open_right else other <= self.right\r\n )\r\n\r\n _interval_shared_docs[\"overlaps\"] = textwrap.dedent(\r\n \"\"\"\r\n Check elementwise if an Interval overlaps the values in the %(klass)s.\r\n\r\n Two intervals overlap if they share a common point, including closed\r\n endpoints. Intervals that only have an open endpoint in common do not\r\n overlap.\r\n\r\n .. 
versionadded:: 0.24.0\r\n\r\n Parameters\r\n ----------\r\n other : %(klass)s\r\n Interval to check against for an overlap.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n Boolean array positionally indicating where an overlap occurs.\r\n\r\n See Also\r\n --------\r\n Interval.overlaps : Check whether two Interval objects overlap.\r\n\r\n Examples\r\n --------\r\n %(examples)s\r\n >>> intervals.overlaps(pd.Interval(0.5, 1.5))\r\n array([ True, True, False])\r\n\r\n Intervals that share closed endpoints overlap:\r\n\r\n >>> intervals.overlaps(pd.Interval(1, 3, closed='left'))\r\n array([ True, True, True])\r\n\r\n Intervals that only have an open endpoint in common do not overlap:\r\n\r\n >>> intervals.overlaps(pd.Interval(1, 2, closed='right'))\r\n array([False, True, False])\r\n \"\"\"\r\n )\r\n\r\n @Appender(\r\n _interval_shared_docs[\"overlaps\"]\r\n % dict(\r\n klass=\"IntervalArray\",\r\n examples=textwrap.dedent(\r\n \"\"\"\\\r\n >>> data = [(0, 1), (1, 3), (2, 4)]\r\n >>> intervals = pd.arrays.IntervalArray.from_tuples(data)\r\n >>> intervals\r\n <IntervalArray>\r\n [(0, 1], (1, 3], (2, 4]]\r\n Length: 3, closed: right, dtype: interval[int64]\r\n \"\"\"\r\n ),\r\n )\r\n )\r\n def overlaps(self, other):\r\n if isinstance(other, (IntervalArray, ABCIntervalIndex)):\r\n raise NotImplementedError\r\n elif not isinstance(other, Interval):\r\n msg = f\"`other` must be Interval-like, got {type(other).__name__}\"\r\n raise TypeError(msg)\r\n\r\n # equality is okay if both endpoints are closed (overlap at a point)\r\n op1 = le if (self.closed_left and other.closed_right) else lt\r\n op2 = le if (other.closed_left and self.closed_right) else lt\r\n\r\n # overlaps is equivalent negation of two interval being disjoint:\r\n # disjoint = (A.left > B.right) or (B.left > A.right)\r\n # (simplifying the negation allows this to be done in less operations)\r\n return op1(self.left, other.right) & op2(other.left, self.right)\r\n\r\n\r\ndef maybe_convert_platform_interval(values):\r\n \"\"\"\r\n Try to do platform conversion, with special casing for IntervalArray.\r\n Wrapper around maybe_convert_platform that alters the default return\r\n dtype in certain cases to be compatible with IntervalArray. For example,\r\n empty lists return with integer dtype instead of object dtype, which is\r\n prohibited for IntervalArray.\r\n\r\n Parameters\r\n ----------\r\n values : array-like\r\n\r\n Returns\r\n -------\r\n array\r\n \"\"\"\r\n if isinstance(values, (list, tuple)) and len(values) == 0:\r\n # GH 19016\r\n # empty lists/tuples get object dtype by default, but this is\r\n # prohibited for IntervalArray, so coerce to integer instead\r\n return np.array([], dtype=np.int64)\r\n elif is_categorical_dtype(values):\r\n values = np.asarray(values)\r\n\r\n return maybe_convert_platform(values)\r\n",
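(Editor's illustration, not part of the dataset row above.) The IntervalArray module stored in this row defines several constructors and elementwise predicates whose expected output is spelled out in its own docstrings; the sketch below simply replays those documented examples:

import pandas as pd

arr = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])

arr.contains(0.5)                    # array([ True, False, False])
arr.overlaps(pd.Interval(0.5, 1.5))  # array([ True,  True, False])
arr.set_closed("both")               # same bounds, now closed on both sides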
"from datetime import datetime, timedelta\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas.errors import UnsupportedFunctionCall\r\nimport pandas.util._test_decorators as td\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Index, Series\r\nimport pandas._testing as tm\r\nfrom pandas.core.window import Rolling\r\nfrom pandas.tests.window.common import Base\r\n\r\n\r\nclass TestRolling(Base):\r\n def setup_method(self, method):\r\n self._create_data()\r\n\r\n def test_doc_string(self):\r\n\r\n df = DataFrame({\"B\": [0, 1, 2, np.nan, 4]})\r\n df\r\n df.rolling(2).sum()\r\n df.rolling(2, min_periods=1).sum()\r\n\r\n @pytest.mark.parametrize(\"which\", [\"series\", \"frame\"])\r\n def test_constructor(self, which):\r\n # GH 12669\r\n\r\n o = getattr(self, which)\r\n c = o.rolling\r\n\r\n # valid\r\n c(window=2)\r\n c(window=2, min_periods=1)\r\n c(window=2, min_periods=1, center=True)\r\n c(window=2, min_periods=1, center=False)\r\n\r\n # GH 13383\r\n with pytest.raises(ValueError):\r\n c(0)\r\n c(-1)\r\n\r\n # not valid\r\n for w in [2.0, \"foo\", np.array([2])]:\r\n with pytest.raises(ValueError):\r\n c(window=w)\r\n with pytest.raises(ValueError):\r\n c(window=2, min_periods=w)\r\n with pytest.raises(ValueError):\r\n c(window=2, min_periods=1, center=w)\r\n\r\n @td.skip_if_no_scipy\r\n @pytest.mark.parametrize(\"which\", [\"series\", \"frame\"])\r\n def test_constructor_with_win_type(self, which):\r\n # GH 13383\r\n o = getattr(self, which)\r\n c = o.rolling\r\n with pytest.raises(ValueError):\r\n c(-1, win_type=\"boxcar\")\r\n\r\n @pytest.mark.parametrize(\"window\", [timedelta(days=3), pd.Timedelta(days=3)])\r\n def test_constructor_with_timedelta_window(self, window):\r\n # GH 15440\r\n n = 10\r\n df = DataFrame(\r\n {\"value\": np.arange(n)},\r\n index=pd.date_range(\"2015-12-24\", periods=n, freq=\"D\"),\r\n )\r\n expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3))\r\n\r\n result = df.rolling(window=window).sum()\r\n expected = DataFrame(\r\n {\"value\": expected_data},\r\n index=pd.date_range(\"2015-12-24\", periods=n, freq=\"D\"),\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n expected = df.rolling(\"3D\").sum()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"window\", [timedelta(days=3), pd.Timedelta(days=3), \"3D\"])\r\n def test_constructor_timedelta_window_and_minperiods(self, window, raw):\r\n # GH 15305\r\n n = 10\r\n df = DataFrame(\r\n {\"value\": np.arange(n)},\r\n index=pd.date_range(\"2017-08-08\", periods=n, freq=\"D\"),\r\n )\r\n expected = DataFrame(\r\n {\"value\": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))},\r\n index=pd.date_range(\"2017-08-08\", periods=n, freq=\"D\"),\r\n )\r\n result_roll_sum = df.rolling(window=window, min_periods=2).sum()\r\n result_roll_generic = df.rolling(window=window, min_periods=2).apply(\r\n sum, raw=raw\r\n )\r\n tm.assert_frame_equal(result_roll_sum, expected)\r\n tm.assert_frame_equal(result_roll_generic, expected)\r\n\r\n @pytest.mark.parametrize(\"method\", [\"std\", \"mean\", \"sum\", \"max\", \"min\", \"var\"])\r\n def test_numpy_compat(self, method):\r\n # see gh-12811\r\n r = Rolling(Series([2, 4, 6]), window=2)\r\n\r\n msg = \"numpy operations are not valid with window objects\"\r\n\r\n with pytest.raises(UnsupportedFunctionCall, match=msg):\r\n getattr(r, method)(1, 2, 3)\r\n with pytest.raises(UnsupportedFunctionCall, match=msg):\r\n getattr(r, method)(dtype=np.float64)\r\n\r\n def test_closed(self):\r\n df = DataFrame({\"A\": [0, 1, 2, 3, 4]})\r\n 
# closed only allowed for datetimelike\r\n with pytest.raises(ValueError):\r\n df.rolling(window=3, closed=\"neither\")\r\n\r\n @pytest.mark.parametrize(\"closed\", [\"neither\", \"left\"])\r\n def test_closed_empty(self, closed, arithmetic_win_operators):\r\n # GH 26005\r\n func_name = arithmetic_win_operators\r\n ser = pd.Series(\r\n data=np.arange(5), index=pd.date_range(\"2000\", periods=5, freq=\"2D\")\r\n )\r\n roll = ser.rolling(\"1D\", closed=closed)\r\n\r\n result = getattr(roll, func_name)()\r\n expected = pd.Series([np.nan] * 5, index=ser.index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"func\", [\"min\", \"max\"])\r\n def test_closed_one_entry(self, func):\r\n # GH24718\r\n ser = pd.Series(data=[2], index=pd.date_range(\"2000\", periods=1))\r\n result = getattr(ser.rolling(\"10D\", closed=\"left\"), func)()\r\n tm.assert_series_equal(result, pd.Series([np.nan], index=ser.index))\r\n\r\n @pytest.mark.parametrize(\"func\", [\"min\", \"max\"])\r\n def test_closed_one_entry_groupby(self, func):\r\n # GH24718\r\n ser = pd.DataFrame(\r\n data={\"A\": [1, 1, 2], \"B\": [3, 2, 1]},\r\n index=pd.date_range(\"2000\", periods=3),\r\n )\r\n result = getattr(\r\n ser.groupby(\"A\", sort=False)[\"B\"].rolling(\"10D\", closed=\"left\"), func\r\n )()\r\n exp_idx = pd.MultiIndex.from_arrays(\r\n arrays=[[1, 1, 2], ser.index], names=(\"A\", None)\r\n )\r\n expected = pd.Series(data=[np.nan, 3, np.nan], index=exp_idx, name=\"B\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"input_dtype\", [\"int\", \"float\"])\r\n @pytest.mark.parametrize(\r\n \"func,closed,expected\",\r\n [\r\n (\"min\", \"right\", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\r\n (\"min\", \"both\", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\r\n (\"min\", \"neither\", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\r\n (\"min\", \"left\", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\r\n (\"max\", \"right\", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\r\n (\"max\", \"both\", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\r\n (\"max\", \"neither\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\r\n (\"max\", \"left\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\r\n ],\r\n )\r\n def test_closed_min_max_datetime(self, input_dtype, func, closed, expected):\r\n # see gh-21704\r\n ser = pd.Series(\r\n data=np.arange(10).astype(input_dtype),\r\n index=pd.date_range(\"2000\", periods=10),\r\n )\r\n\r\n result = getattr(ser.rolling(\"3D\", closed=closed), func)()\r\n expected = pd.Series(expected, index=ser.index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_closed_uneven(self):\r\n # see gh-21704\r\n ser = pd.Series(data=np.arange(10), index=pd.date_range(\"2000\", periods=10))\r\n\r\n # uneven\r\n ser = ser.drop(index=ser.index[[1, 5]])\r\n result = ser.rolling(\"3D\", closed=\"left\").min()\r\n expected = pd.Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"func,closed,expected\",\r\n [\r\n (\"min\", \"right\", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\r\n (\"min\", \"both\", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]),\r\n (\"min\", \"neither\", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\r\n (\"min\", \"left\", [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]),\r\n (\"max\", \"right\", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]),\r\n (\"max\", \"both\", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]),\r\n (\"max\", \"neither\", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]),\r\n (\"max\", \"left\", [np.nan, np.nan, 
1, 2, 3, 4, 5, 6, 6, np.nan]),\r\n ],\r\n )\r\n def test_closed_min_max_minp(self, func, closed, expected):\r\n # see gh-21704\r\n ser = pd.Series(data=np.arange(10), index=pd.date_range(\"2000\", periods=10))\r\n ser[ser.index[-3:]] = np.nan\r\n result = getattr(ser.rolling(\"3D\", min_periods=2, closed=closed), func)()\r\n expected = pd.Series(expected, index=ser.index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"closed,expected\",\r\n [\r\n (\"right\", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]),\r\n (\"both\", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\r\n (\"neither\", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\r\n (\"left\", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7]),\r\n ],\r\n )\r\n def test_closed_median_quantile(self, closed, expected):\r\n # GH 26005\r\n ser = pd.Series(data=np.arange(10), index=pd.date_range(\"2000\", periods=10))\r\n roll = ser.rolling(\"3D\", closed=closed)\r\n expected = pd.Series(expected, index=ser.index)\r\n\r\n result = roll.median()\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = roll.quantile(0.5)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"roller\", [\"1s\", 1])\r\n def tests_empty_df_rolling(self, roller):\r\n # GH 15819 Verifies that datetime and integer rolling windows can be\r\n # applied to empty DataFrames\r\n expected = DataFrame()\r\n result = DataFrame().rolling(roller).sum()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # Verifies that datetime and integer rolling windows can be applied to\r\n # empty DataFrames with datetime index\r\n expected = DataFrame(index=pd.DatetimeIndex([]))\r\n result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_empty_window_median_quantile(self):\r\n # GH 26005\r\n expected = pd.Series([np.nan, np.nan, np.nan])\r\n roll = pd.Series(np.arange(3)).rolling(0)\r\n\r\n result = roll.median()\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = roll.quantile(0.1)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_missing_minp_zero(self):\r\n # https://github.com/pandas-dev/pandas/pull/18921\r\n # minp=0\r\n x = pd.Series([np.nan])\r\n result = x.rolling(1, min_periods=0).sum()\r\n expected = pd.Series([0.0])\r\n tm.assert_series_equal(result, expected)\r\n\r\n # minp=1\r\n result = x.rolling(1, min_periods=1).sum()\r\n expected = pd.Series([np.nan])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_missing_minp_zero_variable(self):\r\n # https://github.com/pandas-dev/pandas/pull/18921\r\n x = pd.Series(\r\n [np.nan] * 4,\r\n index=pd.DatetimeIndex(\r\n [\"2017-01-01\", \"2017-01-04\", \"2017-01-06\", \"2017-01-07\"]\r\n ),\r\n )\r\n result = x.rolling(pd.Timedelta(\"2d\"), min_periods=0).sum()\r\n expected = pd.Series(0.0, index=x.index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_multi_index_names(self):\r\n\r\n # GH 16789, 16825\r\n cols = pd.MultiIndex.from_product(\r\n [[\"A\", \"B\"], [\"C\", \"D\", \"E\"]], names=[\"1\", \"2\"]\r\n )\r\n df = DataFrame(np.ones((10, 6)), columns=cols)\r\n result = df.rolling(3).cov()\r\n\r\n tm.assert_index_equal(result.columns, df.columns)\r\n assert result.index.names == [None, \"1\", \"2\"]\r\n\r\n @pytest.mark.parametrize(\"klass\", [pd.Series, pd.DataFrame])\r\n def test_iter_raises(self, klass):\r\n # https://github.com/pandas-dev/pandas/issues/11704\r\n # Iteration over a Window\r\n obj = klass([1, 2, 3, 4])\r\n with 
pytest.raises(NotImplementedError):\r\n iter(obj.rolling(2))\r\n\r\n def test_rolling_axis_sum(self, axis_frame):\r\n # see gh-23372.\r\n df = DataFrame(np.ones((10, 20)))\r\n axis = df._get_axis_number(axis_frame)\r\n\r\n if axis == 0:\r\n expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)})\r\n else:\r\n # axis == 1\r\n expected = DataFrame([[np.nan] * 2 + [3.0] * 18] * 10)\r\n\r\n result = df.rolling(3, axis=axis_frame).sum()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_rolling_axis_count(self, axis_frame):\r\n # see gh-26055\r\n df = DataFrame({\"x\": range(3), \"y\": range(3)})\r\n\r\n axis = df._get_axis_number(axis_frame)\r\n\r\n if axis in [0, \"index\"]:\r\n expected = DataFrame({\"x\": [1.0, 2.0, 2.0], \"y\": [1.0, 2.0, 2.0]})\r\n else:\r\n expected = DataFrame({\"x\": [1.0, 1.0, 1.0], \"y\": [2.0, 2.0, 2.0]})\r\n\r\n result = df.rolling(2, axis=axis_frame, min_periods=0).count()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_readonly_array(self):\r\n # GH-27766\r\n arr = np.array([1, 3, np.nan, 3, 5])\r\n arr.setflags(write=False)\r\n result = pd.Series(arr).rolling(2).mean()\r\n expected = pd.Series([np.nan, 2, np.nan, np.nan, 4])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_rolling_datetime(self, axis_frame, tz_naive_fixture):\r\n # GH-28192\r\n tz = tz_naive_fixture\r\n df = pd.DataFrame(\r\n {\r\n i: [1] * 2\r\n for i in pd.date_range(\"2019-8-01\", \"2019-08-03\", freq=\"D\", tz=tz)\r\n }\r\n )\r\n if axis_frame in [0, \"index\"]:\r\n result = df.T.rolling(\"2D\", axis=axis_frame).sum().T\r\n else:\r\n result = df.rolling(\"2D\", axis=axis_frame).sum()\r\n expected = pd.DataFrame(\r\n {\r\n **{\r\n i: [1.0] * 2\r\n for i in pd.date_range(\"2019-8-01\", periods=1, freq=\"D\", tz=tz)\r\n },\r\n **{\r\n i: [2.0] * 2\r\n for i in pd.date_range(\"2019-8-02\", \"2019-8-03\", freq=\"D\", tz=tz)\r\n },\r\n }\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_rolling_window_as_string():\r\n # see gh-22590\r\n date_today = datetime.now()\r\n days = pd.date_range(date_today, date_today + timedelta(365), freq=\"D\")\r\n\r\n npr = np.random.RandomState(seed=421)\r\n\r\n data = npr.randint(1, high=100, size=len(days))\r\n df = DataFrame({\"DateCol\": days, \"metric\": data})\r\n\r\n df.set_index(\"DateCol\", inplace=True)\r\n result = df.rolling(window=\"21D\", min_periods=2, closed=\"left\")[\"metric\"].agg(\"max\")\r\n\r\n expData = (\r\n [np.nan] * 2\r\n + [88.0] * 16\r\n + [97.0] * 9\r\n + [98.0]\r\n + [99.0] * 21\r\n + [95.0] * 16\r\n + [93.0] * 5\r\n + [89.0] * 5\r\n + [96.0] * 21\r\n + [94.0] * 14\r\n + [90.0] * 13\r\n + [88.0] * 2\r\n + [90.0] * 9\r\n + [96.0] * 21\r\n + [95.0] * 6\r\n + [91.0]\r\n + [87.0] * 6\r\n + [92.0] * 21\r\n + [83.0] * 2\r\n + [86.0] * 10\r\n + [87.0] * 5\r\n + [98.0] * 21\r\n + [97.0] * 14\r\n + [93.0] * 7\r\n + [87.0] * 4\r\n + [86.0] * 4\r\n + [95.0] * 21\r\n + [85.0] * 14\r\n + [83.0] * 2\r\n + [76.0] * 5\r\n + [81.0] * 2\r\n + [98.0] * 21\r\n + [95.0] * 14\r\n + [91.0] * 7\r\n + [86.0]\r\n + [93.0] * 3\r\n + [95.0] * 20\r\n )\r\n\r\n expected = Series(expData, index=Index(days, name=\"DateCol\"), name=\"metric\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_min_periods1():\r\n # GH#6795\r\n df = pd.DataFrame([0, 1, 2, 1, 0], columns=[\"a\"])\r\n result = df[\"a\"].rolling(3, center=True, min_periods=1).max()\r\n expected = pd.Series([1.0, 2.0, 2.0, 2.0, 1.0], name=\"a\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\[email 
protected](\"constructor\", [Series, DataFrame])\r\ndef test_rolling_count_with_min_periods(constructor):\r\n # GH 26996\r\n result = constructor(range(5)).rolling(3, min_periods=3).count()\r\n expected = constructor([np.nan, np.nan, 3.0, 3.0, 3.0])\r\n tm.assert_equal(result, expected)\r\n\r\n\r\[email protected](\"constructor\", [Series, DataFrame])\r\ndef test_rolling_count_default_min_periods_with_null_values(constructor):\r\n # GH 26996\r\n values = [1, 2, 3, np.nan, 4, 5, 6]\r\n expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0]\r\n\r\n result = constructor(values).rolling(3).count()\r\n expected = constructor(expected_counts)\r\n tm.assert_equal(result, expected)\r\n",
"\"\"\"\r\nExpressions\r\n-----------\r\n\r\nOffer fast expression evaluation through numexpr\r\n\r\n\"\"\"\r\n\r\nimport warnings\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._config import get_option\r\n\r\nfrom pandas._libs.lib import values_from_object\r\n\r\nfrom pandas.core.dtypes.generic import ABCDataFrame\r\n\r\nfrom pandas.core.computation.check import _NUMEXPR_INSTALLED\r\n\r\nif _NUMEXPR_INSTALLED:\r\n import numexpr as ne\r\n\r\n_TEST_MODE = None\r\n_TEST_RESULT = None\r\n_USE_NUMEXPR = _NUMEXPR_INSTALLED\r\n_evaluate = None\r\n_where = None\r\n\r\n# the set of dtypes that we will allow pass to numexpr\r\n_ALLOWED_DTYPES = {\r\n \"evaluate\": {\"int64\", \"int32\", \"float64\", \"float32\", \"bool\"},\r\n \"where\": {\"int64\", \"float64\", \"bool\"},\r\n}\r\n\r\n# the minimum prod shape that we will use numexpr\r\n_MIN_ELEMENTS = 10000\r\n\r\n\r\ndef set_use_numexpr(v=True):\r\n # set/unset to use numexpr\r\n global _USE_NUMEXPR\r\n if _NUMEXPR_INSTALLED:\r\n _USE_NUMEXPR = v\r\n\r\n # choose what we are going to do\r\n global _evaluate, _where\r\n if not _USE_NUMEXPR:\r\n _evaluate = _evaluate_standard\r\n _where = _where_standard\r\n else:\r\n _evaluate = _evaluate_numexpr\r\n _where = _where_numexpr\r\n\r\n\r\ndef set_numexpr_threads(n=None):\r\n # if we are using numexpr, set the threads to n\r\n # otherwise reset\r\n if _NUMEXPR_INSTALLED and _USE_NUMEXPR:\r\n if n is None:\r\n n = ne.detect_number_of_cores()\r\n ne.set_num_threads(n)\r\n\r\n\r\ndef _evaluate_standard(op, op_str, a, b):\r\n \"\"\" standard evaluation \"\"\"\r\n if _TEST_MODE:\r\n _store_test_result(False)\r\n with np.errstate(all=\"ignore\"):\r\n return op(a, b)\r\n\r\n\r\ndef _can_use_numexpr(op, op_str, a, b, dtype_check):\r\n \"\"\" return a boolean if we WILL be using numexpr \"\"\"\r\n if op_str is not None:\r\n\r\n # required min elements (otherwise we are adding overhead)\r\n if np.prod(a.shape) > _MIN_ELEMENTS:\r\n # check for dtype compatibility\r\n dtypes = set()\r\n for o in [a, b]:\r\n # Series implements dtypes, check for dimension count as well\r\n if hasattr(o, \"dtypes\") and o.ndim > 1:\r\n s = o.dtypes.value_counts()\r\n if len(s) > 1:\r\n return False\r\n dtypes |= set(s.index.astype(str))\r\n # ndarray and Series Case\r\n elif hasattr(o, \"dtype\"):\r\n dtypes |= {o.dtype.name}\r\n\r\n # allowed are a superset\r\n if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef _evaluate_numexpr(op, op_str, a, b):\r\n result = None\r\n\r\n if _can_use_numexpr(op, op_str, a, b, \"evaluate\"):\r\n is_reversed = op.__name__.strip(\"_\").startswith(\"r\")\r\n if is_reversed:\r\n # we were originally called by a reversed op method\r\n a, b = b, a\r\n\r\n a_value = getattr(a, \"values\", a)\r\n b_value = getattr(b, \"values\", b)\r\n\r\n result = ne.evaluate(\r\n f\"a_value {op_str} b_value\",\r\n local_dict={\"a_value\": a_value, \"b_value\": b_value},\r\n casting=\"safe\",\r\n )\r\n\r\n if _TEST_MODE:\r\n _store_test_result(result is not None)\r\n\r\n if result is None:\r\n result = _evaluate_standard(op, op_str, a, b)\r\n\r\n return result\r\n\r\n\r\ndef _where_standard(cond, a, b):\r\n return np.where(\r\n values_from_object(cond), values_from_object(a), values_from_object(b)\r\n )\r\n\r\n\r\ndef _where_numexpr(cond, a, b):\r\n result = None\r\n\r\n if _can_use_numexpr(None, \"where\", a, b, \"where\"):\r\n cond_value = getattr(cond, \"values\", cond)\r\n a_value = getattr(a, \"values\", a)\r\n b_value = getattr(b, \"values\", b)\r\n\r\n 
result = ne.evaluate(\r\n \"where(cond_value, a_value, b_value)\",\r\n local_dict={\r\n \"cond_value\": cond_value,\r\n \"a_value\": a_value,\r\n \"b_value\": b_value,\r\n },\r\n casting=\"safe\",\r\n )\r\n\r\n if result is None:\r\n result = _where_standard(cond, a, b)\r\n\r\n return result\r\n\r\n\r\n# turn myself on\r\nset_use_numexpr(get_option(\"compute.use_numexpr\"))\r\n\r\n\r\ndef _has_bool_dtype(x):\r\n if isinstance(x, ABCDataFrame):\r\n return \"bool\" in x.dtypes\r\n try:\r\n return x.dtype == bool\r\n except AttributeError:\r\n return isinstance(x, (bool, np.bool_))\r\n\r\n\r\ndef _bool_arith_check(\r\n op_str, a, b, not_allowed=frozenset((\"/\", \"//\", \"**\")), unsupported=None\r\n):\r\n if unsupported is None:\r\n unsupported = {\"+\": \"|\", \"*\": \"&\", \"-\": \"^\"}\r\n\r\n if _has_bool_dtype(a) and _has_bool_dtype(b):\r\n if op_str in unsupported:\r\n warnings.warn(\r\n f\"evaluating in Python space because the {repr(op_str)} \"\r\n f\"operator is not supported by numexpr for \"\r\n f\"the bool dtype, use {repr(unsupported[op_str])} instead\"\r\n )\r\n return False\r\n\r\n if op_str in not_allowed:\r\n raise NotImplementedError(\r\n f\"operator {repr(op_str)} not implemented for bool dtypes\"\r\n )\r\n return True\r\n\r\n\r\ndef evaluate(op, op_str, a, b, use_numexpr=True):\r\n \"\"\"\r\n Evaluate and return the expression of the op on a and b.\r\n\r\n Parameters\r\n ----------\r\n op : the actual operand\r\n op_str : str\r\n The string version of the op.\r\n a : left operand\r\n b : right operand\r\n use_numexpr : bool, default True\r\n Whether to try to use numexpr.\r\n \"\"\"\r\n\r\n use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)\r\n if use_numexpr:\r\n return _evaluate(op, op_str, a, b)\r\n return _evaluate_standard(op, op_str, a, b)\r\n\r\n\r\ndef where(cond, a, b, use_numexpr=True):\r\n \"\"\"\r\n Evaluate the where condition cond on a and b.\r\n\r\n Parameters\r\n ----------\r\n cond : np.ndarray[bool]\r\n a : return if cond is True\r\n b : return if cond is False\r\n use_numexpr : bool, default True\r\n Whether to try to use numexpr.\r\n \"\"\"\r\n\r\n if use_numexpr:\r\n return _where(cond, a, b)\r\n return _where_standard(cond, a, b)\r\n\r\n\r\ndef set_test_mode(v=True):\r\n \"\"\"\r\n Keeps track of whether numexpr was used. Stores an additional ``True``\r\n for every successful use of evaluate with numexpr since the last\r\n ``get_test_result``\r\n \"\"\"\r\n global _TEST_MODE, _TEST_RESULT\r\n _TEST_MODE = v\r\n _TEST_RESULT = []\r\n\r\n\r\ndef _store_test_result(used_numexpr):\r\n global _TEST_RESULT\r\n if used_numexpr:\r\n _TEST_RESULT.append(used_numexpr)\r\n\r\n\r\ndef get_test_result():\r\n \"\"\"get test result and reset test_results\"\"\"\r\n global _TEST_RESULT\r\n res = _TEST_RESULT\r\n _TEST_RESULT = []\r\n return res\r\n",
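For reference, the dispatch in the module above boils down to: hand the operation to numexpr only when the operands are large enough and have allowed dtypes, otherwise fall back to the plain NumPy operator. A self-contained sketch of that pattern (the threshold and the single-operation `fast_add` helper are illustrative, not part of the pandas API):

import operator
import numpy as np

try:
    import numexpr as ne
    _HAS_NUMEXPR = True
except ImportError:
    _HAS_NUMEXPR = False

_MIN_ELEMENTS = 10_000  # below this, numexpr's overhead outweighs its gains


def fast_add(a, b):
    # Delegate to numexpr only for large arrays with allowed dtypes,
    # mirroring the _can_use_numexpr gate above; otherwise use NumPy.
    if (
        _HAS_NUMEXPR
        and a.size > _MIN_ELEMENTS
        and {a.dtype.name, b.dtype.name} <= {"int64", "float64"}
    ):
        return ne.evaluate("a + b", local_dict={"a": a, "b": b}, casting="safe")
    with np.errstate(all="ignore"):
        return operator.add(a, b)


x = np.random.rand(100_000)
y = np.random.rand(100_000)
np.testing.assert_allclose(fast_add(x, y), x + y)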
"from datetime import datetime, timedelta\r\nfrom functools import partial\r\nfrom io import StringIO\r\n\r\nimport numpy as np\r\nimport pytest\r\nimport pytz\r\n\r\nfrom pandas.errors import UnsupportedFunctionCall\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Series, Timedelta, Timestamp, isna, notna\r\nimport pandas._testing as tm\r\nfrom pandas.core.groupby.grouper import Grouper\r\nfrom pandas.core.indexes.datetimes import date_range\r\nfrom pandas.core.indexes.period import Period, period_range\r\nfrom pandas.core.resample import DatetimeIndex, _get_timestamp_range_edges\r\n\r\nimport pandas.tseries.offsets as offsets\r\nfrom pandas.tseries.offsets import BDay, Minute\r\n\r\n\r\[email protected]()\r\ndef _index_factory():\r\n return date_range\r\n\r\n\r\[email protected]\r\ndef _index_freq():\r\n return \"Min\"\r\n\r\n\r\[email protected]\r\ndef _static_values(index):\r\n return np.random.rand(len(index))\r\n\r\n\r\ndef test_custom_grouper(index):\r\n\r\n dti = index\r\n s = Series(np.array([1] * len(dti)), index=dti, dtype=\"int64\")\r\n\r\n b = Grouper(freq=Minute(5))\r\n g = s.groupby(b)\r\n\r\n # check all cython functions work\r\n funcs = [\"add\", \"mean\", \"prod\", \"ohlc\", \"min\", \"max\", \"var\"]\r\n for f in funcs:\r\n g._cython_agg_general(f)\r\n\r\n b = Grouper(freq=Minute(5), closed=\"right\", label=\"right\")\r\n g = s.groupby(b)\r\n # check all cython functions work\r\n funcs = [\"add\", \"mean\", \"prod\", \"ohlc\", \"min\", \"max\", \"var\"]\r\n for f in funcs:\r\n g._cython_agg_general(f)\r\n\r\n assert g.ngroups == 2593\r\n assert notna(g.mean()).all()\r\n\r\n # construct expected val\r\n arr = [1] + [5] * 2592\r\n idx = dti[0:-1:5]\r\n idx = idx.append(dti[-1:])\r\n expect = Series(arr, index=idx)\r\n\r\n # GH2763 - return in put dtype if we can\r\n result = g.agg(np.sum)\r\n tm.assert_series_equal(result, expect)\r\n\r\n df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype=\"float64\")\r\n r = df.groupby(b).agg(np.sum)\r\n\r\n assert len(r.columns) == 10\r\n assert len(r.index) == 2593\r\n\r\n\r\[email protected](\r\n \"_index_start,_index_end,_index_name\",\r\n [(\"1/1/2000 00:00:00\", \"1/1/2000 00:13:00\", \"index\")],\r\n)\r\[email protected](\r\n \"closed, expected\",\r\n [\r\n (\r\n \"right\",\r\n lambda s: Series(\r\n [s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],\r\n index=date_range(\"1/1/2000\", periods=4, freq=\"5min\", name=\"index\"),\r\n ),\r\n ),\r\n (\r\n \"left\",\r\n lambda s: Series(\r\n [s[:5].mean(), s[5:10].mean(), s[10:].mean()],\r\n index=date_range(\r\n \"1/1/2000 00:05\", periods=3, freq=\"5min\", name=\"index\"\r\n ),\r\n ),\r\n ),\r\n ],\r\n)\r\ndef test_resample_basic(series, closed, expected):\r\n s = series\r\n expected = expected(s)\r\n result = s.resample(\"5min\", closed=closed, label=\"right\").mean()\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_integerarray():\r\n # GH 25580, resample on IntegerArray\r\n ts = pd.Series(\r\n range(9), index=pd.date_range(\"1/1/2000\", periods=9, freq=\"T\"), dtype=\"Int64\"\r\n )\r\n result = ts.resample(\"3T\").sum()\r\n expected = Series(\r\n [3, 12, 21],\r\n index=pd.date_range(\"1/1/2000\", periods=3, freq=\"3T\"),\r\n dtype=\"Int64\",\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ts.resample(\"3T\").mean()\r\n expected = Series(\r\n [1, 4, 7],\r\n index=pd.date_range(\"1/1/2000\", periods=3, freq=\"3T\"),\r\n dtype=\"float64\",\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef 
test_resample_basic_grouper(series):\r\n s = series\r\n result = s.resample(\"5Min\").last()\r\n grouper = Grouper(freq=Minute(5), closed=\"left\", label=\"left\")\r\n expected = s.groupby(grouper).agg(lambda x: x[-1])\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"_index_start,_index_end,_index_name\",\r\n [(\"1/1/2000 00:00:00\", \"1/1/2000 00:13:00\", \"index\")],\r\n)\r\[email protected](\r\n \"keyword,value\",\r\n [(\"label\", \"righttt\"), (\"closed\", \"righttt\"), (\"convention\", \"starttt\")],\r\n)\r\ndef test_resample_string_kwargs(series, keyword, value):\r\n # see gh-19303\r\n # Check that wrong keyword argument strings raise an error\r\n msg = f\"Unsupported value {value} for `{keyword}`\"\r\n with pytest.raises(ValueError, match=msg):\r\n series.resample(\"5min\", **({keyword: value}))\r\n\r\n\r\[email protected](\r\n \"_index_start,_index_end,_index_name\",\r\n [(\"1/1/2000 00:00:00\", \"1/1/2000 00:13:00\", \"index\")],\r\n)\r\ndef test_resample_how(series, downsample_method):\r\n if downsample_method == \"ohlc\":\r\n pytest.skip(\"covered by test_resample_how_ohlc\")\r\n\r\n s = series\r\n grouplist = np.ones_like(s)\r\n grouplist[0] = 0\r\n grouplist[1:6] = 1\r\n grouplist[6:11] = 2\r\n grouplist[11:] = 3\r\n expected = s.groupby(grouplist).agg(downsample_method)\r\n expected.index = date_range(\"1/1/2000\", periods=4, freq=\"5min\", name=\"index\")\r\n\r\n result = getattr(\r\n s.resample(\"5min\", closed=\"right\", label=\"right\"), downsample_method\r\n )()\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"_index_start,_index_end,_index_name\",\r\n [(\"1/1/2000 00:00:00\", \"1/1/2000 00:13:00\", \"index\")],\r\n)\r\ndef test_resample_how_ohlc(series):\r\n s = series\r\n grouplist = np.ones_like(s)\r\n grouplist[0] = 0\r\n grouplist[1:6] = 1\r\n grouplist[6:11] = 2\r\n grouplist[11:] = 3\r\n\r\n def _ohlc(group):\r\n if isna(group).all():\r\n return np.repeat(np.nan, 4)\r\n return [group[0], group.max(), group.min(), group[-1]]\r\n\r\n expected = DataFrame(\r\n s.groupby(grouplist).agg(_ohlc).values.tolist(),\r\n index=date_range(\"1/1/2000\", periods=4, freq=\"5min\", name=\"index\"),\r\n columns=[\"open\", \"high\", \"low\", \"close\"],\r\n )\r\n\r\n result = s.resample(\"5min\", closed=\"right\", label=\"right\").ohlc()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\[email protected](\"func\", [\"min\", \"max\", \"sum\", \"prod\", \"mean\", \"var\", \"std\"])\r\ndef test_numpy_compat(func):\r\n # see gh-12811\r\n s = Series([1, 2, 3, 4, 5], index=date_range(\"20130101\", periods=5, freq=\"s\"))\r\n r = s.resample(\"2s\")\r\n\r\n msg = \"numpy operations are not valid with resample\"\r\n\r\n with pytest.raises(UnsupportedFunctionCall, match=msg):\r\n getattr(r, func)(func, 1, 2, 3)\r\n with pytest.raises(UnsupportedFunctionCall, match=msg):\r\n getattr(r, func)(axis=1)\r\n\r\n\r\ndef test_resample_how_callables():\r\n # GH#7929\r\n data = np.arange(5, dtype=np.int64)\r\n ind = date_range(start=\"2014-01-01\", periods=len(data), freq=\"d\")\r\n df = DataFrame({\"A\": data, \"B\": data}, index=ind)\r\n\r\n def fn(x, a=1):\r\n return str(type(x))\r\n\r\n class FnClass:\r\n def __call__(self, x):\r\n return str(type(x))\r\n\r\n df_standard = df.resample(\"M\").apply(fn)\r\n df_lambda = df.resample(\"M\").apply(lambda x: str(type(x)))\r\n df_partial = df.resample(\"M\").apply(partial(fn))\r\n df_partial2 = df.resample(\"M\").apply(partial(fn, a=2))\r\n df_class = 
df.resample(\"M\").apply(FnClass())\r\n\r\n tm.assert_frame_equal(df_standard, df_lambda)\r\n tm.assert_frame_equal(df_standard, df_partial)\r\n tm.assert_frame_equal(df_standard, df_partial2)\r\n tm.assert_frame_equal(df_standard, df_class)\r\n\r\n\r\ndef test_resample_rounding():\r\n # GH 8371\r\n # odd results when rounding is needed\r\n\r\n data = \"\"\"date,time,value\r\n11-08-2014,00:00:01.093,1\r\n11-08-2014,00:00:02.159,1\r\n11-08-2014,00:00:02.667,1\r\n11-08-2014,00:00:03.175,1\r\n11-08-2014,00:00:07.058,1\r\n11-08-2014,00:00:07.362,1\r\n11-08-2014,00:00:08.324,1\r\n11-08-2014,00:00:08.830,1\r\n11-08-2014,00:00:08.982,1\r\n11-08-2014,00:00:09.815,1\r\n11-08-2014,00:00:10.540,1\r\n11-08-2014,00:00:11.061,1\r\n11-08-2014,00:00:11.617,1\r\n11-08-2014,00:00:13.607,1\r\n11-08-2014,00:00:14.535,1\r\n11-08-2014,00:00:15.525,1\r\n11-08-2014,00:00:17.960,1\r\n11-08-2014,00:00:20.674,1\r\n11-08-2014,00:00:21.191,1\"\"\"\r\n\r\n df = pd.read_csv(\r\n StringIO(data),\r\n parse_dates={\"timestamp\": [\"date\", \"time\"]},\r\n index_col=\"timestamp\",\r\n )\r\n df.index.name = None\r\n result = df.resample(\"6s\").sum()\r\n expected = DataFrame(\r\n {\"value\": [4, 9, 4, 2]}, index=date_range(\"2014-11-08\", freq=\"6s\", periods=4)\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.resample(\"7s\").sum()\r\n expected = DataFrame(\r\n {\"value\": [4, 10, 4, 1]}, index=date_range(\"2014-11-08\", freq=\"7s\", periods=4)\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.resample(\"11s\").sum()\r\n expected = DataFrame(\r\n {\"value\": [11, 8]}, index=date_range(\"2014-11-08\", freq=\"11s\", periods=2)\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.resample(\"13s\").sum()\r\n expected = DataFrame(\r\n {\"value\": [13, 6]}, index=date_range(\"2014-11-08\", freq=\"13s\", periods=2)\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.resample(\"17s\").sum()\r\n expected = DataFrame(\r\n {\"value\": [16, 3]}, index=date_range(\"2014-11-08\", freq=\"17s\", periods=2)\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_resample_basic_from_daily():\r\n # from daily\r\n dti = date_range(\r\n start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq=\"D\", name=\"index\"\r\n )\r\n\r\n s = Series(np.random.rand(len(dti)), dti)\r\n\r\n # to weekly\r\n result = s.resample(\"w-sun\").last()\r\n\r\n assert len(result) == 3\r\n assert (result.index.dayofweek == [6, 6, 6]).all()\r\n assert result.iloc[0] == s[\"1/2/2005\"]\r\n assert result.iloc[1] == s[\"1/9/2005\"]\r\n assert result.iloc[2] == s.iloc[-1]\r\n\r\n result = s.resample(\"W-MON\").last()\r\n assert len(result) == 2\r\n assert (result.index.dayofweek == [0, 0]).all()\r\n assert result.iloc[0] == s[\"1/3/2005\"]\r\n assert result.iloc[1] == s[\"1/10/2005\"]\r\n\r\n result = s.resample(\"W-TUE\").last()\r\n assert len(result) == 2\r\n assert (result.index.dayofweek == [1, 1]).all()\r\n assert result.iloc[0] == s[\"1/4/2005\"]\r\n assert result.iloc[1] == s[\"1/10/2005\"]\r\n\r\n result = s.resample(\"W-WED\").last()\r\n assert len(result) == 2\r\n assert (result.index.dayofweek == [2, 2]).all()\r\n assert result.iloc[0] == s[\"1/5/2005\"]\r\n assert result.iloc[1] == s[\"1/10/2005\"]\r\n\r\n result = s.resample(\"W-THU\").last()\r\n assert len(result) == 2\r\n assert (result.index.dayofweek == [3, 3]).all()\r\n assert result.iloc[0] == s[\"1/6/2005\"]\r\n assert result.iloc[1] == s[\"1/10/2005\"]\r\n\r\n result = s.resample(\"W-FRI\").last()\r\n assert 
len(result) == 2\r\n assert (result.index.dayofweek == [4, 4]).all()\r\n assert result.iloc[0] == s[\"1/7/2005\"]\r\n assert result.iloc[1] == s[\"1/10/2005\"]\r\n\r\n # to biz day\r\n result = s.resample(\"B\").last()\r\n assert len(result) == 7\r\n assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()\r\n\r\n assert result.iloc[0] == s[\"1/2/2005\"]\r\n assert result.iloc[1] == s[\"1/3/2005\"]\r\n assert result.iloc[5] == s[\"1/9/2005\"]\r\n assert result.index.name == \"index\"\r\n\r\n\r\ndef test_resample_upsampling_picked_but_not_correct():\r\n\r\n # Test for issue #3020\r\n dates = date_range(\"01-Jan-2014\", \"05-Jan-2014\", freq=\"D\")\r\n series = Series(1, index=dates)\r\n\r\n result = series.resample(\"D\").mean()\r\n assert result.index[0] == dates[0]\r\n\r\n # GH 5955\r\n # incorrect deciding to upsample when the axis frequency matches the\r\n # resample frequency\r\n\r\n s = Series(\r\n np.arange(1.0, 6), index=[datetime(1975, 1, i, 12, 0) for i in range(1, 6)]\r\n )\r\n expected = Series(\r\n np.arange(1.0, 6), index=date_range(\"19750101\", periods=5, freq=\"D\")\r\n )\r\n\r\n result = s.resample(\"D\").count()\r\n tm.assert_series_equal(result, Series(1, index=expected.index))\r\n\r\n result1 = s.resample(\"D\").sum()\r\n result2 = s.resample(\"D\").mean()\r\n tm.assert_series_equal(result1, expected)\r\n tm.assert_series_equal(result2, expected)\r\n\r\n\r\ndef test_resample_frame_basic():\r\n df = tm.makeTimeDataFrame()\r\n\r\n b = Grouper(freq=\"M\")\r\n g = df.groupby(b)\r\n\r\n # check all cython functions work\r\n funcs = [\"add\", \"mean\", \"prod\", \"min\", \"max\", \"var\"]\r\n for f in funcs:\r\n g._cython_agg_general(f)\r\n\r\n result = df.resample(\"A\").mean()\r\n tm.assert_series_equal(result[\"A\"], df[\"A\"].resample(\"A\").mean())\r\n\r\n result = df.resample(\"M\").mean()\r\n tm.assert_series_equal(result[\"A\"], df[\"A\"].resample(\"M\").mean())\r\n\r\n df.resample(\"M\", kind=\"period\").mean()\r\n df.resample(\"W-WED\", kind=\"period\").mean()\r\n\r\n\r\[email protected](\r\n \"loffset\", [timedelta(minutes=1), \"1min\", Minute(1), np.timedelta64(1, \"m\")]\r\n)\r\ndef test_resample_loffset(loffset):\r\n # GH 7687\r\n rng = date_range(\"1/1/2000 00:00:00\", \"1/1/2000 00:13:00\", freq=\"min\")\r\n s = Series(np.random.randn(14), index=rng)\r\n\r\n result = s.resample(\"5min\", closed=\"right\", label=\"right\", loffset=loffset).mean()\r\n idx = date_range(\"1/1/2000\", periods=4, freq=\"5min\")\r\n expected = Series(\r\n [s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],\r\n index=idx + timedelta(minutes=1),\r\n )\r\n tm.assert_series_equal(result, expected)\r\n assert result.index.freq == Minute(5)\r\n\r\n # from daily\r\n dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq=\"D\")\r\n ser = Series(np.random.rand(len(dti)), dti)\r\n\r\n # to weekly\r\n result = ser.resample(\"w-sun\").last()\r\n business_day_offset = BDay()\r\n expected = ser.resample(\"w-sun\", loffset=-business_day_offset).last()\r\n assert result.index[0] - business_day_offset == expected.index[0]\r\n\r\n\r\ndef test_resample_loffset_upsample():\r\n # GH 20744\r\n rng = date_range(\"1/1/2000 00:00:00\", \"1/1/2000 00:13:00\", freq=\"min\")\r\n s = Series(np.random.randn(14), index=rng)\r\n\r\n result = s.resample(\r\n \"5min\", closed=\"right\", label=\"right\", loffset=timedelta(minutes=1)\r\n ).ffill()\r\n idx = date_range(\"1/1/2000\", periods=4, freq=\"5min\")\r\n expected = Series([s[0], s[5], s[10], s[-1]], index=idx + 
timedelta(minutes=1))\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_loffset_count():\r\n # GH 12725\r\n start_time = \"1/1/2000 00:00:00\"\r\n rng = date_range(start_time, periods=100, freq=\"S\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n\r\n result = ts.resample(\"10S\", loffset=\"1s\").count()\r\n\r\n expected_index = date_range(start_time, periods=10, freq=\"10S\") + timedelta(\r\n seconds=1\r\n )\r\n expected = Series(10, index=expected_index)\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n # Same issue should apply to .size() since it goes through\r\n # same code path\r\n result = ts.resample(\"10S\", loffset=\"1s\").size()\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_upsample():\r\n # from daily\r\n dti = date_range(\r\n start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq=\"D\", name=\"index\"\r\n )\r\n\r\n s = Series(np.random.rand(len(dti)), dti)\r\n\r\n # to minutely, by padding\r\n result = s.resample(\"Min\").pad()\r\n assert len(result) == 12961\r\n assert result[0] == s[0]\r\n assert result[-1] == s[-1]\r\n\r\n assert result.index.name == \"index\"\r\n\r\n\r\ndef test_resample_how_method():\r\n # GH9915\r\n s = Series(\r\n [11, 22],\r\n index=[\r\n Timestamp(\"2015-03-31 21:48:52.672000\"),\r\n Timestamp(\"2015-03-31 21:49:52.739000\"),\r\n ],\r\n )\r\n expected = Series(\r\n [11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],\r\n index=[\r\n Timestamp(\"2015-03-31 21:48:50\"),\r\n Timestamp(\"2015-03-31 21:49:00\"),\r\n Timestamp(\"2015-03-31 21:49:10\"),\r\n Timestamp(\"2015-03-31 21:49:20\"),\r\n Timestamp(\"2015-03-31 21:49:30\"),\r\n Timestamp(\"2015-03-31 21:49:40\"),\r\n Timestamp(\"2015-03-31 21:49:50\"),\r\n ],\r\n )\r\n tm.assert_series_equal(s.resample(\"10S\").mean(), expected)\r\n\r\n\r\ndef test_resample_extra_index_point():\r\n # GH#9756\r\n index = date_range(start=\"20150101\", end=\"20150331\", freq=\"BM\")\r\n expected = DataFrame({\"A\": Series([21, 41, 63], index=index)})\r\n\r\n index = date_range(start=\"20150101\", end=\"20150331\", freq=\"B\")\r\n df = DataFrame({\"A\": Series(range(len(index)), index=index)}, dtype=\"int64\")\r\n result = df.resample(\"BM\").last()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_upsample_with_limit():\r\n rng = date_range(\"1/1/2000\", periods=3, freq=\"5t\")\r\n ts = Series(np.random.randn(len(rng)), rng)\r\n\r\n result = ts.resample(\"t\").ffill(limit=2)\r\n expected = ts.reindex(result.index, method=\"ffill\", limit=2)\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_nearest_upsample_with_limit():\r\n rng = date_range(\"1/1/2000\", periods=3, freq=\"5t\")\r\n ts = Series(np.random.randn(len(rng)), rng)\r\n\r\n result = ts.resample(\"t\").nearest(limit=2)\r\n expected = ts.reindex(result.index, method=\"nearest\", limit=2)\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_ohlc(series):\r\n s = series\r\n\r\n grouper = Grouper(freq=Minute(5))\r\n expect = s.groupby(grouper).agg(lambda x: x[-1])\r\n result = s.resample(\"5Min\").ohlc()\r\n\r\n assert len(result) == len(expect)\r\n assert len(result.columns) == 4\r\n\r\n xs = result.iloc[-2]\r\n assert xs[\"open\"] == s[-6]\r\n assert xs[\"high\"] == s[-6:-1].max()\r\n assert xs[\"low\"] == s[-6:-1].min()\r\n assert xs[\"close\"] == s[-2]\r\n\r\n xs = result.iloc[0]\r\n assert xs[\"open\"] == s[0]\r\n assert xs[\"high\"] == s[:5].max()\r\n assert xs[\"low\"] == s[:5].min()\r\n assert xs[\"close\"] == s[4]\r\n\r\n\r\ndef 
test_resample_ohlc_result():\r\n\r\n # GH 12332\r\n index = pd.date_range(\"1-1-2000\", \"2-15-2000\", freq=\"h\")\r\n index = index.union(pd.date_range(\"4-15-2000\", \"5-15-2000\", freq=\"h\"))\r\n s = Series(range(len(index)), index=index)\r\n\r\n a = s.loc[:\"4-15-2000\"].resample(\"30T\").ohlc()\r\n assert isinstance(a, DataFrame)\r\n\r\n b = s.loc[:\"4-14-2000\"].resample(\"30T\").ohlc()\r\n assert isinstance(b, DataFrame)\r\n\r\n # GH12348\r\n # raising on odd period\r\n rng = date_range(\"2013-12-30\", \"2014-01-07\")\r\n index = rng.drop(\r\n [\r\n Timestamp(\"2014-01-01\"),\r\n Timestamp(\"2013-12-31\"),\r\n Timestamp(\"2014-01-04\"),\r\n Timestamp(\"2014-01-05\"),\r\n ]\r\n )\r\n df = DataFrame(data=np.arange(len(index)), index=index)\r\n result = df.resample(\"B\").mean()\r\n expected = df.reindex(index=date_range(rng[0], rng[-1], freq=\"B\"))\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_resample_ohlc_dataframe():\r\n df = (\r\n DataFrame(\r\n {\r\n \"PRICE\": {\r\n Timestamp(\"2011-01-06 10:59:05\", tz=None): 24990,\r\n Timestamp(\"2011-01-06 12:43:33\", tz=None): 25499,\r\n Timestamp(\"2011-01-06 12:54:09\", tz=None): 25499,\r\n },\r\n \"VOLUME\": {\r\n Timestamp(\"2011-01-06 10:59:05\", tz=None): 1500000000,\r\n Timestamp(\"2011-01-06 12:43:33\", tz=None): 5000000000,\r\n Timestamp(\"2011-01-06 12:54:09\", tz=None): 100000000,\r\n },\r\n }\r\n )\r\n ).reindex([\"VOLUME\", \"PRICE\"], axis=1)\r\n res = df.resample(\"H\").ohlc()\r\n exp = pd.concat(\r\n [df[\"VOLUME\"].resample(\"H\").ohlc(), df[\"PRICE\"].resample(\"H\").ohlc()],\r\n axis=1,\r\n keys=[\"VOLUME\", \"PRICE\"],\r\n )\r\n tm.assert_frame_equal(exp, res)\r\n\r\n df.columns = [[\"a\", \"b\"], [\"c\", \"d\"]]\r\n res = df.resample(\"H\").ohlc()\r\n exp.columns = pd.MultiIndex.from_tuples(\r\n [\r\n (\"a\", \"c\", \"open\"),\r\n (\"a\", \"c\", \"high\"),\r\n (\"a\", \"c\", \"low\"),\r\n (\"a\", \"c\", \"close\"),\r\n (\"b\", \"d\", \"open\"),\r\n (\"b\", \"d\", \"high\"),\r\n (\"b\", \"d\", \"low\"),\r\n (\"b\", \"d\", \"close\"),\r\n ]\r\n )\r\n tm.assert_frame_equal(exp, res)\r\n\r\n # dupe columns fail atm\r\n # df.columns = ['PRICE', 'PRICE']\r\n\r\n\r\ndef test_resample_dup_index():\r\n\r\n # GH 4812\r\n # dup columns with resample raising\r\n df = DataFrame(\r\n np.random.randn(4, 12),\r\n index=[2000, 2000, 2000, 2000],\r\n columns=[Period(year=2000, month=i + 1, freq=\"M\") for i in range(12)],\r\n )\r\n df.iloc[3, :] = np.nan\r\n result = df.resample(\"Q\", axis=1).mean()\r\n expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()\r\n expected.columns = [Period(year=2000, quarter=i + 1, freq=\"Q\") for i in range(4)]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_resample_reresample():\r\n dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq=\"D\")\r\n s = Series(np.random.rand(len(dti)), dti)\r\n bs = s.resample(\"B\", closed=\"right\", label=\"right\").mean()\r\n result = bs.resample(\"8H\").mean()\r\n assert len(result) == 22\r\n assert isinstance(result.index.freq, offsets.DateOffset)\r\n assert result.index.freq == offsets.Hour(8)\r\n\r\n\r\ndef test_resample_timestamp_to_period(simple_date_range_series):\r\n ts = simple_date_range_series(\"1/1/1990\", \"1/1/2000\")\r\n\r\n result = ts.resample(\"A-DEC\", kind=\"period\").mean()\r\n expected = ts.resample(\"A-DEC\").mean()\r\n expected.index = period_range(\"1990\", \"2000\", freq=\"a-dec\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ts.resample(\"A-JUN\", 
kind=\"period\").mean()\r\n expected = ts.resample(\"A-JUN\").mean()\r\n expected.index = period_range(\"1990\", \"2000\", freq=\"a-jun\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ts.resample(\"M\", kind=\"period\").mean()\r\n expected = ts.resample(\"M\").mean()\r\n expected.index = period_range(\"1990-01\", \"2000-01\", freq=\"M\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ts.resample(\"M\", kind=\"period\").mean()\r\n expected = ts.resample(\"M\").mean()\r\n expected.index = period_range(\"1990-01\", \"2000-01\", freq=\"M\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_ohlc_5min():\r\n def _ohlc(group):\r\n if isna(group).all():\r\n return np.repeat(np.nan, 4)\r\n return [group[0], group.max(), group.min(), group[-1]]\r\n\r\n rng = date_range(\"1/1/2000 00:00:00\", \"1/1/2000 5:59:50\", freq=\"10s\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n\r\n resampled = ts.resample(\"5min\", closed=\"right\", label=\"right\").ohlc()\r\n\r\n assert (resampled.loc[\"1/1/2000 00:00\"] == ts[0]).all()\r\n\r\n exp = _ohlc(ts[1:31])\r\n assert (resampled.loc[\"1/1/2000 00:05\"] == exp).all()\r\n\r\n exp = _ohlc(ts[\"1/1/2000 5:55:01\":])\r\n assert (resampled.loc[\"1/1/2000 6:00:00\"] == exp).all()\r\n\r\n\r\ndef test_downsample_non_unique():\r\n rng = date_range(\"1/1/2000\", \"2/29/2000\")\r\n rng2 = rng.repeat(5).values\r\n ts = Series(np.random.randn(len(rng2)), index=rng2)\r\n\r\n result = ts.resample(\"M\").mean()\r\n\r\n expected = ts.groupby(lambda x: x.month).mean()\r\n assert len(result) == 2\r\n tm.assert_almost_equal(result[0], expected[1])\r\n tm.assert_almost_equal(result[1], expected[2])\r\n\r\n\r\ndef test_asfreq_non_unique():\r\n # GH #1077\r\n rng = date_range(\"1/1/2000\", \"2/29/2000\")\r\n rng2 = rng.repeat(2).values\r\n ts = Series(np.random.randn(len(rng2)), index=rng2)\r\n\r\n msg = \"cannot reindex from a duplicate axis\"\r\n with pytest.raises(ValueError, match=msg):\r\n ts.asfreq(\"B\")\r\n\r\n\r\ndef test_resample_axis1():\r\n rng = date_range(\"1/1/2000\", \"2/29/2000\")\r\n df = DataFrame(np.random.randn(3, len(rng)), columns=rng, index=[\"a\", \"b\", \"c\"])\r\n\r\n result = df.resample(\"M\", axis=1).mean()\r\n expected = df.T.resample(\"M\").mean().T\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_resample_anchored_ticks():\r\n # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should\r\n # \"anchor\" the origin at midnight so we get regular intervals rather\r\n # than starting from the first timestamp which might start in the\r\n # middle of a desired interval\r\n\r\n rng = date_range(\"1/1/2000 04:00:00\", periods=86400, freq=\"s\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n ts[:2] = np.nan # so results are the same\r\n\r\n freqs = [\"t\", \"5t\", \"15t\", \"30t\", \"4h\", \"12h\"]\r\n for freq in freqs:\r\n result = ts[2:].resample(freq, closed=\"left\", label=\"left\").mean()\r\n expected = ts.resample(freq, closed=\"left\", label=\"left\").mean()\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_single_group():\r\n mysum = lambda x: x.sum()\r\n\r\n rng = date_range(\"2000-1-1\", \"2000-2-10\", freq=\"D\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n tm.assert_series_equal(ts.resample(\"M\").sum(), ts.resample(\"M\").apply(mysum))\r\n\r\n rng = date_range(\"2000-1-1\", \"2000-1-10\", freq=\"D\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n tm.assert_series_equal(ts.resample(\"M\").sum(), 
ts.resample(\"M\").apply(mysum))\r\n\r\n # GH 3849\r\n s = Series(\r\n [30.1, 31.6],\r\n index=[Timestamp(\"20070915 15:30:00\"), Timestamp(\"20070915 15:40:00\")],\r\n )\r\n expected = Series([0.75], index=[Timestamp(\"20070915\")])\r\n result = s.resample(\"D\").apply(lambda x: np.std(x))\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_base():\r\n rng = date_range(\"1/1/2000 00:00:00\", \"1/1/2000 02:00\", freq=\"s\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n\r\n resampled = ts.resample(\"5min\", base=2).mean()\r\n exp_rng = date_range(\"12/31/1999 23:57:00\", \"1/1/2000 01:57\", freq=\"5min\")\r\n tm.assert_index_equal(resampled.index, exp_rng)\r\n\r\n\r\ndef test_resample_float_base():\r\n # GH25161\r\n dt = pd.to_datetime(\r\n [\"2018-11-26 16:17:43.51\", \"2018-11-26 16:17:44.51\", \"2018-11-26 16:17:45.51\"]\r\n )\r\n s = Series(np.arange(3), index=dt)\r\n\r\n base = 17 + 43.51 / 60\r\n result = s.resample(\"3min\", base=base).size()\r\n expected = Series(3, index=pd.DatetimeIndex([\"2018-11-26 16:17:43.51\"]))\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_daily_anchored():\r\n rng = date_range(\"1/1/2000 0:00:00\", periods=10000, freq=\"T\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n ts[:2] = np.nan # so results are the same\r\n\r\n result = ts[2:].resample(\"D\", closed=\"left\", label=\"left\").mean()\r\n expected = ts.resample(\"D\", closed=\"left\", label=\"left\").mean()\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_to_period_monthly_buglet():\r\n # GH #1259\r\n\r\n rng = date_range(\"1/1/2000\", \"12/31/2000\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n\r\n result = ts.resample(\"M\", kind=\"period\").mean()\r\n exp_index = period_range(\"Jan-2000\", \"Dec-2000\", freq=\"M\")\r\n tm.assert_index_equal(result.index, exp_index)\r\n\r\n\r\ndef test_period_with_agg():\r\n\r\n # aggregate a period resampler with a lambda\r\n s2 = Series(\r\n np.random.randint(0, 5, 50),\r\n index=pd.period_range(\"2012-01-01\", freq=\"H\", periods=50),\r\n dtype=\"float64\",\r\n )\r\n\r\n expected = s2.to_timestamp().resample(\"D\").mean().to_period()\r\n result = s2.resample(\"D\").agg(lambda x: x.mean())\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_segfault():\r\n # GH 8573\r\n # segfaulting in older versions\r\n all_wins_and_wagers = [\r\n (1, datetime(2013, 10, 1, 16, 20), 1, 0),\r\n (2, datetime(2013, 10, 1, 16, 10), 1, 0),\r\n (2, datetime(2013, 10, 1, 18, 15), 1, 0),\r\n (2, datetime(2013, 10, 1, 16, 10, 31), 1, 0),\r\n ]\r\n\r\n df = DataFrame.from_records(\r\n all_wins_and_wagers, columns=(\"ID\", \"timestamp\", \"A\", \"B\")\r\n ).set_index(\"timestamp\")\r\n result = df.groupby(\"ID\").resample(\"5min\").sum()\r\n expected = df.groupby(\"ID\").apply(lambda x: x.resample(\"5min\").sum())\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_resample_dtype_preservation():\r\n\r\n # GH 12202\r\n # validation tests for dtype preservation\r\n\r\n df = DataFrame(\r\n {\r\n \"date\": pd.date_range(start=\"2016-01-01\", periods=4, freq=\"W\"),\r\n \"group\": [1, 1, 2, 2],\r\n \"val\": Series([5, 6, 7, 8], dtype=\"int32\"),\r\n }\r\n ).set_index(\"date\")\r\n\r\n result = df.resample(\"1D\").ffill()\r\n assert result.val.dtype == np.int32\r\n\r\n result = df.groupby(\"group\").resample(\"1D\").ffill()\r\n assert result.val.dtype == np.int32\r\n\r\n\r\ndef test_resample_dtype_coercion():\r\n\r\n 
pytest.importorskip(\"scipy.interpolate\")\r\n\r\n # GH 16361\r\n df = {\"a\": [1, 3, 1, 4]}\r\n df = DataFrame(df, index=pd.date_range(\"2017-01-01\", \"2017-01-04\"))\r\n\r\n expected = df.astype(\"float64\").resample(\"H\").mean()[\"a\"].interpolate(\"cubic\")\r\n\r\n result = df.resample(\"H\")[\"a\"].mean().interpolate(\"cubic\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = df.resample(\"H\").mean()[\"a\"].interpolate(\"cubic\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_weekly_resample_buglet():\r\n # #1327\r\n rng = date_range(\"1/1/2000\", freq=\"B\", periods=20)\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n\r\n resampled = ts.resample(\"W\").mean()\r\n expected = ts.resample(\"W-SUN\").mean()\r\n tm.assert_series_equal(resampled, expected)\r\n\r\n\r\ndef test_monthly_resample_error():\r\n # #1451\r\n dates = date_range(\"4/16/2012 20:00\", periods=5000, freq=\"h\")\r\n ts = Series(np.random.randn(len(dates)), index=dates)\r\n # it works!\r\n ts.resample(\"M\")\r\n\r\n\r\ndef test_nanosecond_resample_error():\r\n # GH 12307 - Values falls after last bin when\r\n # Resampling using pd.tseries.offsets.Nano as period\r\n start = 1443707890427\r\n exp_start = 1443707890400\r\n indx = pd.date_range(start=pd.to_datetime(start), periods=10, freq=\"100n\")\r\n ts = Series(range(len(indx)), index=indx)\r\n r = ts.resample(pd.tseries.offsets.Nano(100))\r\n result = r.agg(\"mean\")\r\n\r\n exp_indx = pd.date_range(start=pd.to_datetime(exp_start), periods=10, freq=\"100n\")\r\n exp = Series(range(len(exp_indx)), index=exp_indx)\r\n\r\n tm.assert_series_equal(result, exp)\r\n\r\n\r\ndef test_resample_anchored_intraday(simple_date_range_series):\r\n # #1471, #1458\r\n\r\n rng = date_range(\"1/1/2012\", \"4/1/2012\", freq=\"100min\")\r\n df = DataFrame(rng.month, index=rng)\r\n\r\n result = df.resample(\"M\").mean()\r\n expected = df.resample(\"M\", kind=\"period\").mean().to_timestamp(how=\"end\")\r\n expected.index += Timedelta(1, \"ns\") - Timedelta(1, \"D\")\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.resample(\"M\", closed=\"left\").mean()\r\n exp = df.tshift(1, freq=\"D\").resample(\"M\", kind=\"period\").mean()\r\n exp = exp.to_timestamp(how=\"end\")\r\n\r\n exp.index = exp.index + Timedelta(1, \"ns\") - Timedelta(1, \"D\")\r\n tm.assert_frame_equal(result, exp)\r\n\r\n rng = date_range(\"1/1/2012\", \"4/1/2012\", freq=\"100min\")\r\n df = DataFrame(rng.month, index=rng)\r\n\r\n result = df.resample(\"Q\").mean()\r\n expected = df.resample(\"Q\", kind=\"period\").mean().to_timestamp(how=\"end\")\r\n expected.index += Timedelta(1, \"ns\") - Timedelta(1, \"D\")\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.resample(\"Q\", closed=\"left\").mean()\r\n expected = df.tshift(1, freq=\"D\").resample(\"Q\", kind=\"period\", closed=\"left\").mean()\r\n expected = expected.to_timestamp(how=\"end\")\r\n expected.index += Timedelta(1, \"ns\") - Timedelta(1, \"D\")\r\n tm.assert_frame_equal(result, expected)\r\n\r\n ts = simple_date_range_series(\"2012-04-29 23:00\", \"2012-04-30 5:00\", freq=\"h\")\r\n resampled = ts.resample(\"M\").mean()\r\n assert len(resampled) == 1\r\n\r\n\r\ndef test_resample_anchored_monthstart(simple_date_range_series):\r\n ts = simple_date_range_series(\"1/1/2000\", \"12/31/2002\")\r\n\r\n freqs = [\"MS\", \"BMS\", \"QS-MAR\", \"AS-DEC\", \"AS-JUN\"]\r\n\r\n for freq in freqs:\r\n ts.resample(freq).mean()\r\n\r\n\r\ndef test_resample_anchored_multiday():\r\n # When resampling a range 
spanning multiple days, ensure that the\r\n # start date gets used to determine the offset. Fixes issue where\r\n # a one day period is not a multiple of the frequency.\r\n #\r\n # See: https://github.com/pandas-dev/pandas/issues/8683\r\n\r\n index = pd.date_range(\r\n \"2014-10-14 23:06:23.206\", periods=3, freq=\"400L\"\r\n ) | pd.date_range(\"2014-10-15 23:00:00\", periods=2, freq=\"2200L\")\r\n\r\n s = Series(np.random.randn(5), index=index)\r\n\r\n # Ensure left closing works\r\n result = s.resample(\"2200L\").mean()\r\n assert result.index[-1] == Timestamp(\"2014-10-15 23:00:02.000\")\r\n\r\n # Ensure right closing works\r\n result = s.resample(\"2200L\", label=\"right\").mean()\r\n assert result.index[-1] == Timestamp(\"2014-10-15 23:00:04.200\")\r\n\r\n\r\ndef test_corner_cases(simple_period_range_series, simple_date_range_series):\r\n # miscellaneous test coverage\r\n\r\n rng = date_range(\"1/1/2000\", periods=12, freq=\"t\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n\r\n result = ts.resample(\"5t\", closed=\"right\", label=\"left\").mean()\r\n ex_index = date_range(\"1999-12-31 23:55\", periods=4, freq=\"5t\")\r\n tm.assert_index_equal(result.index, ex_index)\r\n\r\n len0pts = simple_period_range_series(\"2007-01\", \"2010-05\", freq=\"M\")[:0]\r\n # it works\r\n result = len0pts.resample(\"A-DEC\").mean()\r\n assert len(result) == 0\r\n\r\n # resample to periods\r\n ts = simple_date_range_series(\"2000-04-28\", \"2000-04-30 11:00\", freq=\"h\")\r\n result = ts.resample(\"M\", kind=\"period\").mean()\r\n assert len(result) == 1\r\n assert result.index[0] == Period(\"2000-04\", freq=\"M\")\r\n\r\n\r\ndef test_anchored_lowercase_buglet():\r\n dates = date_range(\"4/16/2012 20:00\", periods=50000, freq=\"s\")\r\n ts = Series(np.random.randn(len(dates)), index=dates)\r\n # it works!\r\n ts.resample(\"d\").mean()\r\n\r\n\r\ndef test_upsample_apply_functions():\r\n # #1596\r\n rng = pd.date_range(\"2012-06-12\", periods=4, freq=\"h\")\r\n\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n\r\n result = ts.resample(\"20min\").aggregate([\"mean\", \"sum\"])\r\n assert isinstance(result, DataFrame)\r\n\r\n\r\ndef test_resample_not_monotonic():\r\n rng = pd.date_range(\"2012-06-12\", periods=200, freq=\"h\")\r\n ts = Series(np.random.randn(len(rng)), index=rng)\r\n\r\n ts = ts.take(np.random.permutation(len(ts)))\r\n\r\n result = ts.resample(\"D\").sum()\r\n exp = ts.sort_index().resample(\"D\").sum()\r\n tm.assert_series_equal(result, exp)\r\n\r\n\r\ndef test_resample_median_bug_1688():\r\n\r\n for dtype in [\"int64\", \"int32\", \"float64\", \"float32\"]:\r\n df = DataFrame(\r\n [1, 2],\r\n index=[datetime(2012, 1, 1, 0, 0, 0), datetime(2012, 1, 1, 0, 5, 0)],\r\n dtype=dtype,\r\n )\r\n\r\n result = df.resample(\"T\").apply(lambda x: x.mean())\r\n exp = df.asfreq(\"T\")\r\n tm.assert_frame_equal(result, exp)\r\n\r\n result = df.resample(\"T\").median()\r\n exp = df.asfreq(\"T\")\r\n tm.assert_frame_equal(result, exp)\r\n\r\n\r\ndef test_how_lambda_functions(simple_date_range_series):\r\n\r\n ts = simple_date_range_series(\"1/1/2000\", \"4/1/2000\")\r\n\r\n result = ts.resample(\"M\").apply(lambda x: x.mean())\r\n exp = ts.resample(\"M\").mean()\r\n tm.assert_series_equal(result, exp)\r\n\r\n foo_exp = ts.resample(\"M\").mean()\r\n foo_exp.name = \"foo\"\r\n bar_exp = ts.resample(\"M\").std()\r\n bar_exp.name = \"bar\"\r\n\r\n result = ts.resample(\"M\").apply([lambda x: x.mean(), lambda x: x.std(ddof=1)])\r\n result.columns = [\"foo\", \"bar\"]\r\n 
tm.assert_series_equal(result[\"foo\"], foo_exp)\r\n tm.assert_series_equal(result[\"bar\"], bar_exp)\r\n\r\n # this is a MI Series, so comparing the names of the results\r\n # doesn't make sense\r\n result = ts.resample(\"M\").aggregate(\r\n {\"foo\": lambda x: x.mean(), \"bar\": lambda x: x.std(ddof=1)}\r\n )\r\n tm.assert_series_equal(result[\"foo\"], foo_exp, check_names=False)\r\n tm.assert_series_equal(result[\"bar\"], bar_exp, check_names=False)\r\n\r\n\r\ndef test_resample_unequal_times():\r\n # #1772\r\n start = datetime(1999, 3, 1, 5)\r\n # end hour is less than start\r\n end = datetime(2012, 7, 31, 4)\r\n bad_ind = date_range(start, end, freq=\"30min\")\r\n df = DataFrame({\"close\": 1}, index=bad_ind)\r\n\r\n # it works!\r\n df.resample(\"AS\").sum()\r\n\r\n\r\ndef test_resample_consistency():\r\n\r\n # GH 6418\r\n # resample with bfill / limit / reindex consistency\r\n\r\n i30 = pd.date_range(\"2002-02-02\", periods=4, freq=\"30T\")\r\n s = Series(np.arange(4.0), index=i30)\r\n s[2] = np.NaN\r\n\r\n # Upsample by factor 3 with reindex() and resample() methods:\r\n i10 = pd.date_range(i30[0], i30[-1], freq=\"10T\")\r\n\r\n s10 = s.reindex(index=i10, method=\"bfill\")\r\n s10_2 = s.reindex(index=i10, method=\"bfill\", limit=2)\r\n rl = s.reindex_like(s10, method=\"bfill\", limit=2)\r\n r10_2 = s.resample(\"10Min\").bfill(limit=2)\r\n r10 = s.resample(\"10Min\").bfill()\r\n\r\n # s10_2, r10, r10_2, rl should all be equal\r\n tm.assert_series_equal(s10_2, r10)\r\n tm.assert_series_equal(s10_2, r10_2)\r\n tm.assert_series_equal(s10_2, rl)\r\n\r\n\r\ndef test_resample_timegrouper():\r\n # GH 7227\r\n dates1 = [\r\n datetime(2014, 10, 1),\r\n datetime(2014, 9, 3),\r\n datetime(2014, 11, 5),\r\n datetime(2014, 9, 5),\r\n datetime(2014, 10, 8),\r\n datetime(2014, 7, 15),\r\n ]\r\n\r\n dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]\r\n dates3 = [pd.NaT] + dates1 + [pd.NaT]\r\n\r\n for dates in [dates1, dates2, dates3]:\r\n df = DataFrame(dict(A=dates, B=np.arange(len(dates))))\r\n result = df.set_index(\"A\").resample(\"M\").count()\r\n exp_idx = pd.DatetimeIndex(\r\n [\"2014-07-31\", \"2014-08-31\", \"2014-09-30\", \"2014-10-31\", \"2014-11-30\"],\r\n freq=\"M\",\r\n name=\"A\",\r\n )\r\n expected = DataFrame({\"B\": [1, 0, 2, 2, 1]}, index=exp_idx)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.groupby(pd.Grouper(freq=\"M\", key=\"A\")).count()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates))))\r\n result = df.set_index(\"A\").resample(\"M\").count()\r\n expected = DataFrame(\r\n {\"B\": [1, 0, 2, 2, 1], \"C\": [1, 0, 2, 2, 1]},\r\n index=exp_idx,\r\n columns=[\"B\", \"C\"],\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.groupby(pd.Grouper(freq=\"M\", key=\"A\")).count()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_resample_nunique():\r\n\r\n # GH 12352\r\n df = DataFrame(\r\n {\r\n \"ID\": {\r\n Timestamp(\"2015-06-05 00:00:00\"): \"0010100903\",\r\n Timestamp(\"2015-06-08 00:00:00\"): \"0010150847\",\r\n },\r\n \"DATE\": {\r\n Timestamp(\"2015-06-05 00:00:00\"): \"2015-06-05\",\r\n Timestamp(\"2015-06-08 00:00:00\"): \"2015-06-08\",\r\n },\r\n }\r\n )\r\n r = df.resample(\"D\")\r\n g = df.groupby(pd.Grouper(freq=\"D\"))\r\n expected = df.groupby(pd.Grouper(freq=\"D\")).ID.apply(lambda x: x.nunique())\r\n assert expected.name == \"ID\"\r\n\r\n for t in [r, g]:\r\n result = r.ID.nunique()\r\n tm.assert_series_equal(result, 
expected)\r\n\r\n result = df.ID.resample(\"D\").nunique()\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = df.ID.groupby(pd.Grouper(freq=\"D\")).nunique()\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_nunique_preserves_column_level_names():\r\n # see gh-23222\r\n df = tm.makeTimeDataFrame(freq=\"1D\").abs()\r\n df.columns = pd.MultiIndex.from_arrays(\r\n [df.columns.tolist()] * 2, names=[\"lev0\", \"lev1\"]\r\n )\r\n result = df.resample(\"1h\").nunique()\r\n tm.assert_index_equal(df.columns, result.columns)\r\n\r\n\r\ndef test_resample_nunique_with_date_gap():\r\n # GH 13453\r\n index = pd.date_range(\"1-1-2000\", \"2-15-2000\", freq=\"h\")\r\n index2 = pd.date_range(\"4-15-2000\", \"5-15-2000\", freq=\"h\")\r\n index3 = index.append(index2)\r\n s = Series(range(len(index3)), index=index3, dtype=\"int64\")\r\n r = s.resample(\"M\")\r\n\r\n # Since all elements are unique, these should all be the same\r\n results = [r.count(), r.nunique(), r.agg(Series.nunique), r.agg(\"nunique\")]\r\n\r\n tm.assert_series_equal(results[0], results[1])\r\n tm.assert_series_equal(results[0], results[2])\r\n tm.assert_series_equal(results[0], results[3])\r\n\r\n\r\[email protected](\"n\", [10000, 100000])\r\[email protected](\"k\", [10, 100, 1000])\r\ndef test_resample_group_info(n, k):\r\n # GH10914\r\n\r\n # use a fixed seed to always have the same uniques\r\n prng = np.random.RandomState(1234)\r\n\r\n dr = date_range(start=\"2015-08-27\", periods=n // 10, freq=\"T\")\r\n ts = Series(prng.randint(0, n // k, n).astype(\"int64\"), index=prng.choice(dr, n))\r\n\r\n left = ts.resample(\"30T\").nunique()\r\n ix = date_range(start=ts.index.min(), end=ts.index.max(), freq=\"30T\")\r\n\r\n vals = ts.values\r\n bins = np.searchsorted(ix.values, ts.index, side=\"right\")\r\n\r\n sorter = np.lexsort((vals, bins))\r\n vals, bins = vals[sorter], bins[sorter]\r\n\r\n mask = np.r_[True, vals[1:] != vals[:-1]]\r\n mask |= np.r_[True, bins[1:] != bins[:-1]]\r\n\r\n arr = np.bincount(bins[mask] - 1, minlength=len(ix)).astype(\"int64\", copy=False)\r\n right = Series(arr, index=ix)\r\n\r\n tm.assert_series_equal(left, right)\r\n\r\n\r\ndef test_resample_size():\r\n n = 10000\r\n dr = date_range(\"2015-09-19\", periods=n, freq=\"T\")\r\n ts = Series(np.random.randn(n), index=np.random.choice(dr, n))\r\n\r\n left = ts.resample(\"7T\").size()\r\n ix = date_range(start=left.index.min(), end=ts.index.max(), freq=\"7T\")\r\n\r\n bins = np.searchsorted(ix.values, ts.index.values, side=\"right\")\r\n val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype(\"int64\", copy=False)\r\n\r\n right = Series(val, index=ix)\r\n tm.assert_series_equal(left, right)\r\n\r\n\r\ndef test_resample_across_dst():\r\n # The test resamples a DatetimeIndex with values before and after a\r\n # DST change\r\n # Issue: 14682\r\n\r\n # The DatetimeIndex we will start with\r\n # (note that DST happens at 03:00+02:00 -> 02:00+01:00)\r\n # 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00\r\n df1 = DataFrame([1477786980, 1477790580], columns=[\"ts\"])\r\n dti1 = DatetimeIndex(\r\n pd.to_datetime(df1.ts, unit=\"s\")\r\n .dt.tz_localize(\"UTC\")\r\n .dt.tz_convert(\"Europe/Madrid\")\r\n )\r\n\r\n # The expected DatetimeIndex after resampling.\r\n # 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00\r\n df2 = DataFrame([1477785600, 1477789200], columns=[\"ts\"])\r\n dti2 = DatetimeIndex(\r\n pd.to_datetime(df2.ts, unit=\"s\")\r\n .dt.tz_localize(\"UTC\")\r\n .dt.tz_convert(\"Europe/Madrid\")\r\n )\r\n df = 
DataFrame([5, 5], index=dti1)\r\n\r\n result = df.resample(rule=\"H\").sum()\r\n expected = DataFrame([5, 5], index=dti2)\r\n\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_groupby_with_dst_time_change():\r\n # GH 24972\r\n index = pd.DatetimeIndex(\r\n [1478064900001000000, 1480037118776792000], tz=\"UTC\"\r\n ).tz_convert(\"America/Chicago\")\r\n\r\n df = pd.DataFrame([1, 2], index=index)\r\n result = df.groupby(pd.Grouper(freq=\"1d\")).last()\r\n expected_index_values = pd.date_range(\r\n \"2016-11-02\", \"2016-11-24\", freq=\"d\", tz=\"America/Chicago\"\r\n )\r\n\r\n index = pd.DatetimeIndex(expected_index_values)\r\n expected = pd.DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_resample_dst_anchor():\r\n # 5172\r\n dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz=\"US/Eastern\")\r\n df = DataFrame([5], index=dti)\r\n tm.assert_frame_equal(\r\n df.resample(rule=\"D\").sum(), DataFrame([5], index=df.index.normalize())\r\n )\r\n df.resample(rule=\"MS\").sum()\r\n tm.assert_frame_equal(\r\n df.resample(rule=\"MS\").sum(),\r\n DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)], tz=\"US/Eastern\")),\r\n )\r\n\r\n dti = date_range(\"2013-09-30\", \"2013-11-02\", freq=\"30Min\", tz=\"Europe/Paris\")\r\n values = range(dti.size)\r\n df = DataFrame({\"a\": values, \"b\": values, \"c\": values}, index=dti, dtype=\"int64\")\r\n how = {\"a\": \"min\", \"b\": \"max\", \"c\": \"count\"}\r\n\r\n tm.assert_frame_equal(\r\n df.resample(\"W-MON\").agg(how)[[\"a\", \"b\", \"c\"]],\r\n DataFrame(\r\n {\r\n \"a\": [0, 48, 384, 720, 1056, 1394],\r\n \"b\": [47, 383, 719, 1055, 1393, 1586],\r\n \"c\": [48, 336, 336, 336, 338, 193],\r\n },\r\n index=date_range(\"9/30/2013\", \"11/4/2013\", freq=\"W-MON\", tz=\"Europe/Paris\"),\r\n ),\r\n \"W-MON Frequency\",\r\n )\r\n\r\n tm.assert_frame_equal(\r\n df.resample(\"2W-MON\").agg(how)[[\"a\", \"b\", \"c\"]],\r\n DataFrame(\r\n {\r\n \"a\": [0, 48, 720, 1394],\r\n \"b\": [47, 719, 1393, 1586],\r\n \"c\": [48, 672, 674, 193],\r\n },\r\n index=date_range(\r\n \"9/30/2013\", \"11/11/2013\", freq=\"2W-MON\", tz=\"Europe/Paris\"\r\n ),\r\n ),\r\n \"2W-MON Frequency\",\r\n )\r\n\r\n tm.assert_frame_equal(\r\n df.resample(\"MS\").agg(how)[[\"a\", \"b\", \"c\"]],\r\n DataFrame(\r\n {\"a\": [0, 48, 1538], \"b\": [47, 1537, 1586], \"c\": [48, 1490, 49]},\r\n index=date_range(\"9/1/2013\", \"11/1/2013\", freq=\"MS\", tz=\"Europe/Paris\"),\r\n ),\r\n \"MS Frequency\",\r\n )\r\n\r\n tm.assert_frame_equal(\r\n df.resample(\"2MS\").agg(how)[[\"a\", \"b\", \"c\"]],\r\n DataFrame(\r\n {\"a\": [0, 1538], \"b\": [1537, 1586], \"c\": [1538, 49]},\r\n index=date_range(\"9/1/2013\", \"11/1/2013\", freq=\"2MS\", tz=\"Europe/Paris\"),\r\n ),\r\n \"2MS Frequency\",\r\n )\r\n\r\n df_daily = df[\"10/26/2013\":\"10/29/2013\"]\r\n tm.assert_frame_equal(\r\n df_daily.resample(\"D\").agg({\"a\": \"min\", \"b\": \"max\", \"c\": \"count\"})[\r\n [\"a\", \"b\", \"c\"]\r\n ],\r\n DataFrame(\r\n {\r\n \"a\": [1248, 1296, 1346, 1394],\r\n \"b\": [1295, 1345, 1393, 1441],\r\n \"c\": [48, 50, 48, 48],\r\n },\r\n index=date_range(\"10/26/2013\", \"10/29/2013\", freq=\"D\", tz=\"Europe/Paris\"),\r\n ),\r\n \"D Frequency\",\r\n )\r\n\r\n\r\ndef test_downsample_across_dst():\r\n # GH 8531\r\n tz = pytz.timezone(\"Europe/Berlin\")\r\n dt = datetime(2014, 10, 26)\r\n dates = date_range(tz.localize(dt), periods=4, freq=\"2H\")\r\n result = Series(5, index=dates).resample(\"H\").mean()\r\n expected = Series(\r\n 
[5.0, np.nan] * 3 + [5.0],\r\n index=date_range(tz.localize(dt), periods=7, freq=\"H\"),\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_downsample_across_dst_weekly():\r\n # GH 9119, GH 21459\r\n df = DataFrame(\r\n index=DatetimeIndex(\r\n [\"2017-03-25\", \"2017-03-26\", \"2017-03-27\", \"2017-03-28\", \"2017-03-29\"],\r\n tz=\"Europe/Amsterdam\",\r\n ),\r\n data=[11, 12, 13, 14, 15],\r\n )\r\n result = df.resample(\"1W\").sum()\r\n expected = DataFrame(\r\n [23, 42],\r\n index=pd.DatetimeIndex([\"2017-03-26\", \"2017-04-02\"], tz=\"Europe/Amsterdam\"),\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n idx = pd.date_range(\"2013-04-01\", \"2013-05-01\", tz=\"Europe/London\", freq=\"H\")\r\n s = Series(index=idx, dtype=np.float64)\r\n result = s.resample(\"W\").mean()\r\n expected = Series(\r\n index=pd.date_range(\"2013-04-07\", freq=\"W\", periods=5, tz=\"Europe/London\"),\r\n dtype=np.float64,\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_resample_with_nat():\r\n # GH 13020\r\n index = DatetimeIndex(\r\n [\r\n pd.NaT,\r\n \"1970-01-01 00:00:00\",\r\n pd.NaT,\r\n \"1970-01-01 00:00:01\",\r\n \"1970-01-01 00:00:02\",\r\n ]\r\n )\r\n frame = DataFrame([2, 3, 5, 7, 11], index=index)\r\n\r\n index_1s = DatetimeIndex(\r\n [\"1970-01-01 00:00:00\", \"1970-01-01 00:00:01\", \"1970-01-01 00:00:02\"]\r\n )\r\n frame_1s = DataFrame([3, 7, 11], index=index_1s)\r\n tm.assert_frame_equal(frame.resample(\"1s\").mean(), frame_1s)\r\n\r\n index_2s = DatetimeIndex([\"1970-01-01 00:00:00\", \"1970-01-01 00:00:02\"])\r\n frame_2s = DataFrame([5, 11], index=index_2s)\r\n tm.assert_frame_equal(frame.resample(\"2s\").mean(), frame_2s)\r\n\r\n index_3s = DatetimeIndex([\"1970-01-01 00:00:00\"])\r\n frame_3s = DataFrame([7], index=index_3s)\r\n tm.assert_frame_equal(frame.resample(\"3s\").mean(), frame_3s)\r\n\r\n tm.assert_frame_equal(frame.resample(\"60s\").mean(), frame_3s)\r\n\r\n\r\ndef test_resample_datetime_values():\r\n # GH 13119\r\n # check that datetime dtype is preserved when NaT values are\r\n # introduced by the resampling\r\n\r\n dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]\r\n df = DataFrame({\"timestamp\": dates}, index=dates)\r\n\r\n exp = Series(\r\n [datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],\r\n index=date_range(\"2016-01-15\", periods=3, freq=\"2D\"),\r\n name=\"timestamp\",\r\n )\r\n\r\n res = df.resample(\"2D\").first()[\"timestamp\"]\r\n tm.assert_series_equal(res, exp)\r\n res = df[\"timestamp\"].resample(\"2D\").first()\r\n tm.assert_series_equal(res, exp)\r\n\r\n\r\ndef test_resample_apply_with_additional_args(series):\r\n # GH 14615\r\n def f(data, add_arg):\r\n return np.mean(data) * add_arg\r\n\r\n multiplier = 10\r\n result = series.resample(\"D\").apply(f, multiplier)\r\n expected = series.resample(\"D\").mean().multiply(multiplier)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # Testing as kwarg\r\n result = series.resample(\"D\").apply(f, add_arg=multiplier)\r\n expected = series.resample(\"D\").mean().multiply(multiplier)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # Testing dataframe\r\n df = pd.DataFrame({\"A\": 1, \"B\": 2}, index=pd.date_range(\"2017\", periods=10))\r\n result = df.groupby(\"A\").resample(\"D\").agg(f, multiplier)\r\n expected = df.groupby(\"A\").resample(\"D\").mean().multiply(multiplier)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\[email protected](\"k\", [1, 2, 3])\r\[email protected](\r\n \"n1, freq1, n2, freq2\",\r\n [\r\n (30, \"S\", 0.5, 
\"Min\"),\r\n (60, \"S\", 1, \"Min\"),\r\n (3600, \"S\", 1, \"H\"),\r\n (60, \"Min\", 1, \"H\"),\r\n (21600, \"S\", 0.25, \"D\"),\r\n (86400, \"S\", 1, \"D\"),\r\n (43200, \"S\", 0.5, \"D\"),\r\n (1440, \"Min\", 1, \"D\"),\r\n (12, \"H\", 0.5, \"D\"),\r\n (24, \"H\", 1, \"D\"),\r\n ],\r\n)\r\ndef test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):\r\n # GH 24127\r\n n1_ = n1 * k\r\n n2_ = n2 * k\r\n s = pd.Series(\r\n 0, index=pd.date_range(\"19910905 13:00\", \"19911005 07:00\", freq=freq1)\r\n )\r\n s = s + range(len(s))\r\n\r\n result1 = s.resample(str(n1_) + freq1).mean()\r\n result2 = s.resample(str(n2_) + freq2).mean()\r\n tm.assert_series_equal(result1, result2)\r\n\r\n\r\[email protected](\r\n \"first,last,offset,exp_first,exp_last\",\r\n [\r\n (\"19910905\", \"19920406\", \"D\", \"19910905\", \"19920407\"),\r\n (\"19910905 00:00\", \"19920406 06:00\", \"D\", \"19910905\", \"19920407\"),\r\n (\"19910905 06:00\", \"19920406 06:00\", \"H\", \"19910905 06:00\", \"19920406 07:00\"),\r\n (\"19910906\", \"19920406\", \"M\", \"19910831\", \"19920430\"),\r\n (\"19910831\", \"19920430\", \"M\", \"19910831\", \"19920531\"),\r\n (\"1991-08\", \"1992-04\", \"M\", \"19910831\", \"19920531\"),\r\n ],\r\n)\r\ndef test_get_timestamp_range_edges(first, last, offset, exp_first, exp_last):\r\n first = pd.Period(first)\r\n first = first.to_timestamp(first.freq)\r\n last = pd.Period(last)\r\n last = last.to_timestamp(last.freq)\r\n\r\n exp_first = pd.Timestamp(exp_first, freq=offset)\r\n exp_last = pd.Timestamp(exp_last, freq=offset)\r\n\r\n offset = pd.tseries.frequencies.to_offset(offset)\r\n result = _get_timestamp_range_edges(first, last, offset)\r\n expected = (exp_first, exp_last)\r\n assert result == expected\r\n\r\n\r\ndef test_resample_apply_product():\r\n # GH 5586\r\n index = date_range(start=\"2012-01-31\", freq=\"M\", periods=12)\r\n\r\n ts = Series(range(12), index=index)\r\n df = DataFrame(dict(A=ts, B=ts + 2))\r\n result = df.resample(\"Q\").apply(np.product)\r\n expected = DataFrame(\r\n np.array([[0, 24], [60, 210], [336, 720], [990, 1716]], dtype=np.int64),\r\n index=DatetimeIndex(\r\n [\"2012-03-31\", \"2012-06-30\", \"2012-09-30\", \"2012-12-31\"], freq=\"Q-DEC\"\r\n ),\r\n columns=[\"A\", \"B\"],\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n",
"\"\"\"Utilities for the neural network modules\r\n\"\"\"\r\n\r\n# Author: Issam H. Laradji <[email protected]>\r\n# License: BSD 3 clause\r\n\r\nimport numpy as np\r\n\r\nfrom scipy.special import expit as logistic_sigmoid\r\nfrom scipy.special import xlogy\r\n\r\n\r\ndef identity(X):\r\n \"\"\"Simply return the input array.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n Data, where n_samples is the number of samples\r\n and n_features is the number of features.\r\n\r\n Returns\r\n -------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n Same as the input data.\r\n \"\"\"\r\n return X\r\n\r\n\r\ndef logistic(X):\r\n \"\"\"Compute the logistic function inplace.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The input data.\r\n\r\n Returns\r\n -------\r\n X_new : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The transformed data.\r\n \"\"\"\r\n return logistic_sigmoid(X, out=X)\r\n\r\n\r\ndef tanh(X):\r\n \"\"\"Compute the hyperbolic tan function inplace.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The input data.\r\n\r\n Returns\r\n -------\r\n X_new : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The transformed data.\r\n \"\"\"\r\n return np.tanh(X, out=X)\r\n\r\n\r\ndef relu(X):\r\n \"\"\"Compute the rectified linear unit function inplace.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The input data.\r\n\r\n Returns\r\n -------\r\n X_new : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The transformed data.\r\n \"\"\"\r\n np.clip(X, 0, np.finfo(X.dtype).max, out=X)\r\n return X\r\n\r\n\r\ndef softmax(X):\r\n \"\"\"Compute the K-way softmax function inplace.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The input data.\r\n\r\n Returns\r\n -------\r\n X_new : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The transformed data.\r\n \"\"\"\r\n tmp = X - X.max(axis=1)[:, np.newaxis]\r\n np.exp(tmp, out=X)\r\n X /= X.sum(axis=1)[:, np.newaxis]\r\n\r\n return X\r\n\r\n\r\nACTIVATIONS = {'identity': identity, 'tanh': tanh, 'logistic': logistic,\r\n 'relu': relu, 'softmax': softmax}\r\n\r\n\r\ndef inplace_identity_derivative(Z, delta):\r\n \"\"\"Apply the derivative of the identity function: do nothing.\r\n\r\n Parameters\r\n ----------\r\n Z : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The data which was output from the identity activation function during\r\n the forward pass.\r\n\r\n delta : {array-like}, shape (n_samples, n_features)\r\n The backpropagated error signal to be modified inplace.\r\n \"\"\"\r\n # Nothing to do\r\n\r\n\r\ndef inplace_logistic_derivative(Z, delta):\r\n \"\"\"Apply the derivative of the logistic sigmoid function.\r\n\r\n It exploits the fact that the derivative is a simple function of the output\r\n value from logistic function.\r\n\r\n Parameters\r\n ----------\r\n Z : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The data which was output from the logistic activation function during\r\n the forward pass.\r\n\r\n delta : {array-like}, shape (n_samples, n_features)\r\n The backpropagated error signal to be modified inplace.\r\n \"\"\"\r\n delta *= Z\r\n delta *= (1 - Z)\r\n\r\n\r\ndef inplace_tanh_derivative(Z, delta):\r\n \"\"\"Apply the derivative of 
the hyperbolic tanh function.\r\n\r\n It exploits the fact that the derivative is a simple function of the output\r\n value from hyperbolic tangent.\r\n\r\n Parameters\r\n ----------\r\n Z : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The data which was output from the hyperbolic tangent activation\r\n function during the forward pass.\r\n\r\n delta : {array-like}, shape (n_samples, n_features)\r\n The backpropagated error signal to be modified inplace.\r\n \"\"\"\r\n delta *= (1 - Z ** 2)\r\n\r\n\r\ndef inplace_relu_derivative(Z, delta):\r\n \"\"\"Apply the derivative of the relu function.\r\n\r\n It exploits the fact that the derivative is a simple function of the output\r\n value from rectified linear units activation function.\r\n\r\n Parameters\r\n ----------\r\n Z : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n The data which was output from the rectified linear units activation\r\n function during the forward pass.\r\n\r\n delta : {array-like}, shape (n_samples, n_features)\r\n The backpropagated error signal to be modified inplace.\r\n \"\"\"\r\n delta[Z == 0] = 0\r\n\r\n\r\nDERIVATIVES = {'identity': inplace_identity_derivative,\r\n 'tanh': inplace_tanh_derivative,\r\n 'logistic': inplace_logistic_derivative,\r\n 'relu': inplace_relu_derivative}\r\n\r\n\r\ndef squared_loss(y_true, y_pred):\r\n \"\"\"Compute the squared loss for regression.\r\n\r\n Parameters\r\n ----------\r\n y_true : array-like or label indicator matrix\r\n Ground truth (correct) values.\r\n\r\n y_pred : array-like or label indicator matrix\r\n Predicted values, as returned by a regression estimator.\r\n\r\n Returns\r\n -------\r\n loss : float\r\n The degree to which the samples are correctly predicted.\r\n \"\"\"\r\n return ((y_true - y_pred) ** 2).mean() / 2\r\n\r\n\r\ndef log_loss(y_true, y_prob):\r\n \"\"\"Compute Logistic loss for classification.\r\n\r\n Parameters\r\n ----------\r\n y_true : array-like or label indicator matrix\r\n Ground truth (correct) labels.\r\n\r\n y_prob : array-like of float, shape = (n_samples, n_classes)\r\n Predicted probabilities, as returned by a classifier's\r\n predict_proba method.\r\n\r\n Returns\r\n -------\r\n loss : float\r\n The degree to which the samples are correctly predicted.\r\n \"\"\"\r\n if y_prob.shape[1] == 1:\r\n y_prob = np.append(1 - y_prob, y_prob, axis=1)\r\n\r\n if y_true.shape[1] == 1:\r\n y_true = np.append(1 - y_true, y_true, axis=1)\r\n\r\n return - xlogy(y_true, y_prob).sum() / y_prob.shape[0]\r\n\r\n\r\ndef binary_log_loss(y_true, y_prob):\r\n \"\"\"Compute binary logistic loss for classification.\r\n\r\n This is identical to log_loss in binary classification case,\r\n but is kept for its use in multilabel case.\r\n\r\n Parameters\r\n ----------\r\n y_true : array-like or label indicator matrix\r\n Ground truth (correct) labels.\r\n\r\n y_prob : array-like of float, shape = (n_samples, n_classes)\r\n Predicted probabilities, as returned by a classifier's\r\n predict_proba method.\r\n\r\n Returns\r\n -------\r\n loss : float\r\n The degree to which the samples are correctly predicted.\r\n \"\"\"\r\n return -(xlogy(y_true, y_prob) +\r\n xlogy(1 - y_true, 1 - y_prob)).sum() / y_prob.shape[0]\r\n\r\n\r\nLOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss,\r\n 'binary_log_loss': binary_log_loss}\r\n",
"# Authors: Ashim Bhattarai <[email protected]>\r\n# Thomas J Fan <[email protected]>\r\n# License: BSD 3 clause\r\n\r\nimport numpy as np\r\n\r\nfrom ._base import _BaseImputer\r\nfrom ..utils.validation import FLOAT_DTYPES\r\nfrom ..metrics import pairwise_distances_chunked\r\nfrom ..metrics.pairwise import _NAN_METRICS\r\nfrom ..neighbors._base import _get_weights\r\nfrom ..neighbors._base import _check_weights\r\nfrom ..utils import check_array\r\nfrom ..utils import is_scalar_nan\r\nfrom ..utils._mask import _get_mask\r\nfrom ..utils.validation import check_is_fitted\r\n\r\n\r\nclass KNNImputer(_BaseImputer):\r\n \"\"\"Imputation for completing missing values using k-Nearest Neighbors.\r\n\r\n Each sample's missing values are imputed using the mean value from\r\n `n_neighbors` nearest neighbors found in the training set. Two samples are\r\n close if the features that neither is missing are close.\r\n\r\n Read more in the :ref:`User Guide <knnimpute>`.\r\n\r\n .. versionadded:: 0.22\r\n\r\n Parameters\r\n ----------\r\n missing_values : number, string, np.nan or None, default=`np.nan`\r\n The placeholder for the missing values. All occurrences of\r\n `missing_values` will be imputed.\r\n\r\n n_neighbors : int, default=5\r\n Number of neighboring samples to use for imputation.\r\n\r\n weights : {'uniform', 'distance'} or callable, default='uniform'\r\n Weight function used in prediction. Possible values:\r\n\r\n - 'uniform' : uniform weights. All points in each neighborhood are\r\n weighted equally.\r\n - 'distance' : weight points by the inverse of their distance.\r\n in this case, closer neighbors of a query point will have a\r\n greater influence than neighbors which are further away.\r\n - callable : a user-defined function which accepts an\r\n array of distances, and returns an array of the same shape\r\n containing the weights.\r\n\r\n metric : {'nan_euclidean'} or callable, default='nan_euclidean'\r\n Distance metric for searching neighbors. Possible values:\r\n\r\n - 'nan_euclidean'\r\n - callable : a user-defined function which conforms to the definition\r\n of ``_pairwise_callable(X, Y, metric, **kwds)``. The function\r\n accepts two arrays, X and Y, and a `missing_values` keyword in\r\n `kwds` and returns a scalar distance value.\r\n\r\n copy : bool, default=True\r\n If True, a copy of X will be created. If False, imputation will\r\n be done in-place whenever possible.\r\n\r\n add_indicator : bool, default=False\r\n If True, a :class:`MissingIndicator` transform will stack onto the\r\n output of the imputer's transform. This allows a predictive estimator\r\n to account for missingness despite imputation. If a feature has no\r\n missing values at fit/train time, the feature won't appear on the\r\n missing indicator even if there are missing values at transform/test\r\n time.\r\n\r\n Attributes\r\n ----------\r\n indicator_ : :class:`sklearn.impute.MissingIndicator`\r\n Indicator used to add binary indicators for missing values.\r\n ``None`` if add_indicator is False.\r\n\r\n References\r\n ----------\r\n * Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor\r\n Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing\r\n value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17\r\n no. 
6, 2001 Pages 520-525.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> from sklearn.impute import KNNImputer\r\n >>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]\r\n >>> imputer = KNNImputer(n_neighbors=2)\r\n >>> imputer.fit_transform(X)\r\n array([[1. , 2. , 4. ],\r\n [3. , 4. , 3. ],\r\n [5.5, 6. , 5. ],\r\n [8. , 8. , 7. ]])\r\n \"\"\"\r\n\r\n def __init__(self, missing_values=np.nan, n_neighbors=5,\r\n weights=\"uniform\", metric=\"nan_euclidean\", copy=True,\r\n add_indicator=False):\r\n super().__init__(\r\n missing_values=missing_values,\r\n add_indicator=add_indicator\r\n )\r\n self.n_neighbors = n_neighbors\r\n self.weights = weights\r\n self.metric = metric\r\n self.copy = copy\r\n\r\n def _calc_impute(self, dist_pot_donors, n_neighbors,\r\n fit_X_col, mask_fit_X_col):\r\n \"\"\"Helper function to impute a single column.\r\n\r\n Parameters\r\n ----------\r\n dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)\r\n Distance matrix between the receivers and potential donors from\r\n training set. There must be at least one non-nan distance between\r\n a receiver and a potential donor.\r\n\r\n n_neighbors : int\r\n Number of neighbors to consider.\r\n\r\n fit_X_col : ndarray of shape (n_potential_donors,)\r\n Column of potential donors from training set.\r\n\r\n mask_fit_X_col : ndarray of shape (n_potential_donors,)\r\n Missing mask for fit_X_col.\r\n\r\n Returns\r\n -------\r\n imputed_values: ndarray of shape (n_receivers,)\r\n Imputed values for receiver.\r\n \"\"\"\r\n # Get donors\r\n donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1,\r\n axis=1)[:, :n_neighbors]\r\n\r\n # Get weight matrix from from distance matrix\r\n donors_dist = dist_pot_donors[\r\n np.arange(donors_idx.shape[0])[:, None], donors_idx]\r\n\r\n weight_matrix = _get_weights(donors_dist, self.weights)\r\n\r\n # fill nans with zeros\r\n if weight_matrix is not None:\r\n weight_matrix[np.isnan(weight_matrix)] = 0.0\r\n\r\n # Retrieve donor values and calculate kNN average\r\n donors = fit_X_col.take(donors_idx)\r\n donors_mask = mask_fit_X_col.take(donors_idx)\r\n donors = np.ma.array(donors, mask=donors_mask)\r\n\r\n return np.ma.average(donors, axis=1, weights=weight_matrix).data\r\n\r\n def fit(self, X, y=None):\r\n \"\"\"Fit the imputer on X.\r\n\r\n Parameters\r\n ----------\r\n X : array-like shape of (n_samples, n_features)\r\n Input data, where `n_samples` is the number of samples and\r\n `n_features` is the number of features.\r\n\r\n Returns\r\n -------\r\n self : object\r\n \"\"\"\r\n # Check data integrity and calling arguments\r\n if not is_scalar_nan(self.missing_values):\r\n force_all_finite = True\r\n else:\r\n force_all_finite = \"allow-nan\"\r\n if self.metric not in _NAN_METRICS and not callable(self.metric):\r\n raise ValueError(\r\n \"The selected metric does not support NaN values\")\r\n if self.n_neighbors <= 0:\r\n raise ValueError(\r\n \"Expected n_neighbors > 0. 
Got {}\".format(self.n_neighbors))\r\n\r\n X = check_array(X, accept_sparse=False, dtype=FLOAT_DTYPES,\r\n force_all_finite=force_all_finite, copy=self.copy)\r\n super()._fit_indicator(X)\r\n\r\n _check_weights(self.weights)\r\n self._fit_X = X\r\n self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)\r\n return self\r\n\r\n def transform(self, X):\r\n \"\"\"Impute all missing values in X.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n The input data to complete.\r\n\r\n Returns\r\n -------\r\n X : array-like of shape (n_samples, n_output_features)\r\n The imputed dataset. `n_output_features` is the number of features\r\n that is not always missing during `fit`.\r\n \"\"\"\r\n\r\n check_is_fitted(self)\r\n if not is_scalar_nan(self.missing_values):\r\n force_all_finite = True\r\n else:\r\n force_all_finite = \"allow-nan\"\r\n X = check_array(X, accept_sparse=False, dtype=FLOAT_DTYPES,\r\n force_all_finite=force_all_finite, copy=self.copy)\r\n X_indicator = super()._transform_indicator(X)\r\n\r\n if X.shape[1] != self._fit_X.shape[1]:\r\n raise ValueError(\"Incompatible dimension between the fitted \"\r\n \"dataset and the one to be transformed\")\r\n\r\n mask = _get_mask(X, self.missing_values)\r\n mask_fit_X = self._mask_fit_X\r\n valid_mask = ~np.all(mask_fit_X, axis=0)\r\n\r\n if not np.any(mask):\r\n # No missing values in X\r\n # Remove columns where the training data is all nan\r\n return X[:, valid_mask]\r\n\r\n row_missing_idx = np.flatnonzero(mask.any(axis=1))\r\n\r\n non_missing_fix_X = np.logical_not(mask_fit_X)\r\n\r\n # Maps from indices from X to indices in dist matrix\r\n dist_idx_map = np.zeros(X.shape[0], dtype=np.int)\r\n dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])\r\n\r\n def process_chunk(dist_chunk, start):\r\n row_missing_chunk = row_missing_idx[start:start + len(dist_chunk)]\r\n\r\n # Find and impute missing by column\r\n for col in range(X.shape[1]):\r\n if not valid_mask[col]:\r\n # column was all missing during training\r\n continue\r\n\r\n col_mask = mask[row_missing_chunk, col]\r\n if not np.any(col_mask):\r\n # column has no missing values\r\n continue\r\n\r\n potential_donors_idx, = np.nonzero(non_missing_fix_X[:, col])\r\n\r\n # receivers_idx are indices in X\r\n receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]\r\n\r\n # distances for samples that needed imputation for column\r\n dist_subset = (dist_chunk[dist_idx_map[receivers_idx] - start]\r\n [:, potential_donors_idx])\r\n\r\n # receivers with all nan distances impute with mean\r\n all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)\r\n all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]\r\n\r\n if all_nan_receivers_idx.size:\r\n col_mean = np.ma.array(self._fit_X[:, col],\r\n mask=mask_fit_X[:, col]).mean()\r\n X[all_nan_receivers_idx, col] = col_mean\r\n\r\n if len(all_nan_receivers_idx) == len(receivers_idx):\r\n # all receivers imputed with mean\r\n continue\r\n\r\n # receivers with at least one defined distance\r\n receivers_idx = receivers_idx[~all_nan_dist_mask]\r\n dist_subset = (dist_chunk[dist_idx_map[receivers_idx]\r\n - start]\r\n [:, potential_donors_idx])\r\n\r\n n_neighbors = min(self.n_neighbors, len(potential_donors_idx))\r\n value = self._calc_impute(\r\n dist_subset,\r\n n_neighbors,\r\n self._fit_X[potential_donors_idx, col],\r\n mask_fit_X[potential_donors_idx, col])\r\n X[receivers_idx, col] = value\r\n\r\n # process in fixed-memory chunks\r\n gen = pairwise_distances_chunked(\r\n 
X[row_missing_idx, :],\r\n self._fit_X,\r\n metric=self.metric,\r\n missing_values=self.missing_values,\r\n force_all_finite=force_all_finite,\r\n reduce_func=process_chunk)\r\n for chunk in gen:\r\n # process_chunk modifies X in place. No return value.\r\n pass\r\n\r\n return super()._concatenate_indicator(X[:, valid_mask], X_indicator)\r\n"
] | [
[
"sklearn.tree._reingold_tilford.Tree",
"sklearn.tree._reingold_tilford.buchheim",
"numpy.unique"
],
[
"sklearn.utils.assert_all_finite",
"numpy.log",
"numpy.empty_like",
"numpy.median",
"numpy.ones",
"numpy.full_like",
"numpy.zeros_like",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.RandomState",
"numpy.zeros",
"scipy.optimize.newton"
],
[
"pandas._testing.makeTimeDataFrame",
"numpy.random.randn",
"pandas.Index",
"pandas.MultiIndex"
],
[
"pandas.core.dtypes.common.is_list_like",
"pandas.core.groupby.ops.BaseGrouper",
"pandas.core.groupby.categorical.recode_for_groupby",
"pandas.core.dtypes.common.ensure_categorical",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.common.asarray_tuplesafe",
"pandas.core.algorithms.factorize",
"pandas.core.algorithms.unique1d",
"pandas.core.arrays.Categorical.from_codes",
"pandas.core.indexes.api.Index",
"numpy.sort",
"pandas.core.groupby.categorical.recode_from_groupby",
"pandas.core.dtypes.common.is_datetime64_dtype",
"numpy.array",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.dtypes.common.is_categorical_dtype"
],
[
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas._testing.assert_numpy_array_equal",
"pandas.Series",
"pandas.Categorical",
"numpy.arange",
"numpy.random.randn",
"numpy.array",
"numpy.random.randint"
],
[
"pandas._testing.assert_produces_warning",
"pandas.Series",
"pandas.CategoricalDtype",
"pandas.Timestamp",
"pandas.SparseDtype",
"pandas.DataFrame",
"pandas.PeriodDtype",
"numpy.random.randn",
"pandas.Interval",
"pandas.Period",
"pandas.Int64Dtype",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal",
"pandas.IntervalDtype"
],
[
"sklearn.utils.estimator_checks.check_class_weight_balanced_linear_classifier",
"sklearn.utils.estimator_checks._set_check_estimator_ids",
"sklearn.utils.estimator_checks._set_checking_parameters",
"sklearn.utils.estimator_checks.check_parameters_default_constructible",
"sklearn.linear_model.LogisticRegression",
"sklearn.utils._testing.ignore_warnings",
"sklearn.utils.estimator_checks._construct_instance",
"sklearn.utils.estimator_checks.check_estimator",
"sklearn.utils.all_estimators"
],
[
"pandas.tseries.frequencies.to_offset",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas._libs.tslibs.Timestamp",
"pandas.Series",
"pandas._libs.lib.generate_bins_dt64",
"pandas.DataFrame",
"pandas.core.indexes.datetimes.date_range",
"pandas._libs.tslibs.frequencies.is_superperiod",
"pandas._libs.tslibs.period.IncompatibleFrequency",
"pandas.core.indexes.timedeltas.timedelta_range",
"pandas.core.indexes.period.PeriodIndex",
"pandas.util._decorators.Substitution",
"pandas.errors.AbstractMethodError",
"numpy.arange",
"pandas.core.indexes.period.period_range",
"pandas.core.groupby.groupby.get_groupby",
"pandas.core.indexes.timedeltas.TimedeltaIndex",
"numpy.insert",
"pandas.util._decorators.Appender",
"pandas._libs.tslibs.frequencies.is_subperiod",
"pandas._libs.tslibs.Period",
"numpy.sum",
"pandas.core.algorithms.take_1d",
"pandas.tseries.offsets.Nano",
"pandas.compat.numpy.function.validate_resampler_func",
"pandas.core.groupby.ops.BinGrouper"
],
[
"pandas.core.common.flatten",
"pandas.core.dtypes.common.is_list_like",
"pandas._libs.tslibs.Timedelta",
"pandas.compat.chainmap.DeepChainMap",
"pandas._libs.tslibs.Timestamp",
"pandas.Index",
"pandas.core.computation.ops.is_term",
"pandas.core.computation.common._ensure_decoded",
"pandas.core.common.values_from_object",
"pandas.io.formats.printing.pprint_thing"
],
[
"pandas._libs.tslibs.parsing.try_parse_datetime_components",
"pandas._libs.tslibs.parsing.try_parse_year_month_day",
"numpy.array",
"pandas._libs.tslibs.parsing.try_parse_date_and_time",
"numpy.empty"
],
[
"pandas._libs.tslibs.timedeltas.delta_to_nanoseconds",
"numpy.int32",
"pandas.Timedelta",
"numpy.timedelta64",
"numpy.int64",
"pandas.offsets.Nano",
"numpy.array"
],
[
"matplotlib.pyplot.subplots"
],
[
"numpy.asarray",
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.issubdtype",
"pandas.core.dtypes.common.is_dtype_equal",
"numpy.concatenate",
"pandas.core.common.values_from_object",
"pandas.core.common.asarray_tuplesafe",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.ops.get_op_result_name",
"pandas.io.formats.format.FloatArrayFormatter",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.pandas_dtype",
"numpy.isnan",
"numpy.array",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.common.is_bool",
"pandas.core.dtypes.cast.astype_nansafe",
"pandas.core.dtypes.common.is_bool_dtype",
"numpy.array_equal",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.infer_dtype"
],
[
"numpy.asarray",
"pandas._libs.interval.intervals_to_interval_bounds",
"pandas.core.dtypes.missing.notna",
"pandas.core.dtypes.dtypes.IntervalDtype",
"numpy.concatenate",
"pandas._libs.interval.Interval",
"pandas._config.get_option",
"pandas.core.arrays._arrow_utils.ArrowIntervalType",
"pandas.core.indexers.check_array_indexer",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.dtypes.cast.maybe_convert_platform",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_string_dtype",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.indexes.base.ensure_index",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.ndim",
"numpy.timedelta64",
"numpy.array",
"pandas.core.dtypes.common.is_scalar",
"numpy.datetime64",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.common.is_interval",
"pandas.core.dtypes.missing.isna",
"pandas._libs.interval.IntervalMixin.__new__",
"pandas.core.algorithms.take"
],
[
"numpy.array",
"pandas._testing.assert_equal",
"pandas.Series",
"numpy.arange",
"pandas.Index",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays",
"pandas._testing.assert_frame_equal",
"pandas.Timedelta",
"numpy.ones",
"pandas.DatetimeIndex",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"numpy.random.RandomState",
"pandas._testing.assert_index_equal"
],
[
"pandas._config.get_option",
"pandas._libs.lib.values_from_object",
"numpy.prod",
"numpy.errstate"
],
[
"pandas.tseries.offsets.Hour",
"pandas._testing.assert_almost_equal",
"pandas.to_datetime",
"pandas.tseries.frequencies.to_offset",
"pandas.Series",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.core.indexes.period.Period",
"pandas.core.indexes.datetimes.date_range",
"pandas.core.resample._get_timestamp_range_edges",
"numpy.random.randn",
"numpy.searchsorted",
"numpy.mean",
"pandas.DataFrame.from_records",
"pandas.isna",
"pandas._testing.assert_frame_equal",
"pandas.tseries.offsets.BDay",
"numpy.random.randint",
"numpy.ones_like",
"numpy.arange",
"pandas.core.indexes.period.period_range",
"pandas.core.groupby.grouper.Grouper",
"numpy.lexsort",
"pandas.DatetimeIndex",
"numpy.std",
"pandas._testing.assert_series_equal",
"numpy.repeat",
"pandas._testing.assert_index_equal",
"pandas.core.resample.DatetimeIndex",
"numpy.random.choice",
"pandas.Timedelta",
"numpy.timedelta64",
"pandas.date_range",
"numpy.random.RandomState",
"numpy.array",
"pandas.period_range",
"pandas.Grouper",
"pandas.tseries.offsets.Nano",
"pandas.Period",
"pandas._testing.makeTimeDataFrame",
"pandas.tseries.offsets.Minute",
"pandas.Timestamp"
],
[
"scipy.special.expit",
"numpy.finfo",
"numpy.append",
"numpy.tanh",
"numpy.exp",
"scipy.special.xlogy"
],
[
"numpy.logical_not",
"numpy.nonzero",
"numpy.isnan",
"numpy.arange",
"numpy.flatnonzero",
"numpy.all",
"numpy.argpartition",
"numpy.any",
"numpy.ma.array",
"numpy.zeros",
"numpy.ma.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"1.0",
"0.24",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samueljamesbell/scikit-optimize | [
"c53998816481d150ccc745ffd07d022fdb1fd25d",
"c53998816481d150ccc745ffd07d022fdb1fd25d",
"c53998816481d150ccc745ffd07d022fdb1fd25d",
"c53998816481d150ccc745ffd07d022fdb1fd25d"
] | [
"examples/utils.py",
"skopt/benchmarks.py",
"examples/plots/partial-dependence-plot.py",
"skopt/tests/test_optimizer.py"
] | [
"# Module to import functions from in examples for multiprocessing backend\nimport numpy as np\n\n\ndef obj_fun(x, noise_level=0.1):\n return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) +\\\n np.random.randn() * noise_level\n",
"# -*- coding: utf-8 -*-\n\"\"\"A collection of benchmark problems.\"\"\"\n\nimport numpy as np\n\n\ndef bench1(x):\n \"\"\"A benchmark function for test purposes.\n\n f(x) = x ** 2\n\n It has a single minima with f(x*) = 0 at x* = 0.\n \"\"\"\n return x[0] ** 2\n\n\ndef bench1_with_time(x):\n \"\"\"Same as bench1 but returns the computation time (constant).\"\"\"\n return x[0] ** 2, 2.22\n\n\ndef bench2(x):\n \"\"\"A benchmark function for test purposes.\n\n f(x) = x ** 2 if x < 0\n (x-5) ** 2 - 5 otherwise.\n\n It has a global minima with f(x*) = -5 at x* = 5.\n \"\"\"\n if x[0] < 0:\n return x[0] ** 2\n else:\n return (x[0] - 5) ** 2 - 5\n\n\ndef bench3(x):\n \"\"\"A benchmark function for test purposes.\n\n f(x) = sin(5*x) * (1 - tanh(x ** 2))\n\n It has a global minima with f(x*) ~= -0.9 at x* ~= -0.3.\n \"\"\"\n return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))\n\n\ndef bench4(x):\n \"\"\"A benchmark function for test purposes.\n\n f(x) = float(x) ** 2\n\n where x is a string. It has a single minima with f(x*) = 0 at x* = \"0\".\n This benchmark is used for checking support of categorical variables.\n \"\"\"\n return float(x[0]) ** 2\n\n\ndef bench5(x):\n \"\"\"A benchmark function for test purposes.\n\n f(x) = float(x[0]) ** 2 + x[1] ** 2\n\n where x is a string. It has a single minima with f(x) = 0 at x[0] = \"0\"\n and x[1] = \"0\"\n This benchmark is used for checking support of mixed spaces.\n \"\"\"\n return float(x[0]) ** 2 + x[1] ** 2\n\n\ndef branin(x, a=1, b=5.1 / (4 * np.pi ** 2), c=5. / np.pi,\n r=6, s=10, t=1. / (8 * np.pi)):\n \"\"\"Branin-Hoo function is defined on the square\n :math:`x1 \\\\in [-5, 10], x2 \\\\in [0, 15]`.\n\n It has three minima with f(x*) = 0.397887 at x* = (-pi, 12.275),\n (+pi, 2.275), and (9.42478, 2.475).\n\n More details: <http://www.sfu.ca/~ssurjano/branin.html>\n \"\"\"\n return (a * (x[1] - b * x[0] ** 2 + c * x[0] - r) ** 2 +\n s * (1 - t) * np.cos(x[0]) + s)\n\n\ndef hart6(x,\n alpha=np.asarray([1.0, 1.2, 3.0, 3.2]),\n P=10 ** -4 * np.asarray([[1312, 1696, 5569, 124, 8283, 5886],\n [2329, 4135, 8307, 3736, 1004, 9991],\n [2348, 1451, 3522, 2883, 3047, 6650],\n [4047, 8828, 8732, 5743, 1091, 381]]),\n A=np.asarray([[10, 3, 17, 3.50, 1.7, 8],\n [0.05, 10, 17, 0.1, 8, 14],\n [3, 3.5, 1.7, 10, 17, 8],\n [17, 8, 0.05, 10, 0.1, 14]])):\n \"\"\"The six dimensional Hartmann function is defined on the unit hypercube.\n\n It has six local minima and one global minimum f(x*) = -3.32237 at\n x* = (0.20169, 0.15001, 0.476874, 0.275332, 0.311652, 0.6573).\n\n More details: <http://www.sfu.ca/~ssurjano/hart6.html>\n \"\"\"\n return -np.sum(alpha * np.exp(-np.sum(A * (np.array(x) - P) ** 2, axis=1)))\n",
"\"\"\"\n========================\nPartial Dependence Plots\n========================\n\nSigurd Carlsen Feb 2019\nHolger Nahrstaedt 2020\n\n.. currentmodule:: skopt\n\nPlot objective now supports optional use of partial dependence as well as\ndifferent methods of defining parameter values for dependency plots.\n\"\"\"\nprint(__doc__)\nimport sys\nfrom skopt.plots import plot_objective\nfrom skopt import forest_minimize\nimport numpy as np\nnp.random.seed(123)\nimport matplotlib.pyplot as plt\n\n#############################################################################\n# Objective function\n# ==================\n# Plot objective now supports optional use of partial dependence as well as\n# different methods of defining parameter values for dependency plots\n\n# Here we define a function that we evaluate.\ndef funny_func(x):\n s = 0\n for i in range(len(x)):\n s += (x[i] * i) ** 2\n return s\n\n#############################################################################\n# Optimisation using decision trees\n# =================================\n# We run forest_minimize on the function\nbounds = [(-1, 1.), ] * 3\nn_calls = 150\n\nresult = forest_minimize(funny_func, bounds, n_calls=n_calls,\n base_estimator=\"ET\",\n random_state=4)\n\n#############################################################################\n# Partial dependence plot\n# =======================\n# Here we see an example of using partial dependence. Even when setting\n# n_points all the way down to 10 from the default of 40, this method is\n# still very slow. This is because partial dependence calculates 250 extra\n# predictions for each point on the plots.\n\n\n_ = plot_objective(result, n_points=10)\n\n#############################################################################\n# It is possible to change the location of the red dot, which normally shows\n# the position of the found minimum. We can set it 'expected_minimum',\n# which is the minimum value of the surrogate function, obtained by a\n# minimum search method.\n\n_ = plot_objective(result, n_points=10, minimum='expected_minimum')\n#############################################################################\n# Plot without partial dependence\n# ===============================\n# Here we plot without partial dependence. We see that it is a lot faster.\n# Also the values for the other parameters are set to the default \"result\"\n# which is the parameter set of the best observed value so far. In the case\n# of funny_func this is close to 0 for all parameters.\n\n_ = plot_objective(result, sample_source='result', n_points=10)\n\n#############################################################################\n# Modify the shown minimum\n# ========================\n# Here we try with setting the `minimum` parameters to something other than\n# \"result\". 
First we try with \"expected_minimum\" which is the set of\n# parameters that gives the miniumum value of the surrogate function,\n# using scipys minimum search method.\n\n_ = plot_objective(result, n_points=10, sample_source='expected_minimum',\n minimum='expected_minimum')\n\n#############################################################################\n# \"expected_minimum_random\" is a naive way of finding the minimum of the\n# surrogate by only using random sampling:\n\n_ = plot_objective(result, n_points=10, sample_source='expected_minimum_random',\n minimum='expected_minimum_random')\n\n#############################################################################\n# We can also specify how many initial samples are used for the two different\n# \"expected_minimum\" methods. We set it to a low value in the next examples\n# to showcase how it affects the minimum for the two methods.\n\n_ = plot_objective(result, n_points=10, sample_source='expected_minimum_random',\n minimum='expected_minimum_random',\n n_minimum_search=10)\n\n#############################################################################\n\n_ = plot_objective(result, n_points=10, sample_source=\"expected_minimum\",\n minimum='expected_minimum', n_minimum_search=2)\n\n#############################################################################\n# Set a minimum location\n# ======================\n# Lastly we can also define these parameters ourself by parsing a list\n# as the minimum argument:\n\n_ = plot_objective(result, n_points=10, sample_source=[1, -0.5, 0.5],\n minimum=[1, -0.5, 0.5])\n\n\n",
"import numpy as np\nimport pytest\n\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom numpy.testing import assert_array_equal\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_raises\n\nfrom skopt import gp_minimize\nfrom skopt import forest_minimize\nfrom skopt.benchmarks import bench1, bench1_with_time\nfrom skopt.benchmarks import branin\nfrom skopt.learning import ExtraTreesRegressor, RandomForestRegressor\nfrom skopt.learning import GradientBoostingQuantileRegressor\nfrom skopt.optimizer import Optimizer\nfrom scipy.optimize import OptimizeResult\n\n\nTREE_REGRESSORS = (ExtraTreesRegressor(random_state=2),\n RandomForestRegressor(random_state=2),\n GradientBoostingQuantileRegressor(random_state=2))\nACQ_FUNCS_PS = [\"EIps\", \"PIps\"]\nACQ_FUNCS_MIXED = [\"EI\", \"EIps\"]\nESTIMATOR_STRINGS = [\"GP\", \"RF\", \"ET\", \"GBRT\", \"DUMMY\",\n \"gp\", \"rf\", \"et\", \"gbrt\", \"dummy\"]\n\n\[email protected]_test\ndef test_multiple_asks():\n # calling ask() multiple times without a tell() inbetween should\n # be a \"no op\"\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,\n acq_optimizer=\"sampling\")\n\n opt.run(bench1, n_iter=3)\n # tell() computes the next point ready for the next call to ask()\n # hence there are three after three iterations\n assert_equal(len(opt.models), 3)\n assert_equal(len(opt.Xi), 3)\n opt.ask()\n assert_equal(len(opt.models), 3)\n assert_equal(len(opt.Xi), 3)\n assert_equal(opt.ask(), opt.ask())\n opt.update_next()\n assert_equal(opt.ask(), opt.ask())\n\n\[email protected]_test\ndef test_model_queue_size():\n # Check if model_queue_size limits the model queue size\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,\n acq_optimizer=\"sampling\", model_queue_size=2)\n\n opt.run(bench1, n_iter=3)\n # tell() computes the next point ready for the next call to ask()\n # hence there are three after three iterations\n assert_equal(len(opt.models), 2)\n assert_equal(len(opt.Xi), 3)\n opt.ask()\n assert_equal(len(opt.models), 2)\n assert_equal(len(opt.Xi), 3)\n assert_equal(opt.ask(), opt.ask())\n\n\[email protected]_test\ndef test_invalid_tell_arguments():\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,\n acq_optimizer=\"sampling\")\n\n # can't have single point and multiple values for y\n assert_raises(ValueError, opt.tell, [1.], [1., 1.])\n\n\[email protected]_test\ndef test_invalid_tell_arguments_list():\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,\n acq_optimizer=\"sampling\")\n\n assert_raises(ValueError, opt.tell, [[1.], [2.]], [1., None])\n\n\[email protected]_test\ndef test_bounds_checking_1D():\n low = -2.\n high = 2.\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(low, high)], base_estimator, n_initial_points=1,\n acq_optimizer=\"sampling\")\n\n assert_raises(ValueError, opt.tell, [high + 0.5], 2.)\n assert_raises(ValueError, opt.tell, [low - 0.5], 2.)\n # feed two points to tell() at once\n assert_raises(ValueError, opt.tell, [high + 0.5, high], (2., 3.))\n assert_raises(ValueError, opt.tell, [low - 0.5, high], (2., 3.))\n\n\[email protected]_test\ndef test_bounds_checking_2D():\n low = -2.\n high = 2.\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(low, high), (low+4, 
high+4)], base_estimator,\n n_initial_points=1, acq_optimizer=\"sampling\")\n\n assert_raises(ValueError, opt.tell, [high + 0.5, high + 4.5], 2.)\n assert_raises(ValueError, opt.tell, [low - 0.5, low - 4.5], 2.)\n\n # first out, second in\n assert_raises(ValueError, opt.tell, [high + 0.5, high + 0.5], 2.)\n assert_raises(ValueError, opt.tell, [low - 0.5, high + 0.5], 2.)\n\n\[email protected]_test\ndef test_bounds_checking_2D_multiple_points():\n low = -2.\n high = 2.\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(low, high), (low+4, high+4)], base_estimator,\n n_initial_points=1, acq_optimizer=\"sampling\")\n\n # first component out, second in\n assert_raises(ValueError, opt.tell,\n [(high + 0.5, high + 0.5), (high + 0.5, high + 0.5)],\n [2., 3.])\n assert_raises(ValueError, opt.tell,\n [(low - 0.5, high + 0.5), (low - 0.5, high + 0.5)],\n [2., 3.])\n\n\[email protected]_test\ndef test_dimension_checking_1D():\n low = -2\n high = 2\n opt = Optimizer([(low, high)])\n with pytest.raises(ValueError) as e:\n # within bounds but one dimension too high\n opt.tell([low+1, low+1], 2.)\n assert \"Dimensions of point \" in str(e.value)\n\n\[email protected]_test\ndef test_dimension_checking_2D():\n low = -2\n high = 2\n opt = Optimizer([(low, high), (low, high)])\n # within bounds but one dimension too little\n with pytest.raises(ValueError) as e:\n opt.tell([low+1, ], 2.)\n assert \"Dimensions of point \" in str(e.value)\n # within bounds but one dimension too much\n with pytest.raises(ValueError) as e:\n opt.tell([low+1, low+1, low+1], 2.)\n assert \"Dimensions of point \" in str(e.value)\n\n\[email protected]_test\ndef test_dimension_checking_2D_multiple_points():\n low = -2\n high = 2\n opt = Optimizer([(low, high), (low, high)])\n # within bounds but one dimension too little\n with pytest.raises(ValueError) as e:\n opt.tell([[low+1, ], [low+1, low+2], [low+1, low+3]], 2.)\n assert \"dimensions as the space\" in str(e.value)\n # within bounds but one dimension too much\n with pytest.raises(ValueError) as e:\n opt.tell([[low + 1, low + 1, low + 1], [low + 1, low + 2],\n [low + 1, low + 3]], 2.)\n assert \"dimensions as the space\" in str(e.value)\n\n\[email protected]_test\ndef test_returns_result_object():\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,\n acq_optimizer=\"sampling\")\n result = opt.tell([1.5], 2.)\n\n assert isinstance(result, OptimizeResult)\n assert_equal(len(result.x_iters), len(result.func_vals))\n assert_equal(np.min(result.func_vals), result.fun)\n\n\[email protected]_test\[email protected](\"base_estimator\", TREE_REGRESSORS)\ndef test_acq_optimizer(base_estimator):\n with pytest.raises(ValueError) as e:\n Optimizer([(-2.0, 2.0)], base_estimator=base_estimator,\n n_initial_points=1, acq_optimizer='lbfgs')\n assert \"should run with acq_optimizer='sampling'\" in str(e.value)\n\n\[email protected](\"base_estimator\", TREE_REGRESSORS)\[email protected](\"acq_func\", ACQ_FUNCS_PS)\ndef test_acq_optimizer_with_time_api(base_estimator, acq_func):\n opt = Optimizer([(-2.0, 2.0),], base_estimator=base_estimator,\n acq_func=acq_func,\n acq_optimizer=\"sampling\", n_initial_points=2)\n x1 = opt.ask()\n opt.tell(x1, (bench1(x1), 1.0))\n x2 = opt.ask()\n res = opt.tell(x2, (bench1(x2), 2.0))\n\n # x1 and x2 are random.\n assert x1 != x2\n\n assert len(res.models) == 1\n assert_array_equal(res.func_vals.shape, (2,))\n assert_array_equal(res.log_time.shape, (2,))\n\n # x3 = 
opt.ask()\n\n with pytest.raises(TypeError) as e:\n opt.tell(x2, bench1(x2))\n\n\[email protected]_test\[email protected](\"acq_func\", ACQ_FUNCS_MIXED)\ndef test_optimizer_copy(acq_func):\n # Checks that the base estimator, the objective and target values\n # are copied correctly.\n\n base_estimator = ExtraTreesRegressor(random_state=2)\n opt = Optimizer([(-2.0, 2.0)], base_estimator, acq_func=acq_func,\n n_initial_points=1, acq_optimizer=\"sampling\")\n\n # run three iterations so that we have some points and objective values\n if \"ps\" in acq_func:\n opt.run(bench1_with_time, n_iter=3)\n else:\n opt.run(bench1, n_iter=3)\n\n opt_copy = opt.copy()\n\n copied_estimator = opt_copy.base_estimator_\n\n if \"ps\" in acq_func:\n assert isinstance(copied_estimator, MultiOutputRegressor)\n # check that the base_estimator is not wrapped multiple times\n is_multi = isinstance(copied_estimator.estimator,\n MultiOutputRegressor)\n assert not is_multi\n else:\n assert not isinstance(copied_estimator, MultiOutputRegressor)\n\n assert_array_equal(opt_copy.Xi, opt.Xi)\n assert_array_equal(opt_copy.yi, opt.yi)\n\n\[email protected](\"base_estimator\", ESTIMATOR_STRINGS)\ndef test_exhaust_initial_calls(base_estimator):\n # check a model is fitted and used to make suggestions after we added\n # at least n_initial_points via tell()\n opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=2,\n acq_optimizer=\"sampling\", random_state=1)\n\n x0 = opt.ask() # random point\n x1 = opt.ask() # random point\n assert x0 != x1\n # first call to tell()\n r1 = opt.tell(x1, 3.)\n assert len(r1.models) == 0\n x2 = opt.ask() # random point\n assert x1 != x2\n # second call to tell()\n r2 = opt.tell(x2, 4.)\n if base_estimator.lower() == 'dummy':\n assert len(r2.models) == 0\n else:\n assert len(r2.models) == 1\n # this is the first non-random point\n x3 = opt.ask()\n assert x2 != x3\n x4 = opt.ask()\n r3 = opt.tell(x3, 1.)\n # no new information was added so should be the same, unless we are using\n # the dummy estimator which will forever return random points and never\n # fits any models\n if base_estimator.lower() == 'dummy':\n assert x3 != x4\n assert len(r3.models) == 0\n else:\n assert x3 == x4\n assert len(r3.models) == 2\n\n\[email protected]_test\ndef test_optimizer_base_estimator_string_invalid():\n with pytest.raises(ValueError) as e:\n Optimizer([(-2.0, 2.0)], base_estimator=\"rtr\",\n n_initial_points=1)\n assert \"'RF', 'ET', 'GP', 'GBRT' or 'DUMMY'\" in str(e.value)\n\n\[email protected]_test\[email protected](\"base_estimator\", ESTIMATOR_STRINGS)\ndef test_optimizer_base_estimator_string_smoke(base_estimator):\n opt = Optimizer([(-2.0, 2.0)], base_estimator=base_estimator,\n n_initial_points=2, acq_func=\"EI\")\n opt.run(func=lambda x: x[0]**2, n_iter=3)\n\n\[email protected]_test\ndef test_optimizer_base_estimator_string_smoke_njobs():\n opt = Optimizer([(-2.0, 2.0)], base_estimator=\"GBRT\",\n n_initial_points=1, acq_func=\"EI\", n_jobs=-1)\n opt.run(func=lambda x: x[0]**2, n_iter=3)\n\n\ndef test_defaults_are_equivalent():\n # check that the defaults of Optimizer reproduce the defaults of\n # gp_minimize\n space = [(-5., 10.), (0., 15.)]\n #opt = Optimizer(space, 'ET', acq_func=\"EI\", random_state=1)\n opt = Optimizer(space, random_state=1)\n\n for n in range(12):\n x = opt.ask()\n res_opt = opt.tell(x, branin(x))\n\n #res_min = forest_minimize(branin, space, n_calls=12, random_state=1)\n res_min = gp_minimize(branin, space, n_calls=12, random_state=1)\n\n assert res_min.space == 
res_opt.space\n # tolerate small differences in the points sampled\n assert np.allclose(res_min.x_iters, res_opt.x_iters)#, atol=1e-5)\n assert np.allclose(res_min.x, res_opt.x)#, atol=1e-5)\n\n res_opt2 = opt.get_result()\n assert np.allclose(res_min.x_iters, res_opt2.x_iters) # , atol=1e-5)\n assert np.allclose(res_min.x, res_opt2.x) # , atol=1e-5)\n\n\[email protected]_test\ndef test_dimensions_names():\n from skopt.space import Real, Categorical, Integer\n # create search space and optimizer\n space = [Real(0, 1, name='real'),\n Categorical(['a', 'b', 'c'], name='cat'),\n Integer(0, 1, name='int')]\n opt = Optimizer(space, n_initial_points=2)\n # result of the optimizer missing dimension names\n result = opt.tell([(0.5, 'a', 0.5)], [3])\n names = []\n for d in result.space.dimensions:\n names.append(d.name)\n assert len(names) == 3\n assert \"real\" in names\n assert \"cat\" in names\n assert \"int\" in names\n assert None not in names\n\n\[email protected]_test\ndef test_categorical_only():\n from skopt.space import Categorical\n cat1 = Categorical([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n cat2 = Categorical([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n\n opt = Optimizer([cat1, cat2])\n for n in range(15):\n x = opt.ask()\n res = opt.tell(x, 12 * n)\n assert len(res.x_iters) == 15\n next_x = opt.ask(n_points=4)\n assert len(next_x) == 4\n\n cat3 = Categorical([\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\"])\n cat4 = Categorical([\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\"])\n\n opt = Optimizer([cat3, cat4])\n for n in range(15):\n x = opt.ask()\n res = opt.tell(x, 12 * n)\n assert len(res.x_iters) == 15\n next_x = opt.ask(n_points=4)\n assert len(next_x) == 4\n\n\ndef test_categorical_only2():\n from numpy import linalg\n from skopt.space import Categorical\n from skopt.learning import GaussianProcessRegressor\n space = [Categorical([1, 2, 3]), Categorical([4, 5, 6])]\n opt = Optimizer(space,\n base_estimator=GaussianProcessRegressor(alpha=1e-7),\n acq_optimizer='lbfgs',\n n_initial_points=10,\n n_jobs=2)\n\n next_x = opt.ask(n_points=4)\n assert len(next_x) == 4\n opt.tell(next_x, [linalg.norm(x) for x in next_x])\n next_x = opt.ask(n_points=4)\n assert len(next_x) == 4\n opt.tell(next_x, [linalg.norm(x) for x in next_x])\n next_x = opt.ask(n_points=4)\n assert len(next_x) == 4\n"
] | [
[
"numpy.tanh",
"numpy.random.randn",
"numpy.sin"
],
[
"numpy.asarray",
"numpy.cos",
"numpy.sin",
"numpy.tanh",
"numpy.array"
],
[
"numpy.random.seed"
],
[
"numpy.allclose",
"numpy.min",
"numpy.linalg.norm",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_raises"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
smiledinisa/SimpleCVReproduction | [
"c6ac180887472a920d86b6eb15f933294b65dcec"
] | [
"RL/actor_critic.py"
] | [
"import argparse\nimport gym\nimport numpy as np\nfrom itertools import count\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.distributions import Categorical\n\n# Cart Pole\n\nparser = argparse.ArgumentParser(description='PyTorch actor-critic example')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor (default: 0.99)')\nparser.add_argument('--seed', type=int, default=543, metavar='N',\n help='random seed (default: 543)')\nparser.add_argument('--render', action='store_true',\n help='render the environment')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='interval between training status logs (default: 10)')\nargs = parser.parse_args()\n\n\nenv = gym.make('CartPole-v0')\nenv.seed(args.seed)\ntorch.manual_seed(args.seed)\n\n\nSavedAction = namedtuple('SavedAction', ['log_prob', 'value'])\n\n\nclass Policy(nn.Module):\n \"\"\"\n implements both actor and critic in one model\n \"\"\"\n def __init__(self):\n super(Policy, self).__init__()\n self.affine1 = nn.Linear(4, 128)\n\n # actor's layer\n self.action_head = nn.Linear(128, 2)\n\n # critic's layer\n self.value_head = nn.Linear(128, 1)\n\n # action & reward buffer\n self.saved_actions = []\n self.rewards = []\n\n def forward(self, x):\n \"\"\"\n forward of both actor and critic\n \"\"\"\n x = F.relu(self.affine1(x))\n\n # actor: choses action to take from state s_t \n # by returning probability of each action\n action_prob = F.softmax(self.action_head(x), dim=-1)\n\n # critic: evaluates being in the state s_t\n state_values = self.value_head(x)\n\n # return values for both actor and critic as a tuple of 2 values:\n # 1. a list with the probability of each action over the action space\n # 2. the value from state s_t \n return action_prob, state_values\n\n\nmodel = Policy()\noptimizer = optim.Adam(model.parameters(), lr=3e-2)\neps = np.finfo(np.float32).eps.item()\n\n\ndef select_action(state):\n state = torch.from_numpy(state).float()\n probs, state_value = model(state)\n\n # create a categorical distribution over the list of probabilities of actions\n m = Categorical(probs)\n\n # and sample an action using the distribution\n action = m.sample()\n\n # save to action buffer\n model.saved_actions.append(SavedAction(m.log_prob(action), state_value))\n\n # the action to take (left or right)\n return action.item()\n\n\ndef finish_episode():\n \"\"\"\n Training code. 
Calculates actor and critic loss and performs backprop.\n \"\"\"\n R = 0\n saved_actions = model.saved_actions\n policy_losses = [] # list to save actor (policy) loss\n value_losses = [] # list to save critic (value) loss\n returns = [] # list to save the true values\n\n # calculate the true value using rewards returned from the environment\n for r in model.rewards[::-1]:\n # calculate the discounted value\n R = r + args.gamma * R\n returns.insert(0, R)\n\n returns = torch.tensor(returns)\n returns = (returns - returns.mean()) / (returns.std() + eps)\n\n for (log_prob, value), R in zip(saved_actions, returns):\n advantage = R - value.item()\n\n # calculate actor (policy) loss \n policy_losses.append(-log_prob * advantage)\n\n # calculate critic (value) loss using L1 smooth loss\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R])))\n\n # reset gradients\n optimizer.zero_grad()\n\n # sum up all the values of policy_losses and value_losses\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()\n\n # perform backprop\n loss.backward()\n optimizer.step()\n\n # reset rewards and action buffer\n del model.rewards[:]\n del model.saved_actions[:]\n\n\ndef main():\n running_reward = 10\n\n # run inifinitely many episodes\n for i_episode in count(1):\n\n # reset environment and episode reward\n state = env.reset()\n ep_reward = 0\n\n # for each episode, only run 9999 steps so that we don't \n # infinite loop while learning\n for t in range(1, 20000):\n\n # select action from policy\n action = select_action(state)\n\n # take the action\n state, reward, done, _ = env.step(action)\n\n if args.render:\n env.render()\n\n model.rewards.append(reward)\n ep_reward += reward\n if done:\n break\n\n # update cumulative reward\n running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward\n\n # perform backprop\n finish_episode()\n\n # log results\n if i_episode % args.log_interval == 0:\n print('Episode {}\\tLast reward: {:.2f}\\tAverage reward: {:.2f}'.format(\n i_episode, ep_reward, running_reward))\n\n # check if we have \"solved\" the cart pole problem\n if running_reward > env.spec.reward_threshold:\n print(\"Solved! Running reward is now {} and \"\n \"the last episode runs to {} time steps!\".format(running_reward, t))\n break\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.manual_seed",
"torch.from_numpy",
"numpy.finfo",
"torch.tensor",
"torch.nn.Linear",
"torch.distributions.Categorical",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mjchi7/CS234 | [
"c476714aafcd880c4d7707799d9556dfb2de24a6"
] | [
"assignment2/core/deep_q_learning.py"
] | [
"import os\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport time\r\n\r\nfrom q_learning import QN\r\n\r\n\r\nclass DQN(QN):\r\n \"\"\"\r\n Abstract class for Deep Q Learning\r\n \"\"\"\r\n def add_placeholders_op(self):\r\n raise NotImplementedError\r\n\r\n\r\n def get_q_values_op(self, scope, reuse=False):\r\n \"\"\"\r\n set Q values, of shape = (batch_size, num_actions)\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n\r\n def add_update_target_op(self, q_scope, target_q_scope):\r\n \"\"\"\r\n Update_target_op will be called periodically \r\n to copy Q network to target Q network\r\n \r\n Args:\r\n q_scope: name of the scope of variables for q\r\n target_q_scope: name of the scope of variables for the target\r\n network\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n\r\n def add_loss_op(self, q, target_q):\r\n \"\"\"\r\n Set (Q_target - Q)^2\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n\r\n def add_optimizer_op(self, scope):\r\n \"\"\"\r\n Set training op wrt to loss for variable in scope\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n\r\n def process_state(self, state):\r\n \"\"\"\r\n Processing of state\r\n\r\n State placeholders are tf.uint8 for fast transfer to GPU\r\n Need to cast it to float32 for the rest of the tf graph.\r\n\r\n Args:\r\n state: node of tf graph of shape = (batch_size, height, width, nchannels)\r\n of type tf.uint8.\r\n if , values are between 0 and 255 -> 0 and 1\r\n \"\"\"\r\n state = tf.cast(state, tf.float32)\r\n state /= self.config.high\r\n\r\n return state\r\n\r\n\r\n def build(self):\r\n \"\"\"\r\n Build model by adding all necessary variables\r\n \"\"\"\r\n # add placeholders\r\n self.add_placeholders_op()\r\n\r\n # compute Q values of state\r\n s = self.process_state(self.s)\r\n self.q = self.get_q_values_op(s, scope=\"q\", reuse=False)\r\n\r\n # compute Q values of next state\r\n sp = self.process_state(self.sp)\r\n self.target_q = self.get_q_values_op(sp, scope=\"target_q\", reuse=False)\r\n\r\n # add update operator for target network\r\n self.add_update_target_op(\"q\", \"target_q\")\r\n\r\n # add square loss\r\n self.add_loss_op(self.q, self.target_q)\r\n\r\n # add optmizer for the main networks\r\n self.add_optimizer_op(\"q\")\r\n\r\n\r\n def initialize(self):\r\n \"\"\"\r\n Assumes the graph has been constructed\r\n Creates a tf Session and run initializer of variables\r\n \"\"\"\r\n # create tf session\r\n self.sess = tf.Session()\r\n\r\n # tensorboard stuff\r\n self.add_summary()\r\n\r\n # initiliaze all variables\r\n init = tf.global_variables_initializer()\r\n self.sess.run(init)\r\n\r\n # synchronise q and target_q networks\r\n self.sess.run(self.update_target_op)\r\n\r\n # for saving networks weights\r\n self.saver = tf.train.Saver()\r\n\r\n \r\n def add_summary(self):\r\n \"\"\"\r\n Tensorboard stuff\r\n \"\"\"\r\n # extra placeholders to log stuff from python\r\n self.avg_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"avg_reward\")\r\n self.max_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"max_reward\")\r\n self.std_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"std_reward\")\r\n\r\n self.avg_q_placeholder = tf.placeholder(tf.float32, shape=(), name=\"avg_q\")\r\n self.max_q_placeholder = tf.placeholder(tf.float32, shape=(), name=\"max_q\")\r\n self.std_q_placeholder = tf.placeholder(tf.float32, shape=(), name=\"std_q\")\r\n\r\n self.eval_reward_placeholder = tf.placeholder(tf.float32, shape=(), name=\"eval_reward\")\r\n\r\n # add placeholders from the graph\r\n 
tf.summary.scalar(\"loss\", self.loss)\r\n tf.summary.scalar(\"grads norm\", self.grad_norm)\r\n\r\n # extra summaries from python -> placeholders\r\n tf.summary.scalar(\"Avg Reward\", self.avg_reward_placeholder)\r\n tf.summary.scalar(\"Max Reward\", self.max_reward_placeholder)\r\n tf.summary.scalar(\"Std Reward\", self.std_reward_placeholder)\r\n\r\n tf.summary.scalar(\"Avg Q\", self.avg_q_placeholder)\r\n tf.summary.scalar(\"Max Q\", self.max_q_placeholder)\r\n tf.summary.scalar(\"Std Q\", self.std_q_placeholder)\r\n\r\n tf.summary.scalar(\"Eval Reward\", self.eval_reward_placeholder)\r\n \r\n # logging\r\n self.merged = tf.summary.merge_all()\r\n self.file_writer = tf.summary.FileWriter(self.config.output_path, \r\n self.sess.graph)\r\n\r\n\r\n\r\n def save(self):\r\n \"\"\"\r\n Saves session\r\n \"\"\"\r\n if not os.path.exists(self.config.model_output):\r\n os.makedirs(self.config.model_output)\r\n\r\n self.saver.save(self.sess, self.config.model_output)\r\n\r\n\r\n def get_best_action(self, state):\r\n \"\"\"\r\n Return best action\r\n\r\n Args:\r\n state: 4 consecutive observations from gym\r\n Returns:\r\n action: (int)\r\n action_values: (np array) q values for all actions\r\n \"\"\"\r\n action_values = self.sess.run(self.q, feed_dict={self.s: [state]})[0]\r\n return np.argmax(action_values), action_values\r\n\r\n\r\n def update_step(self, t, replay_buffer, lr):\r\n \"\"\"\r\n Performs an update of parameters by sampling from replay_buffer\r\n\r\n Args:\r\n t: number of iteration (episode and move)\r\n replay_buffer: ReplayBuffer instance .sample() gives batches\r\n lr: (float) learning rate\r\n Returns:\r\n loss: (Q - Q_target)^2\r\n \"\"\"\r\n\r\n s_batch, a_batch, r_batch, sp_batch, done_mask_batch = replay_buffer.sample(\r\n self.config.batch_size)\r\n\r\n\r\n fd = {\r\n # inputs\r\n self.s: s_batch,\r\n self.a: a_batch,\r\n self.r: r_batch,\r\n self.sp: sp_batch, \r\n self.done_mask: done_mask_batch,\r\n self.lr: lr, \r\n # extra info\r\n self.avg_reward_placeholder: self.avg_reward, \r\n self.max_reward_placeholder: self.max_reward, \r\n self.std_reward_placeholder: self.std_reward, \r\n self.avg_q_placeholder: self.avg_q, \r\n self.max_q_placeholder: self.max_q, \r\n self.std_q_placeholder: self.std_q, \r\n self.eval_reward_placeholder: self.eval_reward, \r\n }\r\n\r\n loss_eval, grad_norm_eval, summary, _ = self.sess.run([self.loss, self.grad_norm, \r\n self.merged, self.train_op], feed_dict=fd)\r\n\r\n\r\n # tensorboard stuff\r\n self.file_writer.add_summary(summary, t)\r\n \r\n return loss_eval, grad_norm_eval\r\n\r\n\r\n def update_target_params(self):\r\n \"\"\"\r\n Update parametes of Q' with parameters of Q\r\n \"\"\"\r\n self.sess.run(self.update_target_op)\r\n\r\n"
] | [
[
"tensorflow.summary.FileWriter",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.argmax",
"tensorflow.summary.merge_all",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.summary.scalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
shkarupa-alex/segme | [
"d5bc0043f9e709c8ccaf8949d662bc6fd6144006",
"d5bc0043f9e709c8ccaf8949d662bc6fd6144006",
"d5bc0043f9e709c8ccaf8949d662bc6fd6144006",
"d5bc0043f9e709c8ccaf8949d662bc6fd6144006",
"d5bc0043f9e709c8ccaf8949d662bc6fd6144006"
] | [
"segme/model/f3_net/model.py",
"segme/common/tests/test_adppool.py",
"segme/testing_utils.py",
"segme/loss/laplacian_pyramid.py",
"segme/metric/tests/test_grad.py"
] | [
"import tensorflow as tf\nfrom keras import models, layers\nfrom keras.utils.generic_utils import register_keras_serializable\nfrom keras.utils.tf_utils import shape_type_conversion\nfrom .decoder import Decoder\nfrom ...backbone import Backbone\nfrom ...common import ConvBnRelu, HeadActivation, HeadProjection, resize_by_sample\n\n\n@register_keras_serializable(package='SegMe>F3Net')\nclass F3Net(layers.Layer):\n def __init__(self, classes, bone_arch, bone_init, bone_train, filters, **kwargs):\n super().__init__(**kwargs)\n self.input_spec = layers.InputSpec(ndim=4, dtype='uint8')\n self.classes = classes\n self.bone_arch = bone_arch\n self.bone_init = bone_init\n self.bone_train = bone_train\n self.filters = filters\n\n @shape_type_conversion\n def build(self, input_shape):\n self.bone = Backbone(self.bone_arch, self.bone_init, self.bone_train, scales=[4, 8, 16, 32])\n\n self.squeeze2 = ConvBnRelu(self.filters, 1, kernel_initializer='he_normal')\n self.squeeze3 = ConvBnRelu(self.filters, 1, kernel_initializer='he_normal')\n self.squeeze4 = ConvBnRelu(self.filters, 1, kernel_initializer='he_normal')\n self.squeeze5 = ConvBnRelu(self.filters, 1, kernel_initializer='he_normal')\n\n self.decoder1 = Decoder(False, self.filters)\n self.decoder2 = Decoder(True, self.filters)\n\n self.proj_p1 = HeadProjection(self.classes, kernel_size=3, kernel_initializer='he_normal')\n self.proj_p2 = HeadProjection(self.classes, kernel_size=3, kernel_initializer='he_normal')\n self.proj_o2 = HeadProjection(self.classes, kernel_size=3, kernel_initializer='he_normal')\n self.proj_o3 = HeadProjection(self.classes, kernel_size=3, kernel_initializer='he_normal')\n self.proj_o4 = HeadProjection(self.classes, kernel_size=3, kernel_initializer='he_normal')\n self.proj_o5 = HeadProjection(self.classes, kernel_size=3, kernel_initializer='he_normal')\n\n self.act = HeadActivation(self.classes)\n\n super().build(input_shape)\n\n def call(self, inputs, **kwargs):\n out2h, out3h, out4h, out5v = self.bone(inputs)\n\n out2h = self.squeeze2(out2h)\n out3h = self.squeeze3(out3h)\n out4h = self.squeeze4(out4h)\n out5v = self.squeeze5(out5v)\n\n out2h, out3h, out4h, out5v, pred1 = self.decoder1([out2h, out3h, out4h, out5v])\n out2h, out3h, out4h, out5v, pred2 = self.decoder2([out2h, out3h, out4h, out5v, pred1])\n\n pred1 = self.proj_p1(pred1)\n pred2 = self.proj_p2(pred2)\n out2h = self.proj_o2(out2h)\n out3h = self.proj_o3(out3h)\n out4h = self.proj_o4(out4h)\n out5v = self.proj_o5(out5v)\n\n outputs = [pred2, pred1, out2h, out3h, out4h, out5v]\n outputs = [resize_by_sample([out, inputs]) for out in outputs]\n outputs = [self.act(out) for out in outputs]\n\n return outputs\n\n @shape_type_conversion\n def compute_output_shape(self, input_shape):\n output_shape = input_shape[:-1] + (self.classes,)\n\n return [output_shape] * 6\n\n def compute_output_signature(self, input_signature):\n outptut_signature = super().compute_output_signature(input_signature)\n\n return [tf.TensorSpec(dtype='float32', shape=os.shape) for os in outptut_signature]\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'classes': self.classes,\n 'bone_arch': self.bone_arch,\n 'bone_init': self.bone_init,\n 'bone_train': self.bone_train,\n 'filters': self.filters\n })\n\n return config\n\n\ndef build_f3_net(classes, bone_arch='resnet_50', bone_init='imagenet', bone_train=False, filters=64):\n inputs = layers.Input(name='image', shape=[None, None, 3], dtype='uint8')\n outputs = F3Net(\n classes, bone_arch=bone_arch, 
bone_init=bone_init, bone_train=bone_train, filters=filters)(inputs)\n model = models.Model(inputs=inputs, outputs=outputs, name='f3_net')\n\n return model\n",
"import numpy as np\nimport tensorflow as tf\nfrom keras import keras_parameterized, testing_utils\nfrom ..adppool import AdaptiveAveragePooling, AdaptiveMaxPooling\n\n\n@keras_parameterized.run_all_keras_modes\nclass TestAdaptiveAveragePooling(keras_parameterized.TestCase):\n def test_layer(self):\n testing_utils.layer_test(\n AdaptiveAveragePooling,\n kwargs={'output_size': 2},\n input_shape=[2, 16, 16, 3],\n input_dtype='float32',\n expected_output_shape=[None, 2, 2, 3],\n expected_output_dtype='float32'\n )\n testing_utils.layer_test(\n AdaptiveAveragePooling,\n kwargs={'output_size': (4, 3)},\n input_shape=[2, 15, 16, 3],\n input_dtype='float32',\n expected_output_shape=[None, 4, 3, 3],\n expected_output_dtype='float32'\n )\n\n def test_value(self):\n shape = [2, 16, 16, 3]\n data = np.arange(0, np.prod(shape)).reshape(shape).astype('float32')\n\n result = testing_utils.layer_test(\n AdaptiveAveragePooling,\n kwargs={'output_size': 1},\n input_data=data,\n expected_output_shape=[None, 1, 1, 3],\n expected_output_dtype='float32'\n ).astype('int32')\n self.assertListEqual(result.ravel().tolist(), [382, 383, 384, 1150, 1151, 1152])\n\n result = testing_utils.layer_test(\n AdaptiveAveragePooling,\n kwargs={'output_size': 2},\n input_data=data,\n expected_output_shape=[None, 2, 2, 3],\n expected_output_dtype='float32'\n ).astype('int32')\n self.assertListEqual(result.ravel().tolist(), [\n 178, 179, 180, 202, 203, 204, 562, 563, 564, 586, 587, 588, 946, 947, 948, 970, 971, 972, 1330, 1331, 1332,\n 1354, 1355, 1356])\n\n result = testing_utils.layer_test(\n AdaptiveAveragePooling,\n kwargs={'output_size': 3},\n input_data=data,\n expected_output_shape=[None, 3, 3, 3],\n expected_output_dtype='float32'\n ).astype('int32')\n self.assertListEqual(result.ravel().tolist(), [\n 127, 128, 129, 142, 143, 144, 157, 158, 159, 367, 368, 369, 382, 383, 384, 397, 398, 399, 607, 608, 609,\n 622, 623, 624, 637, 638, 639, 895, 896, 897, 910, 911, 912, 925, 926, 927, 1135, 1136, 1137, 1150, 1151,\n 1152, 1165, 1166, 1167, 1375, 1376, 1377, 1390, 1391, 1392, 1405, 1406, 1407])\n\n\n@keras_parameterized.run_all_keras_modes\nclass TestAdaptiveMaxPooling(keras_parameterized.TestCase):\n def test_layer(self):\n testing_utils.layer_test(\n AdaptiveMaxPooling,\n kwargs={'output_size': 2},\n input_shape=[2, 16, 16, 3],\n input_dtype='float32',\n expected_output_shape=[None, 2, 2, 3],\n expected_output_dtype='float32'\n )\n testing_utils.layer_test(\n AdaptiveMaxPooling,\n kwargs={'output_size': (4, 3)},\n input_shape=[2, 15, 16, 3],\n input_dtype='float32',\n expected_output_shape=[None, 4, 3, 3],\n expected_output_dtype='float32'\n )\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"import numpy as np\nimport tensorflow as tf\nfrom keras.testing_utils import _thread_local_data, should_run_eagerly\nfrom keras import backend, models, layers\nfrom tensorflow.python.framework import tensor_shape, test_util\nfrom tensorflow.python.util import tf_inspect\n\n\n@test_util.disable_cudnn_autotune\ndef layer_multi_io_test(\n layer_cls, kwargs=None, input_shapes=None, input_dtypes=None,\n input_datas=None, expected_outputs=None, expected_output_dtypes=None,\n expected_output_shapes=None, validate_training=True, adapt_data=None):\n \"\"\"Test routine for a layer with multiple inputs and outputs.\n\n Arguments:\n layer_cls: Layer class object.\n kwargs: Optional dictionary of keyword arguments for instantiating the\n layer.\n input_shapes: Input shape tuples.\n input_dtypes: Data types of the input data.\n input_datas: Numpy arrays of input data.\n expected_outputs: Numpy arrays of the expected output.\n expected_output_dtypes: Data types expected for the output.\n expected_output_shapes: Shape tuples for the expected output shapes.\n validate_training: Whether to attempt to validate training on this layer.\n This might be set to False for non-differentiable layers that output\n string or integer values.\n adapt_data: Optional data for an 'adapt' call. If None, adapt() will not\n be tested for this layer. This is only relevant for PreprocessingLayers.\n\n Returns:\n The output data (Numpy array) returned by the layer, for additional\n checks to be done by the calling code.\n\n Raises:\n ValueError: if `input_shape is None`.\n \"\"\"\n if input_shapes is not None:\n if not isinstance(input_shapes, (list, tuple)):\n raise ValueError(\n 'A list of shape tuples expected for input_shapes')\n for input_shape in input_shapes:\n if not isinstance(input_shape, (list, tuple)):\n raise ValueError(\n 'A list of shape tuples expected for input_shapes')\n for shape_dim in input_shape:\n if not isinstance(shape_dim, (int, type(None))):\n raise ValueError(\n 'Only integer and None values allowed in input_shapes')\n\n if input_dtypes is not None:\n if not isinstance(input_dtypes, (list, tuple)):\n raise ValueError(\n 'A list data type names expected for input_dtypes')\n for input_dtype in input_dtypes:\n if not isinstance(input_dtype, str):\n raise ValueError(\n 'Only string values allowed in input_dtypes')\n\n if input_datas is not None:\n if not isinstance(input_datas, (list, tuple)):\n raise ValueError('A list of numpy arrays expected for input_datas')\n for input_data in input_datas:\n if not isinstance(input_data, np.ndarray):\n raise ValueError(\n 'A list of numpy arrays expected for input_datas')\n\n output_size = -1\n if expected_outputs is not None:\n if not isinstance(expected_outputs, (list, tuple)):\n raise ValueError(\n 'A list of numpy arrays expected for expected_outputs')\n for expected_output in expected_outputs:\n if not isinstance(expected_output, np.ndarray):\n raise ValueError(\n 'A list of numpy arrays expected for expected_outputs')\n output_size = max(output_size, len(expected_outputs))\n if expected_output_dtypes is not None:\n if not isinstance(expected_output_dtypes, (list, tuple)):\n raise ValueError(\n 'A list data type names expected for expected_output_dtypes')\n for expected_output_dtype in expected_output_dtypes:\n if not isinstance(expected_output_dtype, str):\n raise ValueError(\n 'Only string values allowed in expected_output_dtypes')\n output_size = max(output_size, len(expected_output_dtypes))\n if expected_output_shapes is not None:\n if not 
isinstance(expected_output_shapes, (list, tuple)):\n raise ValueError(\n 'A list of shape tuples expected for expected_output_shapes')\n for expected_output_shape in expected_output_shapes:\n if not isinstance(expected_output_shape, (list, tuple)):\n raise ValueError(\n 'A list of shape tuples expected for expected_output_shapes')\n for shape_dim in expected_output_shape:\n if not isinstance(shape_dim, (int, type(None))):\n raise ValueError(\n 'Only integer and None values allowed in '\n 'expected_output_shapes')\n output_size = max(output_size, len(expected_output_shapes))\n\n if expected_outputs is not None and \\\n expected_output_dtypes is not None and \\\n len(expected_outputs) != len(expected_output_dtypes):\n raise ValueError(\n 'Sizes of \"expected_outputs\" and \"expected_output_dtypes\" '\n 'should be equal if both provided')\n if expected_outputs is not None and \\\n expected_output_shapes is not None and \\\n len(expected_outputs) != len(expected_output_shapes):\n raise ValueError(\n 'Sizes of \"expected_outputs\" and \"expected_output_shapes\" '\n 'should be equal if both provided')\n if expected_output_dtypes is not None and \\\n expected_output_shapes is not None and \\\n len(expected_output_dtypes) != len(expected_output_shapes):\n raise ValueError(\n 'Sizes of \"expected_output_dtypes\" and \"expected_output_shapes\" '\n 'should be equal if both provided')\n\n if 0 >= output_size:\n raise ValueError(\n 'Could not determine number of outputs. Provide at least one of: '\n '\"expected_output_dtypes\" or \"expected_output_shapes\" or '\n '\"expected_outputs\"')\n\n input_size = -1\n if input_datas is None:\n if input_shapes is None:\n raise ValueError(\n 'Either input_shapes or input_datas should be provided')\n input_size = len(input_shapes)\n if not input_dtypes:\n input_dtypes = ['float32'] * input_size\n\n input_datas = []\n input_data_shapes = [list(input_shape) for input_shape in input_shapes]\n for i, input_data_shape in enumerate(input_data_shapes):\n for j, e in enumerate(input_data_shape):\n if e is None:\n input_data_shape[j] = np.random.randint(1, 4)\n input_data = 10 * np.random.random(input_data_shape)\n if input_dtypes[i][:5] == 'float':\n input_data -= 0.5\n input_data = input_data.astype(input_dtypes[i])\n input_datas.append(input_data)\n elif input_shapes is None:\n input_size = len(input_datas)\n input_shapes = [input_data.shape for input_data in input_datas]\n else:\n if len(input_datas) != len(input_shapes):\n raise ValueError(\n 'Sizes of \"input_datas\" and \"input_shapes\" should be equal '\n 'if both provided')\n for input_data, input_shape in zip(input_datas, input_shapes):\n if len(input_data.shape) != len(input_shape) or \\\n not np.all(np.equal(input_data.shape, input_shape)):\n raise ValueError(\n 'Shapes of \"input_datas\" and values in \"input_shapes\" '\n 'should be equal if both provided')\n input_size = len(input_datas)\n\n if 0 >= input_size:\n raise ValueError('Wrong number of inputs')\n\n if input_dtypes is None:\n input_dtypes = [input_data.dtype for input_data in input_datas]\n if expected_output_dtypes is None:\n expected_output_dtypes = input_dtypes[:1] * output_size\n\n # instantiation\n kwargs = kwargs or {}\n layer = layer_cls(**kwargs)\n\n # Test adapt, if data was passed.\n if adapt_data is not None:\n layer.adapt(adapt_data)\n\n # test get_weights , set_weights at layer level\n weights = layer.get_weights()\n layer.set_weights(weights)\n\n # test and instantiation from weights\n if 'weights' in 
tf_inspect.getargspec(layer_cls.__init__):\n kwargs['weights'] = weights\n layer = layer_cls(**kwargs)\n\n # test in functional API\n xs = [\n layers.Input(shape=input_shapes[i][1:], dtype=input_dtypes[i])\n for i in range(input_size)]\n _y = layer(_squize(xs, input_size))\n ys = _expand(_y, output_size)\n\n if 1 == output_size and isinstance(_y, (list, tuple)):\n raise AssertionError(\n 'When testing layer {}, for inputs {}, found {} outputs but '\n 'expected to find 1.\\nFull kwargs: {}'.format(\n layer_cls.__name__, xs, len(_y), kwargs))\n elif 1 < output_size and not isinstance(_y, (list, tuple)):\n raise AssertionError(\n 'When testing layer {}, for inputs {}, found {} outputs but '\n 'expected to find {} outputs.\\nFull kwargs: {}'.format(\n layer_cls.__name__, xs, _y, output_size, kwargs))\n\n try:\n _assert_dtypes(ys, expected_output_dtypes)\n except AssertionError:\n raise AssertionError(\n 'When testing layer {}, for inputs {}, found output dtypes={} '\n 'but expected to find {}.\\nFull kwargs: {}'.format(\n layer_cls.__name__, xs, [backend.dtype(yi) for yi in ys],\n expected_output_dtypes, kwargs))\n\n if expected_output_shapes is not None:\n expected_shapes = [\n tensor_shape.TensorShape(sh)\n for sh in expected_output_shapes]\n actual_shapes = [yi.shape for yi in ys]\n\n try:\n _assert_shapes(expected_shapes, actual_shapes)\n except AssertionError:\n raise AssertionError(\n 'When testing layer {}, for inputs {}, found output_shapes={} '\n 'but expected to find {}.\\nFull kwargs: {}'.format(\n layer_cls.__name__, xs, actual_shapes,\n expected_shapes, kwargs))\n\n # check shape inference\n model = models.Model(_squize(xs, input_size), _squize(ys, output_size))\n\n compute_input_shapes = _squize([\n tensor_shape.TensorShape(sh) for sh in input_shapes], input_size)\n computed_output_shapes = _expand(layer.compute_output_shape(\n compute_input_shapes), output_size)\n computed_output_shapes = [\n tuple(sh.as_list()) for sh in computed_output_shapes]\n\n compute_input_signatures = _squize([\n tf.TensorSpec(shape=input_shapes[i], dtype=input_dtypes[i])\n for i in range(input_size)], input_size)\n computed_output_signatures = _expand(layer.compute_output_signature(\n compute_input_signatures), output_size)\n computed_output_signature_shapes = [\n cs.shape for cs in computed_output_signatures]\n computed_output_signature_dtypes = [\n cs.dtype for cs in computed_output_signatures]\n\n actual_outputs = _expand(model.predict(_squize(\n input_datas, input_size)), output_size)\n actual_output_shapes = [ao.shape for ao in actual_outputs]\n actual_output_dtypes = [ao.dtype for ao in actual_outputs]\n\n try:\n _assert_shapes(computed_output_shapes, actual_output_shapes)\n except AssertionError as e:\n raise AssertionError(\n 'When testing layer {}, for inputs {}, found output_shapes={} '\n 'but expected to find {}.\\nFull kwargs: {}'.format(\n layer_cls.__name__, xs, actual_output_shapes,\n computed_output_shapes, kwargs))\n\n try:\n _assert_shapes(computed_output_signature_shapes, actual_output_shapes)\n except AssertionError:\n raise AssertionError(\n 'When testing layer {}, for inputs {}, found output_shapes={} '\n 'but expected to find {}.\\nFull kwargs: {}'.format(\n layer_cls.__name__, xs, actual_output_shapes,\n computed_output_signatures, kwargs))\n\n try:\n _assert_dtypes(computed_output_signatures, actual_output_dtypes)\n except AssertionError:\n raise AssertionError(\n 'When testing layer {}, for inputs {}, found output dtypes={}'\n 'but expected to find {}.\\nFull kwargs: 
{}'.format(\n layer_cls.__name__, xs, actual_output_dtypes,\n computed_output_signature_dtypes, kwargs))\n\n if expected_outputs is not None:\n for i in range(output_size):\n np.testing.assert_allclose(\n actual_outputs[i], expected_outputs[i], rtol=1e-3, atol=1e-6)\n\n # test serialization, weight setting at model level\n model_config = model.get_config()\n recovered_model = models.Model.from_config(model_config)\n if model.weights:\n weights = model.get_weights()\n recovered_model.set_weights(weights)\n outputs = _expand(recovered_model.predict(_squize(\n input_datas, input_size)), output_size)\n for i in range(output_size):\n np.testing.assert_allclose(\n outputs[i], actual_outputs[i], rtol=1e-3, atol=1e-6)\n\n # test training mode (e.g. useful for dropout tests)\n # Rebuild the model to avoid the graph being reused between predict() and\n # See b/120160788 for more details. This should be mitigated after 2.0.\n if validate_training:\n _x = _squize(xs, input_size)\n model = models.Model(_x, layer(_x))\n if _thread_local_data.run_eagerly is not None:\n model.compile(\n 'rmsprop',\n 'mse',\n weighted_metrics=['acc'],\n run_eagerly=should_run_eagerly())\n else:\n model.compile('rmsprop', 'mse', weighted_metrics=['acc'])\n model.train_on_batch(\n _squize(input_datas, input_size),\n _squize(actual_outputs, output_size))\n\n # Test adapt, if data was passed.\n if adapt_data is not None:\n layer.adapt(adapt_data)\n\n # Sequential model does not supports multiple inputs or outputs\n #\n # # test as first layer in Sequential API\n # layer_config = layer.get_config()\n # layer_config['batch_input_shape'] = input_shape\n # layer = layer.__class__.from_config(layer_config)\n #\n # Sequential model does not supports multiple inputs or outputs\n # model = models.Sequential()\n # model.add(layers.Input(shape=input_shape[1:], dtype=input_dtype))\n # model.add(layer)\n # actual_output = model.predict(input_data)\n # actual_output_shape = actual_output.shape\n # for expected_dim, actual_dim in zip(computed_output_shape,\n # actual_output_shape):\n # if expected_dim is not None:\n # if expected_dim != actual_dim:\n # raise AssertionError(\n # 'When testing layer {} **after deserialization**, for '\n # 'input {}, found output_shape={} but expected to find '\n # 'inferred shape {}.\\nFull kwargs: {}'.format(\n # layer_cls.__name__, x, actual_output_shape,\n # computed_output_shape, kwargs))\n #\n #\n # if expected_output is not None:\n # np.testing.assert_allclose(actual_output, expected_output,\n # rtol=1e-3, atol=1e-6)\n #\n # # test serialization, weight setting at model level\n # model_config = model.get_config()\n # recovered_model = models.Sequential.from_config(model_config)\n # if model.weights:\n # weights = model.get_weights()\n # recovered_model.set_weights(weights)\n # output = recovered_model.predict(input_data)\n # np.testing.assert_allclose(\n # output, actual_output, rtol=1e-3, atol=1e-6)\n\n # for further checks in the caller function\n return _squize(actual_outputs, output_size)\n\n\ndef _squize(data, size):\n if size > 1 and (not isinstance(data, (list, tuple)) or not len(data)):\n raise ValueError('Wrong \"data\" value')\n\n return data[0] if 1 == size else data\n\n\ndef _expand(data, size):\n return [data] if 1 == size else data\n\n\ndef _assert_dtypes(tensors, expected_dtypes):\n if not isinstance(tensors, (list, tuple)):\n raise ValueError('A list of tensors should be provided for \"tensors\"')\n if not isinstance(expected_dtypes, (list, tuple)):\n raise ValueError(\n 'A list of dtype 
names should be provided for \"expected_dtypes\"')\n if len(tensors) != len(expected_dtypes):\n raise ValueError(\n 'Sizes of \"tensors\" and corresponding \"expected_dtypes\" '\n 'should be equal')\n\n for tensor, expected_dtype in zip(tensors, expected_dtypes):\n if backend.dtype(tensor) != expected_dtype:\n raise AssertionError('Wrong dtype')\n\n\ndef _assert_shapes(expected_shapes, actual_shapes):\n if not isinstance(expected_shapes, list):\n raise ValueError(\n 'A list of shapes should be provided for \"expected_shapes\"')\n if not isinstance(actual_shapes, list):\n raise ValueError(\n 'A list of shapes should be provided for \"actual_shapes\"')\n if len(expected_shapes) != len(actual_shapes):\n raise ValueError(\n 'Sizes of \"expected_shapes\" and corresponding \"actual_shapes\" '\n 'should be equal')\n\n for expected_shape, actual_shape in zip(expected_shapes, actual_shapes):\n _assert_shape(expected_shape, actual_shape)\n\n\ndef _assert_shape(expected_shape, actual_shape):\n if not isinstance(expected_shape, (list, tuple, tensor_shape.TensorShape)):\n raise ValueError(\n 'Wrong shape provided for \"expected_shape\"')\n if not isinstance(actual_shape, (list, tuple, tensor_shape.TensorShape)):\n raise ValueError(\n 'Wrong shape provided for \"actual_shape\"')\n if len(expected_shape) != len(actual_shape):\n raise AssertionError('Wrong shape')\n\n for expected_dim, actual_dim in zip(expected_shape, actual_shape):\n if isinstance(expected_dim, tensor_shape.Dimension):\n expected_dim = expected_dim.value\n if isinstance(actual_dim, tensor_shape.Dimension):\n actual_dim = actual_dim.value\n if expected_dim is None:\n continue\n if expected_dim != actual_dim:\n raise AssertionError('Wrong shape')\n",
"import numpy as np\nimport tensorflow as tf\nfrom keras.utils.generic_utils import register_keras_serializable\nfrom keras.utils.losses_utils import ReductionV2 as Reduction\nfrom .weighted_wrapper import WeightedLossFunctionWrapper\n\n\n@register_keras_serializable(package='SegMe')\nclass LaplacianPyramidLoss(WeightedLossFunctionWrapper):\n \"\"\" Proposed in: 'Optimizing the Latent Space of Generative Networks'\n\n Implements Lap1 in https://arxiv.org/pdf/1707.05776.pdf\n \"\"\"\n\n def __init__(\n self, levels=5, size=5, sigma=2.0, reduction=Reduction.AUTO,\n name='laplacian_pyramid_loss'):\n super().__init__(\n laplacian_pyramid_loss, reduction=reduction, name=name, levels=levels, size=size, sigma=sigma)\n\n\ndef _pad_odd(inputs):\n height_width = tf.shape(inputs)[1:3]\n hpad, wpad = tf.unstack(height_width % 2)\n paddings = [[0, 0], [0, hpad], [0, wpad], [0, 0]]\n padded = tf.pad(inputs, paddings, 'REFLECT')\n\n return padded\n\n\ndef _gauss_kernel(size, sigma):\n # Implements [9] in 'Diffusion Distance for Histogram Comparison', DOI 10.1109/CVPR.2006.99\n space = np.arange(size) - (size - 1) / 2\n\n sigma2 = sigma ** 2\n gauss1d = np.exp(-space ** 2 / (2 * sigma2)) / np.sqrt(2 * np.pi * sigma2)\n\n gauss2d = gauss1d[..., None] * gauss1d[None, ...]\n\n gauss2d /= np.sum(gauss2d)\n\n return gauss2d\n\n\ndef _gauss_filter(inputs, kernel):\n paddings = [((k - 1) // 2, k // 2) for k in kernel.shape[:2][::-1]]\n paddings = [(0, 0)] + paddings + [(0, 0)]\n\n padded = tf.pad(inputs, paddings, 'REFLECT')\n blurred = tf.nn.depthwise_conv2d(padded, kernel, strides=[1, 1, 1, 1], padding='VALID')\n\n return blurred\n\n\ndef _gauss_downsample(inputs, kernel):\n blurred = _gauss_filter(inputs, kernel)\n downsampled = blurred[:, ::2, ::2, :]\n\n return downsampled\n\n\ndef _gauss_upsample(inputs, kernel):\n shape = tf.shape(inputs)\n batch, height, width, channel = tf.unstack(shape)\n\n upsampled = tf.concat([inputs, tf.zeros_like(inputs)], axis=-1)\n upsampled = tf.reshape(upsampled, [batch * height, width * 2 * channel])\n\n upsampled = tf.concat([upsampled, tf.zeros_like(upsampled)], axis=-1)\n upsampled = tf.reshape(upsampled, [batch, height * 2, width * 2, channel])\n\n return _gauss_filter(upsampled, kernel * 4.)\n\n\ndef _laplacian_pyramid(inputs, levels, kernel):\n # https://paperswithcode.com/method/laplacian-pyramid\n pyramid = []\n\n current = inputs\n for level in range(levels):\n current = _pad_odd(current)\n downsampled = _gauss_downsample(current, kernel)\n upsampled = _gauss_upsample(downsampled, kernel)\n pyramid.append(current - upsampled)\n current = downsampled\n pyramid.append(current) # Low-frequency residual\n\n return pyramid\n\n\ndef laplacian_pyramid_loss(y_true, y_pred, sample_weight, levels, size, sigma):\n assert_true_rank = tf.assert_rank(y_true, 4)\n assert_pred_rank = tf.assert_rank(y_pred, 4)\n\n with tf.control_dependencies([assert_true_rank, assert_pred_rank]):\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, dtype=y_pred.dtype)\n\n channels_pred = y_pred.shape[-1]\n if channels_pred is None:\n raise ValueError('Channel dimension of the predictions should be defined. 
Found `None`.')\n\n kernel = _gauss_kernel(size, sigma)[..., None, None]\n kernel = kernel.astype(y_pred.dtype.as_numpy_dtype)\n\n kernel_pred = np.tile(kernel, (1, 1, channels_pred, 1))\n kernel_pred = tf.constant(kernel_pred, y_pred.dtype)\n pyr_pred = _laplacian_pyramid(y_pred, levels, kernel_pred)\n pyr_true = _laplacian_pyramid(y_true, levels, kernel_pred)\n pyr_true = [tf.stop_gradient(pt) for pt in pyr_true]\n\n if sample_weight is None:\n losses = [tf.abs(_true - _pred) for _true, _pred in zip(pyr_true, pyr_pred)]\n else:\n channels_wght = sample_weight.shape[-1]\n if channels_wght is None:\n raise ValueError('Channel dimension of the sample weights should be defined. Found `None`.')\n\n kernel_wght = np.tile(kernel, (1, 1, channels_wght, 1))\n kernel_wght = tf.constant(kernel_wght, y_pred.dtype)\n pyr_wght = _laplacian_pyramid(sample_weight, levels, kernel_wght)\n pyr_wght = [tf.stop_gradient(pw) for pw in pyr_wght]\n losses = [tf.abs(_true - _pred) * _wght for _true, _pred, _wght in zip(pyr_true, pyr_pred, pyr_wght)]\n\n axis_hwc = list(range(1, y_pred.shape.ndims))\n losses = [(2 ** i / len(losses)) * tf.reduce_mean(l, axis=axis_hwc) for i, l in enumerate(losses)]\n losses = sum(losses)\n\n return losses\n",
"import cv2\nimport numpy as np\nimport tensorflow as tf\nfrom keras import keras_parameterized\nfrom ..grad import Grad\n\n\n@keras_parameterized.run_all_keras_modes\nclass TestGrad(keras_parameterized.TestCase):\n SNAKE = np.round(np.array([\n [1, 2, 0, 0, 0, 0, 0, 0, 0],\n [0, 3, 4, 5, 6, 0, 0, 0, 0],\n [0, 0, 0, 0, 7, 8, 9, 8, 0],\n [0, 0, 0, 0, 0, 0, 0, 7, 0],\n [0, 2, 1, 2, 3, 4, 5, 6, 0],\n [0, 3, 0, 0, 0, 0, 0, 0, 0],\n [0, 4, 0, 6, 5, 4, 3, 2, 1],\n [0, 5, 0, 0, 0, 0, 0, 0, 2],\n [0, 6, 7, 8, 9, 8, 7, 0, 3],\n [0, 0, 0, 0, 0, 0, 7, 5, 4]\n ]).astype('float32') * 255. / 9.)\n\n def test_config(self):\n metric = Grad(\n divider=2.,\n name='metric1'\n )\n self.assertEqual(metric.divider, 2.)\n self.assertEqual(metric.name, 'metric1')\n\n def test_zeros(self):\n targets = np.zeros((2, 32, 32, 1), 'int32')\n probs = np.zeros((2, 32, 32, 1), 'float32')\n weight = np.ones((2, 32, 32, 1), 'float32')\n\n metric = Grad()\n metric.update_state(targets, probs, weight)\n result = self.evaluate(metric.result())\n self.assertAlmostEqual(result, 0.0, places=7)\n\n def test_value(self):\n trim = np.where(cv2.dilate(self.SNAKE, np.ones((2, 2), 'float32')) > 0, 1., 0.)\n pred = np.round((self.SNAKE / 128.) ** 2 * 255. / 3.97)\n\n metric = Grad()\n metric.update_state(self.SNAKE[None, ..., None], pred[None, ..., None], trim[None, ..., None])\n result = self.evaluate(metric.result())\n\n self.assertAlmostEqual(result, 0.001667233, places=9)\n\n def test_unweighted(self):\n pred = np.round((self.SNAKE / 128.) ** 2 * 255. / 3.97)\n\n metric = Grad()\n metric.update_state(self.SNAKE[None, ..., None], pred[None, ..., None])\n result = self.evaluate(metric.result())\n\n self.assertAlmostEqual(result, 0.00253742, places=9)\n\n def test_batch(self):\n trim0 = np.where(cv2.dilate(self.SNAKE, np.ones((2, 2), 'float32')) > 0, 1., 0.)\n pred0 = np.round((self.SNAKE / 128.) ** 2 * 255. / 3.97)\n\n targ1 = np.pad(self.SNAKE[3:, 3:], [[0, 3], [0, 3]])\n trim1 = np.pad(trim0[3:, 3:], [[0, 3], [0, 3]])\n pred1 = np.pad(pred0[3:, 3:], [[0, 3], [0, 3]])\n\n metric = Grad()\n metric.update_state(self.SNAKE[None, ..., None], pred0[None, ..., None], trim0[None, ..., None])\n metric.update_state(targ1[None, ..., None], pred1[None, ..., None], trim1[None, ..., None])\n res0 = self.evaluate(metric.result())\n\n metric.reset_states()\n metric.update_state(\n np.array([self.SNAKE[..., None], targ1[..., None]]),\n np.array([pred0[..., None], pred1[..., None]]),\n np.array([trim0[..., None], trim1[..., None]]))\n res1 = self.evaluate(metric.result())\n\n self.assertEqual(res0, res1)\n\n def test_channel3(self):\n targets = np.zeros((2, 32, 32, 3), 'int32')\n probs = np.zeros((2, 32, 32, 3), 'float32')\n weight = np.ones((2, 32, 32, 3), 'float32')\n\n metric = Grad()\n metric.update_state(targets, probs, weight)\n result = self.evaluate(metric.result())\n self.assertAlmostEqual(result, 0.0, places=7)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.TensorSpec"
],
[
"tensorflow.test.main",
"numpy.prod"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"numpy.random.random",
"numpy.equal",
"tensorflow.python.util.tf_inspect.getargspec",
"numpy.testing.assert_allclose",
"tensorflow.TensorSpec",
"numpy.random.randint"
],
[
"tensorflow.convert_to_tensor",
"numpy.sqrt",
"tensorflow.control_dependencies",
"tensorflow.cast",
"tensorflow.pad",
"numpy.exp",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.assert_rank",
"numpy.arange",
"tensorflow.stop_gradient",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.zeros_like",
"numpy.sum",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"numpy.tile",
"tensorflow.abs"
],
[
"numpy.pad",
"tensorflow.test.main",
"numpy.ones",
"numpy.round",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
syakoo/galois-field | [
"e642adfa7da55f6cd95cadceb0116cdea379c181"
] | [
"galois_field/core/gcd.py"
] | [
"from typing import Tuple\n\nimport numpy as np\n\nfrom . import modulus, inverse\n\n\ndef gcd_poly(poly1: np.poly1d, poly2: np.poly1d, p: int) -> np.poly1d:\n \"\"\"Seek the gcd of two polynomials over Fp.\n\n Args:\n poly1 (np.poly1d): A polynomial.\n poly2 (np.poly1d): A polynomial.\n p (int): A prime number.\n\n Returns:\n np.poly1d: gcd(poly1, poly2) over Fp.\n \"\"\"\n def poly2monic(poly: np.poly1d)\\\n -> Tuple[np.poly1d, Tuple[np.poly1d, np.poly1d]]:\n highest_degree_coeff = poly.coeffs[0]\n if highest_degree_coeff == 1:\n return poly, (np.poly1d([1]), np.poly1d([1]))\n\n inv_hdc = inverse.inverse_el(highest_degree_coeff, p)\n coeffs = poly.coeffs * inv_hdc\n return np.poly1d(modulus.modulus_coeffs(coeffs, p)),\\\n (np.poly1d([highest_degree_coeff]), np.poly1d([inv_hdc]))\n\n if len(poly1.coeffs) < len(poly2.coeffs):\n poly1, poly2 = poly2, poly1\n\n poly2_monic, hdc = poly2monic(poly2)\n _, r = np.polydiv(poly1 * hdc[1], poly2_monic)\n r = np.poly1d(modulus.modulus_coeffs(r.coeffs, p)) * hdc[0]\n r = modulus.modulus_poly_over_fp(r, p)\n\n if r.coeffs[0] == 0:\n return poly2\n\n return gcd_poly(poly2, r, p)\n"
] | [
[
"numpy.polydiv",
"numpy.poly1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sghislandi/GSSI_Numerical_Methods_2020-2021 | [
"171853ff2d99560565783bdd2e0f3a9e0c4c5ca5"
] | [
"pyplots/simpson_integration/error_plotter.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os.path\n\n#Reading the output\nflag = 0\nif os.path.exists('../../build/output/simpson_integration/simpson_approximation_errors_v1.txt'):\n flag = 1\n N, deviation = np.loadtxt('../../build/output/simpson_integration/simpson_approximation_errors_v1.txt', skiprows = 1, unpack = True)\nelif os.path.exists('output/simpson_integration/simpson_approximation_errors_v1.txt'):\n flag = 2\n N, deviation = np.loadtxt('output/simpson_integration/simpson_approximation_errors_v1.txt', skiprows = 1, unpack = True)\nelif os.path.exists('../../build/output/simpson_integration/simpson_approximation_errors.txt'):\n flag = 3\n N, deviation = np.loadtxt('../../build/output/simpson_integration/simpson_approximation_errors.txt', skiprows = 1, unpack = True)\nelif os.path.exists('output/simpson_integration/simpson_approximation_errors.txt'):\n flag = 4\n N, deviation = np.loadtxt('output/simpson_integration/simpson_approximation_errors.txt', skiprows = 1, unpack = True)\nelse:\n print(\"No output file found\")\n exit()\n\n#Fixed quantities\na = 0\nb = 2\nthird_derivative_a = 0\nthird_derivative_b = 48\n\n#Error computed as I_simpson - I_exact\nlog_deviation = np.log10(abs(deviation))\n\n#Error from theory\nlog_theoretical_error = np.log10(1/90*(third_derivative_b-third_derivative_a)*((b-a)/(2**N))**4)\n\n#Error computed trhough the difference between I(N) and I(2N)\nlog_approximated_error= [None] * (deviation.size-1)\nfor i in range(0,deviation.size-1):\n log_approximated_error[i] = np.log10(1/15*(abs(deviation[i+1]-deviation[i])))\n\n#Definition of useful quantities\nN = 2**N\nlogN = np.log10(N)\n\n#Plots\nfig = plt.figure(figsize=(4.5*np.sqrt(2),4.5))\nplt.plot(logN,log_deviation,'o',markersize=5, label = r'$\\left| \\widetilde{I} - I \\right|$')\nplt.plot(logN,log_theoretical_error,'o', markersize=5, color = 'red', label = 'Theoretical Error')\nplt.plot(logN[1:],log_approximated_error,'o', markersize=5, label = 'Numerical error' )\nplt.title(\"Error analysis for Simpson integration method\", fontsize = 14)\nplt.xlabel('Log(N)' , fontsize = 12)\nplt.ylabel('Log(value)', fontsize = 12)\nplt.legend( fontsize = 12)\n\nfit_deviation = np.polyfit(logN[0:10], log_deviation[0:10], 1)\nfit_theoretical_error = np.polyfit(logN[0:10], log_theoretical_error[0:10], 1)\nfit_approximated_error = np.polyfit(logN[0:10], log_approximated_error[0:10], 1)\n\nprint('**********************')\nprint('PYTHON SCRIPT RESULTS:')\nprint(f'Deviation slope = {fit_deviation[0]}')\nprint(f'Theoretical slope = {fit_theoretical_error[0]}')\nprint(f'Approximated slope = {fit_approximated_error[0]}')\nprint('**********************')\n\nif(flag == 1 or flag == 3):\n plt.savefig('simpson_approximation_errors.pdf')\nelse:\n plt.savefig('../pyplots/simpson_integration/simpson_approximation_errors.pdf')\n\nprint(\"\\nOutput saved in pyplots/simpson_approximation_errors.pdf\")\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.polyfit",
"numpy.sqrt",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
woonzh/semantics | [
"8689acb5af7be689318486aea10da812aa383bb4"
] | [
"modeling.py"
] | [
"# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"The main BERT model and related functions.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport copy\r\nimport json\r\nimport math\r\nimport re\r\nimport six\r\nimport tensorflow as tf\r\n\r\n\r\nclass BertConfig(object):\r\n \"\"\"Configuration for `BertModel`.\"\"\"\r\n\r\n def __init__(self,\r\n vocab_size,\r\n hidden_size=768,\r\n num_hidden_layers=12,\r\n num_attention_heads=12,\r\n intermediate_size=3072,\r\n hidden_act=\"gelu\",\r\n hidden_dropout_prob=0.1,\r\n attention_probs_dropout_prob=0.1,\r\n max_position_embeddings=512,\r\n type_vocab_size=16,\r\n initializer_range=0.02):\r\n \"\"\"Constructs BertConfig.\r\n\r\n Args:\r\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\r\n hidden_size: Size of the encoder layers and the pooler layer.\r\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\r\n num_attention_heads: Number of attention heads for each attention layer in\r\n the Transformer encoder.\r\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\r\n layer in the Transformer encoder.\r\n hidden_act: The non-linear activation function (function or string) in the\r\n encoder and pooler.\r\n hidden_dropout_prob: The dropout probability for all fully connected\r\n layers in the embeddings, encoder, and pooler.\r\n attention_probs_dropout_prob: The dropout ratio for the attention\r\n probabilities.\r\n max_position_embeddings: The maximum sequence length that this model might\r\n ever be used with. 
Typically set this to something large just in case\r\n (e.g., 512 or 1024 or 2048).\r\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\r\n `BertModel`.\r\n initializer_range: The stdev of the truncated_normal_initializer for\r\n initializing all weight matrices.\r\n \"\"\"\r\n self.vocab_size = vocab_size\r\n self.hidden_size = hidden_size\r\n self.num_hidden_layers = num_hidden_layers\r\n self.num_attention_heads = num_attention_heads\r\n self.hidden_act = hidden_act\r\n self.intermediate_size = intermediate_size\r\n self.hidden_dropout_prob = hidden_dropout_prob\r\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\r\n self.max_position_embeddings = max_position_embeddings\r\n self.type_vocab_size = type_vocab_size\r\n self.initializer_range = initializer_range\r\n\r\n @classmethod\r\n def from_dict(cls, json_object):\r\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\r\n config = BertConfig(vocab_size=None)\r\n for (key, value) in six.iteritems(json_object):\r\n config.__dict__[key] = value\r\n return config\r\n\r\n @classmethod\r\n def from_json_file(cls, json_file):\r\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\r\n with tf.gfile.GFile(json_file, \"r\") as reader:\r\n text = reader.read()\r\n return cls.from_dict(json.loads(text))\r\n\r\n def to_dict(self):\r\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\r\n output = copy.deepcopy(self.__dict__)\r\n return output\r\n\r\n def to_json_string(self):\r\n \"\"\"Serializes this instance to a JSON string.\"\"\"\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\r\n\r\n\r\nclass BertModel(object):\r\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\r\n\r\n Example usage:\r\n\r\n ```python\r\n # Already been converted into WordPiece token ids\r\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\r\n input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])\r\n token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\r\n\r\n config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\r\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\r\n\r\n model = modeling.BertModel(config=config, is_training=True,\r\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)\r\n\r\n label_embeddings = tf.get_variable(...)\r\n pooled_output = model.get_pooled_output()\r\n logits = tf.matmul(pooled_output, label_embeddings)\r\n ...\r\n ```\r\n \"\"\"\r\n\r\n def __init__(self,\r\n config,\r\n is_training,\r\n input_ids,\r\n input_mask=None,\r\n token_type_ids=None,\r\n use_one_hot_embeddings=True,\r\n scope=None):\r\n \"\"\"Constructor for BertModel.\r\n\r\n Args:\r\n config: `BertConfig` instance.\r\n is_training: bool. true for training model, false for eval model. Controls\r\n whether dropout will be applied.\r\n input_ids: int32 Tensor of shape [batch_size, seq_length].\r\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\r\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\r\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\r\n embeddings or tf.embedding_lookup() for the word embeddings. On the TPU,\r\n it is much faster if this is True, on the CPU or GPU, it is faster if\r\n this is False.\r\n scope: (optional) variable scope. 
Defaults to \"bert\".\r\n\r\n Raises:\r\n ValueError: The config is invalid or one of the input tensor shapes\r\n is invalid.\r\n \"\"\"\r\n config = copy.deepcopy(config)\r\n if not is_training:\r\n config.hidden_dropout_prob = 0.0\r\n config.attention_probs_dropout_prob = 0.0\r\n\r\n input_shape = get_shape_list(input_ids, expected_rank=2)\r\n batch_size = input_shape[0]\r\n seq_length = input_shape[1]\r\n\r\n if input_mask is None:\r\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\r\n\r\n if token_type_ids is None:\r\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\r\n\r\n with tf.variable_scope(scope, default_name=\"bert\"):\r\n with tf.variable_scope(\"embeddings\"):\r\n # Perform embedding lookup on the word ids.\r\n (self.embedding_output, self.embedding_table) = embedding_lookup(\r\n input_ids=input_ids,\r\n vocab_size=config.vocab_size,\r\n embedding_size=config.hidden_size,\r\n initializer_range=config.initializer_range,\r\n word_embedding_name=\"word_embeddings\",\r\n use_one_hot_embeddings=use_one_hot_embeddings)\r\n\r\n # Add positional embeddings and token type embeddings, then layer\r\n # normalize and perform dropout.\r\n self.embedding_output = embedding_postprocessor(\r\n input_tensor=self.embedding_output,\r\n use_token_type=True,\r\n token_type_ids=token_type_ids,\r\n token_type_vocab_size=config.type_vocab_size,\r\n token_type_embedding_name=\"token_type_embeddings\",\r\n use_position_embeddings=True,\r\n position_embedding_name=\"position_embeddings\",\r\n initializer_range=config.initializer_range,\r\n max_position_embeddings=config.max_position_embeddings,\r\n dropout_prob=config.hidden_dropout_prob)\r\n\r\n with tf.variable_scope(\"encoder\"):\r\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\r\n # mask of shape [batch_size, seq_length, seq_length] which is used\r\n # for the attention scores.\r\n attention_mask = create_attention_mask_from_input_mask(\r\n input_ids, input_mask)\r\n\r\n # Run the stacked transformer.\r\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\r\n self.all_encoder_layers = transformer_model(\r\n input_tensor=self.embedding_output,\r\n attention_mask=attention_mask,\r\n hidden_size=config.hidden_size,\r\n num_hidden_layers=config.num_hidden_layers,\r\n num_attention_heads=config.num_attention_heads,\r\n intermediate_size=config.intermediate_size,\r\n intermediate_act_fn=get_activation(config.hidden_act),\r\n hidden_dropout_prob=config.hidden_dropout_prob,\r\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\r\n initializer_range=config.initializer_range,\r\n do_return_all_layers=True)\r\n\r\n self.sequence_output = self.all_encoder_layers[-1]\r\n # The \"pooler\" converts the encoded sequence tensor of shape\r\n # [batch_size, seq_length, hidden_size] to a tensor of shape\r\n # [batch_size, hidden_size]. This is necessary for segment-level\r\n # (or segment-pair-level) classification tasks where we need a fixed\r\n # dimensional representation of the segment.\r\n with tf.variable_scope(\"pooler\"):\r\n # We \"pool\" the model by simply taking the hidden state corresponding\r\n # to the first token. 
We assume that this has been pre-trained\r\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\r\n self.pooled_output = tf.layers.dense(\r\n first_token_tensor,\r\n config.hidden_size,\r\n activation=tf.tanh,\r\n kernel_initializer=create_initializer(config.initializer_range))\r\n\r\n def get_pooled_output(self):\r\n return self.pooled_output\r\n\r\n def get_sequence_output(self):\r\n \"\"\"Gets final hidden layer of encoder.\r\n\r\n Returns:\r\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\r\n to the final hidden of the transformer encoder.\r\n \"\"\"\r\n return self.sequence_output\r\n\r\n def get_all_encoder_layers(self):\r\n return self.all_encoder_layers\r\n\r\n def get_embedding_output(self):\r\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\r\n\r\n Returns:\r\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\r\n to the output of the embedding layer, after summing the word\r\n embeddings with the positional embeddings and the token type embeddings,\r\n then performing layer normalization. This is the input to the transformer.\r\n \"\"\"\r\n return self.embedding_output\r\n\r\n def get_embedding_table(self):\r\n return self.embedding_table\r\n\r\n\r\ndef gelu(input_tensor):\r\n \"\"\"Gaussian Error Linear Unit.\r\n\r\n This is a smoother version of the RELU.\r\n Original paper: https://arxiv.org/abs/1606.08415\r\n\r\n Args:\r\n input_tensor: float Tensor to perform activation.\r\n\r\n Returns:\r\n `input_tensor` with the GELU activation applied.\r\n \"\"\"\r\n cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))\r\n return input_tensor * cdf\r\n\r\n\r\ndef get_activation(activation_string):\r\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\r\n\r\n Args:\r\n activation_string: String name of the activation function.\r\n\r\n Returns:\r\n A Python function corresponding to the activation function. 
If\r\n `activation_string` is None, empty, or \"linear\", this will return None.\r\n If `activation_string` is not a string, it will return `activation_string`.\r\n\r\n Raises:\r\n ValueError: The `activation_string` does not correspond to a known\r\n activation.\r\n \"\"\"\r\n\r\n # We assume that anything that\"s not a string is already an activation\r\n # function, so we just return it.\r\n if not isinstance(activation_string, six.string_types):\r\n return activation_string\r\n\r\n if not activation_string:\r\n return None\r\n\r\n act = activation_string.lower()\r\n if act == \"linear\":\r\n return None\r\n elif act == \"relu\":\r\n return tf.nn.relu\r\n elif act == \"gelu\":\r\n return gelu\r\n elif act == \"tanh\":\r\n return tf.tanh\r\n else:\r\n raise ValueError(\"Unsupported activation: %s\" % act)\r\n\r\n\r\ndef get_assignment_map_from_checkpoint(tvars, init_checkpoint):\r\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\r\n assignment_map = {}\r\n initialized_variable_names = {}\r\n\r\n name_to_variable = collections.OrderedDict()\r\n for var in tvars:\r\n name = var.name\r\n m = re.match(\"^(.*):\\\\d+$\", name)\r\n if m is not None:\r\n name = m.group(1)\r\n name_to_variable[name] = var\r\n\r\n init_vars = tf.train.list_variables(init_checkpoint)\r\n\r\n assignment_map = collections.OrderedDict()\r\n for x in init_vars:\r\n (name, var) = (x[0], x[1])\r\n if name not in name_to_variable:\r\n continue\r\n assignment_map[name] = name\r\n initialized_variable_names[name] = 1\r\n initialized_variable_names[name + \":0\"] = 1\r\n\r\n return (assignment_map, initialized_variable_names)\r\n\r\n\r\ndef dropout(input_tensor, dropout_prob):\r\n \"\"\"Perform dropout.\r\n\r\n Args:\r\n input_tensor: float Tensor.\r\n dropout_prob: Python float. The probability of dropping out a value (NOT of\r\n *keeping* a dimension as in `tf.nn.dropout`).\r\n\r\n Returns:\r\n A version of `input_tensor` with dropout applied.\r\n \"\"\"\r\n if dropout_prob is None or dropout_prob == 0.0:\r\n return input_tensor\r\n\r\n output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\r\n return output\r\n\r\n\r\ndef layer_norm(input_tensor, name=None):\r\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\r\n return tf.contrib.layers.layer_norm(\r\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)\r\n\r\n\r\ndef layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\r\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\r\n output_tensor = layer_norm(input_tensor, name)\r\n output_tensor = dropout(output_tensor, dropout_prob)\r\n return output_tensor\r\n\r\n\r\ndef create_initializer(initializer_range=0.02):\r\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\r\n return tf.truncated_normal_initializer(stddev=initializer_range)\r\n\r\n\r\ndef embedding_lookup(input_ids,\r\n vocab_size,\r\n embedding_size=128,\r\n initializer_range=0.02,\r\n word_embedding_name=\"word_embeddings\",\r\n use_one_hot_embeddings=False):\r\n \"\"\"Looks up words embeddings for id tensor.\r\n\r\n Args:\r\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\r\n ids.\r\n vocab_size: int. Size of the embedding vocabulary.\r\n embedding_size: int. Width of the word embeddings.\r\n initializer_range: float. Embedding initialization range.\r\n word_embedding_name: string. Name of the embedding table.\r\n use_one_hot_embeddings: bool. If True, use one-hot method for word\r\n embeddings. 
If False, use `tf.nn.embedding_lookup()`. One hot is better\r\n for TPUs.\r\n\r\n Returns:\r\n float Tensor of shape [batch_size, seq_length, embedding_size].\r\n \"\"\"\r\n # This function assumes that the input is of shape [batch_size, seq_length,\r\n # num_inputs].\r\n #\r\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\r\n # reshape to [batch_size, seq_length, 1].\r\n if input_ids.shape.ndims == 2:\r\n input_ids = tf.expand_dims(input_ids, axis=[-1])\r\n\r\n embedding_table = tf.get_variable(\r\n name=word_embedding_name,\r\n shape=[vocab_size, embedding_size],\r\n initializer=create_initializer(initializer_range))\r\n\r\n if use_one_hot_embeddings:\r\n flat_input_ids = tf.reshape(input_ids, [-1])\r\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\r\n output = tf.matmul(one_hot_input_ids, embedding_table)\r\n else:\r\n output = tf.nn.embedding_lookup(embedding_table, input_ids)\r\n\r\n input_shape = get_shape_list(input_ids)\r\n\r\n output = tf.reshape(output,\r\n input_shape[0:-1] + [input_shape[-1] * embedding_size])\r\n return (output, embedding_table)\r\n\r\n\r\ndef embedding_postprocessor(input_tensor,\r\n use_token_type=False,\r\n token_type_ids=None,\r\n token_type_vocab_size=16,\r\n token_type_embedding_name=\"token_type_embeddings\",\r\n use_position_embeddings=True,\r\n position_embedding_name=\"position_embeddings\",\r\n initializer_range=0.02,\r\n max_position_embeddings=512,\r\n dropout_prob=0.1):\r\n \"\"\"Performs various post-processing on a word embedding tensor.\r\n\r\n Args:\r\n input_tensor: float Tensor of shape [batch_size, seq_length,\r\n embedding_size].\r\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\r\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\r\n Must be specified if `use_token_type` is True.\r\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\r\n token_type_embedding_name: string. The name of the embedding table variable\r\n for token type ids.\r\n use_position_embeddings: bool. Whether to add position embeddings for the\r\n position of each token in the sequence.\r\n position_embedding_name: string. The name of the embedding table variable\r\n for positional embeddings.\r\n initializer_range: float. Range of the weight initialization.\r\n max_position_embeddings: int. Maximum sequence length that might ever be\r\n used with this model. This can be longer than the sequence length of\r\n input_tensor, but cannot be shorter.\r\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\r\n\r\n Returns:\r\n float tensor with same shape as `input_tensor`.\r\n\r\n Raises:\r\n ValueError: One of the tensor shapes or input values is invalid.\r\n \"\"\"\r\n input_shape = get_shape_list(input_tensor, expected_rank=3)\r\n batch_size = input_shape[0]\r\n seq_length = input_shape[1]\r\n width = input_shape[2]\r\n\r\n output = input_tensor\r\n\r\n if use_token_type:\r\n if token_type_ids is None:\r\n raise ValueError(\"`token_type_ids` must be specified if\"\r\n \"`use_token_type` is True.\")\r\n token_type_table = tf.get_variable(\r\n name=token_type_embedding_name,\r\n shape=[token_type_vocab_size, width],\r\n initializer=create_initializer(initializer_range))\r\n # This vocab will be small so we always do one-hot here, since it is always\r\n # faster for a small vocabulary.\r\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\r\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\r\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\r\n token_type_embeddings = tf.reshape(token_type_embeddings,\r\n [batch_size, seq_length, width])\r\n output += token_type_embeddings\r\n\r\n if use_position_embeddings:\r\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\r\n with tf.control_dependencies([assert_op]):\r\n full_position_embeddings = tf.get_variable(\r\n name=position_embedding_name,\r\n shape=[max_position_embeddings, width],\r\n initializer=create_initializer(initializer_range))\r\n # Since the position embedding table is a learned variable, we create it\r\n # using a (long) sequence length `max_position_embeddings`. The actual\r\n # sequence length might be shorter than this, for faster training of\r\n # tasks that do not have long sequences.\r\n #\r\n # So `full_position_embeddings` is effectively an embedding table\r\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\r\n # sequence has positions [0, 1, 2, ... seq_length-1], so we can just\r\n # perform a slice.\r\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\r\n [seq_length, -1])\r\n num_dims = len(output.shape.as_list())\r\n\r\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\r\n # we broadcast among the first dimensions, which is typically just\r\n # the batch size.\r\n position_broadcast_shape = []\r\n for _ in range(num_dims - 2):\r\n position_broadcast_shape.append(1)\r\n position_broadcast_shape.extend([seq_length, width])\r\n position_embeddings = tf.reshape(position_embeddings,\r\n position_broadcast_shape)\r\n output += position_embeddings\r\n\r\n output = layer_norm_and_dropout(output, dropout_prob)\r\n return output\r\n\r\n\r\ndef create_attention_mask_from_input_mask(from_tensor, to_mask):\r\n \"\"\"Create 3D attention mask from a 2D tensor mask.\r\n\r\n Args:\r\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\r\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\r\n\r\n Returns:\r\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\r\n \"\"\"\r\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\r\n batch_size = from_shape[0]\r\n from_seq_length = from_shape[1]\r\n\r\n to_shape = get_shape_list(to_mask, expected_rank=2)\r\n to_seq_length = to_shape[1]\r\n\r\n to_mask = tf.cast(\r\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\r\n\r\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\r\n # don't actually care if we attend *from* padding tokens (only *to* padding)\r\n # tokens so we create a tensor of all ones.\r\n #\r\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\r\n broadcast_ones = tf.ones(\r\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\r\n\r\n # Here we broadcast along two dimensions to create the mask.\r\n mask = broadcast_ones * to_mask\r\n\r\n return mask\r\n\r\n\r\ndef attention_layer(from_tensor,\r\n to_tensor,\r\n attention_mask=None,\r\n num_attention_heads=1,\r\n size_per_head=512,\r\n query_act=None,\r\n key_act=None,\r\n value_act=None,\r\n attention_probs_dropout_prob=0.0,\r\n initializer_range=0.02,\r\n do_return_2d_tensor=False,\r\n batch_size=None,\r\n from_seq_length=None,\r\n to_seq_length=None):\r\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\r\n\r\n This is an implementation of multi-headed attention based on \"Attention\r\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\r\n this is self-attention. Each timestep in `from_tensor` attends to the\r\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\r\n\r\n This function first projects `from_tensor` into a \"query\" tensor and\r\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\r\n of tensors of length `num_attention_heads`, where each tensor is of shape\r\n [batch_size, seq_length, size_per_head].\r\n\r\n Then, the query and key tensors are dot-producted and scaled. These are\r\n softmaxed to obtain attention probabilities. The value tensors are then\r\n interpolated by these probabilities, then concatenated back to a single\r\n tensor and returned.\r\n\r\n In practice, the multi-headed attention are done with transposes and\r\n reshapes rather than actual separate tensors.\r\n\r\n Args:\r\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\r\n from_width].\r\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\r\n attention_mask: (optional) int32 Tensor of shape [batch_size,\r\n from_seq_length, to_seq_length]. The values should be 1 or 0. The\r\n attention scores will effectively be set to -infinity for any positions in\r\n the mask that are 0, and will be unchanged for positions that are 1.\r\n num_attention_heads: int. Number of attention heads.\r\n size_per_head: int. Size of each attention head.\r\n query_act: (optional) Activation function for the query transform.\r\n key_act: (optional) Activation function for the key transform.\r\n value_act: (optional) Activation function for the value transform.\r\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\r\n attention probabilities.\r\n initializer_range: float. Range of the weight initializer.\r\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\r\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\r\n output will be of shape [batch_size, from_seq_length, num_attention_heads\r\n * size_per_head].\r\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\r\n of the 3D version of the `from_tensor` and `to_tensor`.\r\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\r\n of the 3D version of the `from_tensor`.\r\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\r\n of the 3D version of the `to_tensor`.\r\n\r\n Returns:\r\n float Tensor of shape [batch_size, from_seq_length,\r\n num_attention_heads * size_per_head]. 
(If `do_return_2d_tensor` is\r\n true, this will be of shape [batch_size * from_seq_length,\r\n num_attention_heads * size_per_head]).\r\n\r\n Raises:\r\n ValueError: Any of the arguments or tensor shapes are invalid.\r\n \"\"\"\r\n\r\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\r\n seq_length, width):\r\n output_tensor = tf.reshape(\r\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\r\n\r\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\r\n return output_tensor\r\n\r\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\r\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\r\n\r\n if len(from_shape) != len(to_shape):\r\n raise ValueError(\r\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\r\n\r\n if len(from_shape) == 3:\r\n batch_size = from_shape[0]\r\n from_seq_length = from_shape[1]\r\n to_seq_length = to_shape[1]\r\n elif len(from_shape) == 2:\r\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\r\n raise ValueError(\r\n \"When passing in rank 2 tensors to attention_layer, the values \"\r\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\r\n \"must all be specified.\")\r\n\r\n # Scalar dimensions referenced here:\r\n # B = batch size (number of sequences)\r\n # F = `from_tensor` sequence length\r\n # T = `to_tensor` sequence length\r\n # N = `num_attention_heads`\r\n # H = `size_per_head`\r\n\r\n from_tensor_2d = reshape_to_matrix(from_tensor)\r\n to_tensor_2d = reshape_to_matrix(to_tensor)\r\n\r\n # `query_layer` = [B*F, N*H]\r\n query_layer = tf.layers.dense(\r\n from_tensor_2d,\r\n num_attention_heads * size_per_head,\r\n activation=query_act,\r\n name=\"query\",\r\n kernel_initializer=create_initializer(initializer_range))\r\n\r\n # `key_layer` = [B*T, N*H]\r\n key_layer = tf.layers.dense(\r\n to_tensor_2d,\r\n num_attention_heads * size_per_head,\r\n activation=key_act,\r\n name=\"key\",\r\n kernel_initializer=create_initializer(initializer_range))\r\n\r\n # `value_layer` = [B*T, N*H]\r\n value_layer = tf.layers.dense(\r\n to_tensor_2d,\r\n num_attention_heads * size_per_head,\r\n activation=value_act,\r\n name=\"value\",\r\n kernel_initializer=create_initializer(initializer_range))\r\n\r\n # `query_layer` = [B, N, F, H]\r\n query_layer = transpose_for_scores(query_layer, batch_size,\r\n num_attention_heads, from_seq_length,\r\n size_per_head)\r\n\r\n # `key_layer` = [B, N, T, H]\r\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\r\n to_seq_length, size_per_head)\r\n\r\n # Take the dot product between \"query\" and \"key\" to get the raw\r\n # attention scores.\r\n # `attention_scores` = [B, N, F, T]\r\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\r\n attention_scores = tf.multiply(attention_scores,\r\n 1.0 / math.sqrt(float(size_per_head)))\r\n\r\n if attention_mask is not None:\r\n # `attention_mask` = [B, 1, F, T]\r\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\r\n\r\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\r\n # masked positions, this operation will create a tensor which is 0.0 for\r\n # positions we want to attend and -10000.0 for masked positions.\r\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\r\n\r\n # Since we are adding it to the raw scores before the softmax, this is\r\n # effectively the same as removing these entirely.\r\n attention_scores += adder\r\n\r\n # Normalize the attention scores to 
probabilities.\r\n # `attention_probs` = [B, N, F, T]\r\n attention_probs = tf.nn.softmax(attention_scores)\r\n\r\n # This is actually dropping out entire tokens to attend to, which might\r\n # seem a bit unusual, but is taken from the original Transformer paper.\r\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\r\n\r\n # `value_layer` = [B, T, N, H]\r\n value_layer = tf.reshape(\r\n value_layer,\r\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\r\n\r\n # `value_layer` = [B, N, T, H]\r\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\r\n\r\n # `context_layer` = [B, N, F, H]\r\n context_layer = tf.matmul(attention_probs, value_layer)\r\n\r\n # `context_layer` = [B, F, N, H]\r\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\r\n\r\n if do_return_2d_tensor:\r\n # `context_layer` = [B*F, N*H]\r\n context_layer = tf.reshape(\r\n context_layer,\r\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\r\n else:\r\n # `context_layer` = [B, F, N*H]\r\n context_layer = tf.reshape(\r\n context_layer,\r\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\r\n\r\n return context_layer\r\n\r\n\r\ndef transformer_model(input_tensor,\r\n attention_mask=None,\r\n hidden_size=768,\r\n num_hidden_layers=12,\r\n num_attention_heads=12,\r\n intermediate_size=3072,\r\n intermediate_act_fn=gelu,\r\n hidden_dropout_prob=0.1,\r\n attention_probs_dropout_prob=0.1,\r\n initializer_range=0.02,\r\n do_return_all_layers=False):\r\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\r\n\r\n This is almost an exact implementation of the original Transformer encoder.\r\n\r\n See the original paper:\r\n https://arxiv.org/abs/1706.03762\r\n\r\n Also see:\r\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py\r\n\r\n Args:\r\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\r\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\r\n seq_length], with 1 for positions that can be attended to and 0 in\r\n positions that should not be.\r\n hidden_size: int. Hidden size of the Transformer.\r\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\r\n num_attention_heads: int. Number of attention heads in the Transformer.\r\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\r\n forward) layer.\r\n intermediate_act_fn: function. The non-linear activation function to apply\r\n to the output of the intermediate/feed-forward layer.\r\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\r\n attention_probs_dropout_prob: float. Dropout probability of the attention\r\n probabilities.\r\n initializer_range: float. 
Range of the initializer (stddev of truncated\r\n normal).\r\n do_return_all_layers: Whether to also return all layers or just the final\r\n layer.\r\n\r\n Returns:\r\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\r\n hidden layer of the Transformer.\r\n\r\n Raises:\r\n ValueError: A Tensor shape or parameter is invalid.\r\n \"\"\"\r\n if hidden_size % num_attention_heads != 0:\r\n raise ValueError(\r\n \"The hidden size (%d) is not a multiple of the number of attention \"\r\n \"heads (%d)\" % (hidden_size, num_attention_heads))\r\n\r\n attention_head_size = int(hidden_size / num_attention_heads)\r\n input_shape = get_shape_list(input_tensor, expected_rank=3)\r\n batch_size = input_shape[0]\r\n seq_length = input_shape[1]\r\n input_width = input_shape[2]\r\n\r\n # The Transformer performs sum residuals on all layers so the input needs\r\n # to be the same as the hidden size.\r\n if input_width != hidden_size:\r\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\r\n (input_width, hidden_size))\r\n\r\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\r\n # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on\r\n # the GPU/CPU but may not be free on the TPU, so we want to minimize them to\r\n # help the optimizer.\r\n prev_output = reshape_to_matrix(input_tensor)\r\n\r\n all_layer_outputs = []\r\n for layer_idx in range(num_hidden_layers):\r\n with tf.variable_scope(\"layer_%d\" % layer_idx):\r\n layer_input = prev_output\r\n\r\n with tf.variable_scope(\"attention\"):\r\n attention_heads = []\r\n with tf.variable_scope(\"self\"):\r\n attention_head = attention_layer(\r\n from_tensor=layer_input,\r\n to_tensor=layer_input,\r\n attention_mask=attention_mask,\r\n num_attention_heads=num_attention_heads,\r\n size_per_head=attention_head_size,\r\n attention_probs_dropout_prob=attention_probs_dropout_prob,\r\n initializer_range=initializer_range,\r\n do_return_2d_tensor=True,\r\n batch_size=batch_size,\r\n from_seq_length=seq_length,\r\n to_seq_length=seq_length)\r\n attention_heads.append(attention_head)\r\n\r\n attention_output = None\r\n if len(attention_heads) == 1:\r\n attention_output = attention_heads[0]\r\n else:\r\n # In the case where we have other sequences, we just concatenate\r\n # them to the self-attention head before the projection.\r\n attention_output = tf.concat(attention_heads, axis=-1)\r\n\r\n # Run a linear projection of `hidden_size` then add a residual\r\n # with `layer_input`.\r\n with tf.variable_scope(\"output\"):\r\n attention_output = tf.layers.dense(\r\n attention_output,\r\n hidden_size,\r\n kernel_initializer=create_initializer(initializer_range))\r\n attention_output = dropout(attention_output, hidden_dropout_prob)\r\n attention_output = layer_norm(attention_output + layer_input)\r\n\r\n # The activation is only applied to the \"intermediate\" hidden layer.\r\n with tf.variable_scope(\"intermediate\"):\r\n intermediate_output = tf.layers.dense(\r\n attention_output,\r\n intermediate_size,\r\n activation=intermediate_act_fn,\r\n kernel_initializer=create_initializer(initializer_range))\r\n\r\n # Down-project back to `hidden_size` then add the residual.\r\n with tf.variable_scope(\"output\"):\r\n layer_output = tf.layers.dense(\r\n intermediate_output,\r\n hidden_size,\r\n kernel_initializer=create_initializer(initializer_range))\r\n layer_output = dropout(layer_output, hidden_dropout_prob)\r\n layer_output = layer_norm(layer_output + attention_output)\r\n prev_output = 
layer_output\r\n all_layer_outputs.append(layer_output)\r\n\r\n if do_return_all_layers:\r\n final_outputs = []\r\n for layer_output in all_layer_outputs:\r\n final_output = reshape_from_matrix(layer_output, input_shape)\r\n final_outputs.append(final_output)\r\n return final_outputs\r\n else:\r\n final_output = reshape_from_matrix(prev_output, input_shape)\r\n return final_output\r\n\r\n\r\ndef get_shape_list(tensor, expected_rank=None, name=None):\r\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\r\n\r\n Args:\r\n tensor: A tf.Tensor object to find the shape of.\r\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\r\n specified and the `tensor` has a different rank, and exception will be\r\n thrown.\r\n name: Optional name of the tensor for the error message.\r\n\r\n Returns:\r\n A list of dimensions of the shape of tensor. All static dimensions will\r\n be returned as python integers, and dynamic dimensions will be returned\r\n as tf.Tensor scalars.\r\n \"\"\"\r\n if name is None:\r\n name = tensor.name\r\n\r\n if expected_rank is not None:\r\n assert_rank(tensor, expected_rank, name)\r\n\r\n shape = tensor.shape.as_list()\r\n\r\n non_static_indexes = []\r\n for (index, dim) in enumerate(shape):\r\n if dim is None:\r\n non_static_indexes.append(index)\r\n\r\n if not non_static_indexes:\r\n return shape\r\n\r\n dyn_shape = tf.shape(tensor)\r\n for index in non_static_indexes:\r\n shape[index] = dyn_shape[index]\r\n return shape\r\n\r\n\r\ndef reshape_to_matrix(input_tensor):\r\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\r\n ndims = input_tensor.shape.ndims\r\n if ndims < 2:\r\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\r\n (input_tensor.shape))\r\n if ndims == 2:\r\n return input_tensor\r\n\r\n width = input_tensor.shape[-1]\r\n output_tensor = tf.reshape(input_tensor, [-1, width])\r\n return output_tensor\r\n\r\n\r\ndef reshape_from_matrix(output_tensor, orig_shape_list):\r\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\r\n if len(orig_shape_list) == 2:\r\n return output_tensor\r\n\r\n output_shape = get_shape_list(output_tensor)\r\n\r\n orig_dims = orig_shape_list[0:-1]\r\n width = output_shape[-1]\r\n\r\n return tf.reshape(output_tensor, orig_dims + [width])\r\n\r\n\r\ndef assert_rank(tensor, expected_rank, name=None):\r\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\r\n\r\n Args:\r\n tensor: A tf.Tensor to check the rank of.\r\n expected_rank: Python integer or list of integers, expected rank.\r\n name: Optional name of the tensor for the error message.\r\n\r\n Raises:\r\n ValueError: If the expected shape doesn't match the actual shape.\r\n \"\"\"\r\n if name is None:\r\n name = tensor.name\r\n\r\n expected_rank_dict = {}\r\n if isinstance(expected_rank, six.integer_types):\r\n expected_rank_dict[expected_rank] = True\r\n else:\r\n for x in expected_rank:\r\n expected_rank_dict[x] = True\r\n\r\n actual_rank = tensor.shape.ndims\r\n if actual_rank not in expected_rank_dict:\r\n scope_name = tf.get_variable_scope().name\r\n raise ValueError(\r\n \"For the tensor `%s` in scope `%s`, the actual rank \"\r\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\r\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))\r\n"
] | [
[
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.gfile.GFile",
"tensorflow.cast",
"tensorflow.assert_less_equal",
"tensorflow.truncated_normal_initializer",
"tensorflow.squeeze",
"tensorflow.train.list_variables",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.one_hot",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.variable_scope",
"tensorflow.sqrt",
"tensorflow.get_variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
GinoBacallao/openai-python | [
"88bbe08947bceb10845b335d7f4cfb5ff406d948"
] | [
"examples/embeddings/utils.py"
] | [
"import openai\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tenacity import retry, wait_random_exponential, stop_after_attempt\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\n\n\n@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))\ndef get_embedding(text, engine=\"davinci-similarity\"):\n\n # replace newlines, which can negatively affect performance.\n text = text.replace(\"\\n\", \" \")\n\n return openai.Engine(id=engine).embeddings(input = [text])['data'][0]['embedding']\n\n\ndef cosine_similarity(a, b):\n return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))\n\n\ndef plot_multiclass_precision_recall(\n y_score, y_true_untransformed, class_list, classifier_name\n):\n \"\"\"\n Precision-Recall plotting for a multiclass problem. It plots average precision-recall, per class precision recall and reference f1 contours.\n\n Code slightly modified, but heavily based on https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html\n \"\"\"\n n_classes = len(class_list)\n y_true = pd.concat(\n [(y_true_untransformed == class_list[i]) for i in range(n_classes)], axis=1\n ).values\n\n # For each class\n precision = dict()\n recall = dict()\n average_precision = dict()\n for i in range(n_classes):\n precision[i], recall[i], _ = precision_recall_curve(y_true[:, i], y_score[:, i])\n average_precision[i] = average_precision_score(y_true[:, i], y_score[:, i])\n\n # A \"micro-average\": quantifying score on all classes jointly\n precision[\"micro\"], recall[\"micro\"], _ = precision_recall_curve(\n y_true.ravel(), y_score.ravel()\n )\n average_precision[\"micro\"] = average_precision_score(\n y_true, y_score, average=\"micro\"\n )\n print(\n str(classifier_name)\n + \" - Average precision score over all classes: {0:0.2f}\".format(\n average_precision[\"micro\"]\n )\n )\n\n # setup plot details\n plt.figure(figsize=(9, 10))\n f_scores = np.linspace(0.2, 0.8, num=4)\n lines = []\n labels = []\n for f_score in f_scores:\n x = np.linspace(0.01, 1)\n y = f_score * x / (2 * x - f_score)\n (l,) = plt.plot(x[y >= 0], y[y >= 0], color=\"gray\", alpha=0.2)\n plt.annotate(\"f1={0:0.1f}\".format(f_score), xy=(0.9, y[45] + 0.02))\n\n lines.append(l)\n labels.append(\"iso-f1 curves\")\n (l,) = plt.plot(recall[\"micro\"], precision[\"micro\"], color=\"gold\", lw=2)\n lines.append(l)\n labels.append(\n \"average Precision-recall (auprc = {0:0.2f})\"\n \"\".format(average_precision[\"micro\"])\n )\n\n for i in range(n_classes):\n (l,) = plt.plot(recall[i], precision[i], lw=2)\n lines.append(l)\n labels.append(\n \"Precision-recall for class `{0}` (auprc = {1:0.2f})\"\n \"\".format(class_list[i], average_precision[i])\n )\n\n fig = plt.gcf()\n fig.subplots_adjust(bottom=0.25)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel(\"Recall\")\n plt.ylabel(\"Precision\")\n plt.title(f\"{classifier_name}: Precision-Recall curve for each class\")\n plt.legend(lines, labels)"
] | [
[
"matplotlib.pyplot.legend",
"numpy.dot",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"numpy.linalg.norm",
"sklearn.metrics.precision_recall_curve",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.average_precision_score",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elsa-burren/easy-neuralnetwork | [
"249149397016c104dd963c17a580f3fe45397191"
] | [
"myNN.py"
] | [
"# myNN.py\n# tested with Python3.7\n# author: Elsa Burren\n\nimport numpy as np\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\nclass MyNode:\n def __init__(self, w):\n self.w = w\n def update(self, dw):\n self.w += dw\n def output(self, x):\n return sigmoid(np.dot(x, self.w))\n\nclass MyHiddenLayer:\n def __init__(self, W, b):\n self.b = b\n self.W = W\n self.nbNodes = self.W.shape[0]\n def update(self, dW):\n self.W += dW \n def output(self, x): \n y = np.zeros(self.nbNodes) \n for i in range(0, self.nbNodes): \n y[i] = sigmoid(np.inner(self.W[i,], x) + self.b)\n return y\n\nclass MyOutputLayer: \n def __init__(self, w): \n self.w = w \n def update(self, dw):\n self.w += dw \n def output(self, x): \n return np.inner(self.w, x)\n \ndef example_3nodes_1feature(): \n import matplotlib.pyplot as plt\n w_hidden = np.array([1.0, -1.5, .5]) \n w_output = np.array([1, 1, -1])\n hidden_layer = MyHiddenLayer(w_hidden, 5) \n output_layer =MyOutputLayer(w_output) \n x = np.linspace(-10, 10, 20)\n y = np.zeros(len(x))\n for i, dx in enumerate(x):\n y[i] = output_layer.output(hidden_layer.output(dx))\n print([y[i]])\n plt.plot(x, y)\n plt.show()\n\ndef example_sigmoid():\n import matplotlib.pyplot as plt\n x = np.linspace(-2, 2, 20)\n y = sigmoid(2*x)\n plt.plot(x,y)\n plt.show()\n\ndef not_available():\n print(\"This example does not exist!\")\n \nif __name__ == \"__main__\":\n\n def select_example(x):\n return {\n \"a\" : \"example_3nodes_1feature\",\n \"b\" : \"example_sigmoid\"\n }.get(x, \"not_available\")\n \n print(\"\\nSo far, two examples are implemented\")\n print(\"\\nExample a: plots the output of a network with one hidden layer, 3 nodes and 1 feature. The purpose is to illustrate what a graph of such a function can look like.\")\n print(\"\\nExample b: a plot of a sigmoid function\")\n print(\"(see the script for the code of these examples)\")\n example_input = input(\"\\nSelect the example (enter a or b) :\")\n example_funct = select_example(example_input)\n locals()[example_funct]()\n\n"
] | [
[
"numpy.dot",
"numpy.inner",
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
teshima058/3d_pose_baseline_openpose | [
"a5103a6db6df3cb34590bf68ddb179d4e543ffb2"
] | [
"src/poseVisualizer.py"
] | [
"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# pose.shape : ({joint_num}, 3)\n# index_list : np.arange({joint_num})\ndef visualizePose(pose, mode=None, fig_scale=1, index_list=None,):\n fig = plt.figure()\n ax = fig.add_subplot(111 , projection='3d')\n for i,p in enumerate(pose):\n if mode == 'upper':\n upper_joints = [0, 1, 3, 4, 5, 9, 10, 11]\n if not i in upper_joints:\n continue\n ax.scatter(p[0], p[1], zs=p[2], zdir='z', s=20, marker='o', cmap=plt.cm.jet) \n elif index_list is not None:\n ax.scatter(p[0], p[1], zs=p[2], zdir='z', s=50, marker='${}$'.format(index_list[i]), cmap=plt.cm.jet) \n else:\n ax.scatter(p[0], p[1], zs=p[2], zdir='z', s=20, marker='o', cmap=plt.cm.jet) \n\n if mode == 'h36':\n borne_list = [[0,1], [0,4], [1,2], [2,3], [4,5], [5,6], [0,7], [7,8], [8,9], [9,10], [8,11], [11,12], [12,13], [8,14], [14,15], [15,16]]\n for b in borne_list:\n ax.plot([pose[b[0]][0], pose[b[1]][0]], [pose[b[0]][1], pose[b[1]][1]], [pose[b[0]][2], pose[b[1]][2]], lw=2)\n elif mode == 'cmu':\n borne_list = [[0,1], [0,2], [0,3], [3,4], [4,5], [2,6], [6,7], [7,8], [0,9], [9,10], [10,11], [2,12], [12,13], [13,14], [1,15], [15,16], [1,17], [17,18]]\n for b in borne_list:\n ax.plot([pose[b[0]][0], pose[b[1]][0]], [pose[b[0]][1], pose[b[1]][1]], [pose[b[0]][2], pose[b[1]][2]], lw=2)\n elif mode == 'upper':\n borne_list = [[0, 1], [0, 3], [3, 4], [4, 5], [0, 9], [9, 10], [10, 11]]\n for b in borne_list:\n ax.plot([pose[b[0]][0], pose[b[1]][0]], [pose[b[0]][1], pose[b[1]][1]], [pose[b[0]][2], pose[b[1]][2]], lw=2)\n\n # ax.set_xlim(-3 * fig_scale, 3 * fig_scale)\n # ax.set_ylim(-4 * fig_scale, 8 * fig_scale)\n # ax.set_zlim(-3 * fig_scale, 3 * fig_scale)\n ax.set_xlabel(\"X-axis\")\n ax.set_ylabel(\"Y-axis\")\n ax.set_zlabel(\"Z-axis\")\n plt.show()\n\n\n# Example\n# --------------------------\n# visualize outputs\n# --------------------------\n# ~~~\n# outputs = model(inputs)\n# \n# idx = 0\n# pose = outputs[idx].reshape(-1, 3)\n# pose = pose.cpu().detach().numpy()\n# index_frame = np.arange(len(pose))\n# visualizePose(pose, index_frame)\n\n\n\n# --------------------------\n# visualize inputs\n# --------------------------\n# idx = 0\n# pose = []\n# for i in range(len(inputs[idx])):\n# pose.append(inputs[idx][i].cpu().detach().numpy())\n# if i % 2 == 1:\n# pose.append(0)\n# pose = np.array(pose)\n# pose = np.reshape(pose, [-1, 3])\n# index_frame = np.arange(len(pose))\n# visualizePose(pose, index_frame)"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
megcrow/kinetic-schemes | [
"d1f5c5cc95481554e2d68a69b3b8663f9501df3d",
"d1f5c5cc95481554e2d68a69b3b8663f9501df3d",
"d1f5c5cc95481554e2d68a69b3b8663f9501df3d"
] | [
"functions/blasi.py",
"koufopanos_1991.py",
"functions/thurner.py"
] | [
"\"\"\"\nFunctions for Blasi 1993 and Blasi Branca 2001 kinetic reaction schemes for\nbiomass pyrolysis. See comments in each function for more details.\n\nReferences:\nBlasi, 1993. Combustion Science and Technology, 90, pp 315–340.\nBlasi, Branca, 2001. Ind. Eng. Chem. Res., 40, pp 5547-5556.\n\"\"\"\n\nimport numpy as np\n\ndef blasi(wood, gas, tar, char, T, dt, s=1):\n \"\"\"\n Primary and secondary kinetic reactions from Table 1 in Blasi 1993 paper.\n Note that primary reaction parameters in table are not cited correctly from\n the Thurner and Mann 1981 paper, this function uses the correct parameters.\n\n Parameters\n ----------\n wood = wood concentration, kg/m^3\n gas = gas concentation, kg/m^3\n tar = tar concentation, kg/m^3\n char = char concentation, kg/m^3\n T = temperature, K\n dt = time step, s\n s = 1 primary reactions only, 2 primary and secondary reactions\n\n Returns\n -------\n nwood = new wood concentration, kg/m^3\n ngas = new gas concentration, kg/m^3\n ntar = new tar concentration, kg/m^3\n nchar = new char concentration, kg/m^3\n \"\"\"\n # A = pre-factor (1/s) and E = activation energy (kJ/mol)\n A1 = 1.4345e4; E1 = 88.6 # wood -> gas\n A2 = 4.125e6; E2 = 112.7 # wood -> tar\n A3 = 7.3766e5; E3 = 106.5 # wood -> char\n A4 = 4.28e6; E4 = 108 # tar -> gas\n A5 = 1.0e6; E5 = 108 # tar -> char\n R = 0.008314 # universal gas constant, kJ/mol*K\n\n # reaction rate constant for each reaction, 1/s\n K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas\n K2 = A2 * np.exp(-E2 / (R * T)) # wood -> tar\n K3 = A3 * np.exp(-E3 / (R * T)) # wood -> char\n K4 = A4 * np.exp(-E4 / (R * T)) # tar -> gas\n K5 = A5 * np.exp(-E5 / (R * T)) # tar -> char\n\n if s == 1:\n # primary reactions only\n rw = -(K1+K2+K3)*wood # wood rate\n rg = K1*wood # gas rate\n rt = K2*wood # tar rate\n rc = K3*wood # char rate\n nwood = wood + rw*dt # update wood concentration\n ngas = gas + rg*dt # update gas concentration\n ntar = tar + rt*dt # update tar concentration\n nchar = char + rc*dt # update char concentration\n elif s == 2:\n # primary and secondary reactions\n rw = -(K1+K2+K3)*wood # wood rate\n rg = K1*wood + K4*tar # gas rate\n rt = K2*wood - (K4+K5)*tar # tar rate\n rc = K3*wood + K5*tar # char rate\n nwood = wood + rw*dt # update wood concentration\n ngas = gas + rg*dt # update gas concentration\n ntar = tar + rt*dt # update tar concentration\n nchar = char + rc*dt # update char concentration\n\n # return new wood, gas, tar, char mass concentrations, kg/m^3\n return nwood, ngas, ntar, nchar\n\n\ndef blasibranca(pw, pg, pt, pc, T, dt):\n \"\"\"\n Primary kinetic reactions from Table 1 in Blasi and Branca 2001 paper.\n\n Parameters\n ----------\n pw = wood concentration, kg/m^3\n pg = gas concentation, kg/m^3\n pt = tar concentation, kg/m^3\n pc = char concentation, kg/m^3\n T = temperature, K\n dt = time step, s\n\n Returns\n -------\n nw = new wood concentration, kg/m^3\n ng = new gas concentration, kg/m^3\n nt = new tar concentration, kg/m^3\n nc = new char concentration, kg/m^3\n \"\"\"\n # A = pre-factor (1/s) and E = activation energy (kJ/mol)\n A1 = 4.38e9; E1 = 152.7 # wood -> gas\n A2 = 1.08e10; E2 = 148 # wood -> tar\n A3 = 3.27e6; E3 = 111.7 # wood -> char\n R = 0.008314 # universal gas constant, kJ/mol*K\n\n # reaction rate constant for each reaction, 1/s\n K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas\n K2 = A2 * np.exp(-E2 / (R * T)) # wood -> tar\n K3 = A3 * np.exp(-E3 / (R * T)) # wood -> char\n\n # primary reactions only\n rw = -(K1+K2+K3)*pw # wood rate\n rg = K1*pw # gas rate\n 
rt = K2*pw # tar rate\n rc = K3*pw # char rate\n nw = pw + rw*dt # update wood concentration\n ng = pg + rg*dt # update gas concentration\n nt = pt + rt*dt # update tar concentration\n nc = pc + rc*dt # update char concentration\n\n # return new wood, gas, tar, char as mass concentrations, kg/m^3\n return nw, ng, nt, nc\n",
"\"\"\"\nPlot yields from primary and secondary reactions as determined by the\nKoufopanos 1991 kinetic scheme for biomass pyrolysis. Note that this scheme\nfocuses on wood conversion and char yield. Product of volatiles and gas is\nlumped together as (V+G) so individual tar and gas component is not provided.\n\nReference:\nKoufopanos, 1991. The Canadian Journal of Chemical Engineering, 69, pp 907–915.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as py\n\n# Parameters\n# ------------------------------------------------------------------------------\n\nT = 773 # temperature for rate constants, K\n\ndt = 0.01 # time step, delta t\ntmax = 25 # max time, s\nt = np.linspace(0, tmax, num=tmax/dt) # time vector\nnt = len(t) # total number of time steps\n\n# Function for Koufopanos 1991 Kinetic Scheme\n# ------------------------------------------------------------------------------\n\ndef koufopanos(B, VG1, C1, VG2, C2, T, dt, s=1):\n \"\"\"\n Primary and secondary kinetic reactions from Koufopanos 1991 paper. Notice\n that volatiles and gases are grouped together as (Volatiles + Gases) which\n are labeled here as VG.\n\n Parameters\n ----------\n B = biomass concentration\n VG1 = (volatiles + gas)1 concentration\n C1 = char1 concentration\n VG2 = (volatiles + gas)2 concentration\n C2 = char2 concentration\n dt = time step, s\n s = 1 primary reactions only, 2 primary and secondary reactions\n\n Returns\n -------\n nB = new biomass concentration\n nVG1 = new (volatiles + char)1 concentration\n nC1 = new char1 concentration\n nVG2 = new (volatiles + gas)2 concentration\n nC2 = new char2 concentration\n \"\"\"\n # A as pre-factor (1/s) and E as activation energy (kJ/mol)\n A1 = 9.973e-5; G1 = 17254.4; L1 = -9061227 # biomass -> (volatiles + gases)1\n A2 = 1.068e-3; G2 = 10224.4; L2 = -6123081 # biomass -> char1\n A3 = 5.7e5; E3 = 81 # (vol+gases)1 -> (vol+gases)2 + char2\n R = 0.008314 # universal gas constant, kJ/mol*K\n\n # reaction rate constant for each reaction, 1/s\n K1 = A1 * np.exp((G1 / T) + (L1 / T**2)) # biomass -> (volatiles + gases)1\n K2 = A2 * np.exp((G2 / T) + (L2 / T**2)) # biomass -> char1\n K3 = A3 * np.exp(-E3 / (R * T)) # (vol+gases)1 -> (vol+gases)2 + char2\n\n if s == 1:\n # primary reactions only\n rB = -(K1+K2)*B # biomass rate\n rVG1 = K1*B # (volatiles + gases)1 rate\n rC1 = K2*B # char1 rate\n rVG2 = 0 # (volatiles + gases)2 rate\n rC2 = 0 # char2 rate\n nB = B + rB*dt # update biomass concentration\n nVG1 = VG1 + rVG1*dt # update (volatiles + gases)1 concentration\n nC1 = C1 + rC1*dt # update char1 concentration\n nVG2 = VG2 + rVG2*dt # update (volatiles + gases)2 concentration\n nC2 = C2 + rC2*dt # update char2 concentration\n elif s == 2:\n # primary and secondary reactions\n rB = -(K1+K2)*B # biomass rate\n rVG1 = K1*B # volatiles + gases)1 rate\n rC1 = K2*B - K3*C1 # char1 rate\n rVG2 = K3*C1 # (volatiles + gases)2 rate\n rC2 = K3*C1 # char2 rate\n nB = B + rB*dt # update biomass concentration\n nVG1 = VG1 + rVG1*dt # update (volatiles + gases)1 concentration\n nC1 = C1 + rC1*dt # update char1 concentration\n nVG2 = VG2 + rVG2*dt # update (volatiles + gases)2 concentration\n nC2 = C2 + rC2*dt # update char2 concentration\n\n return nB, nVG1, nC1, nVG2, nC2\n\n# Product from Kinetic Scheme\n# ------------------------------------------------------------------------------\n\n# Assume initial concentration of B(0) = 1 and everything else initially at zero\n# such as VG(0) = C(0) = 0 where VG is (Volatiles + Gases) and C is Char.\n\n# concentrations reported on 
a mass basis in kg/m^3\n# pw = wood concentration, pg = gas concentration, pt = tar concentration,\n# pc = char concentration\n\n# store concentrations from primary reactions at each time step\nB = np.ones(nt) # biomass concentration vector\nVG1 = np.zeros(nt) # (volatiles + gases)1 concentration vector\nC1 = np.zeros(nt) # char1 concentration vector\nVG2 = np.zeros(nt) # (volatiles + gases)2 concentration vector\nC2 = np.zeros(nt) # char2 concentration vector\n\n# store concentrations from primary and secondary reactions at each time step\nB_2 = np.ones(nt) # biomass concentration vector\nVG1_2 = np.zeros(nt) # (volatiles + gases)1 concentration vector\nC1_2 = np.zeros(nt) # char1 concentration vector\nVG2_2 = np.zeros(nt) # (volatiles + gases)2 concentration vector\nC2_2 = np.zeros(nt) # char2 concentration vector\n\n# products from primary reactions only\nfor i in range(1, nt):\n B[i], VG1[i], C1[i], VG2[i], C2[i] = koufopanos(B[i-1], VG1[i-1], C1[i-1], VG2[i-1], C2[i-1], T, dt)\n\n# products from primary and secondary reactions\nfor i in range(1, nt):\n B_2[i], VG1_2[i], C1_2[i], VG2_2[i], C2_2[i] = koufopanos(B_2[i-1], VG1_2[i-1], C1_2[i-1], VG2_2[i-1], C2_2[i-1], T, dt, s=2)\n\n# totals from primary reactions only\npvg = VG1 + VG2\npc = C1 + C2\n\n# totals from primary and secondary reactions, assume VG1 -> (VG + C)2 where\n# components in the group (VG + C)2 = 1/2*VG2 + 1/2*C2\npvg_2 = VG1_2 + 0.5*VG2_2\npc_2 = C1_2 + 0.5*C2_2\n\n# mass balance to check results\nmt = B + pvg + pc\nmt2 = B_2 + pvg_2 + pc_2\n\n# Plot Results\n# ------------------------------------------------------------------------------\n\npy.ion()\npy.close('all')\n\npy.figure(1)\npy.plot(t, B, lw=2, label='B')\npy.plot(t, pvg, lw=2, label='(V+G)$_1$')\npy.plot(t, pc, lw=2, label='Char$_1$')\npy.title('Koufopanos 1991 primary reactions at T = {} K'.format(T))\npy.xlabel('Time (s)')\npy.ylabel('Concentration (normalized mass basis)')\npy.legend(loc='best', numpoints=1)\npy.grid()\n\npy.figure(2)\npy.plot(t, B_2, lw=2, label='B')\npy.plot(t, pvg_2, lw=2, label='(V+G)')\npy.plot(t, pc_2, lw=2, label='Char')\npy.title('Koufopanos 1991 primary and secondary reactions at T = {} K'.format(T))\npy.xlabel('Time (s)')\npy.ylabel('Concentration (normalized mass basis)')\npy.legend(loc='best', numpoints=1)\npy.grid()\n",
"\"\"\"\nFunction for Thurner 1981 kinetic scheme for biomass pyrolysis. See comments in\nthe function for more details.\n\nReference:\nThurner, Mann, 1981. Ind. Eng. Chem. Process Des. Dev., 20, pp 482-488.\n\"\"\"\n\nimport numpy as np\n\ndef thurner(wood, gas, tar, char, T, dt):\n \"\"\"\n Primary kinetic reactions from Thurner 1981 paper for biomass pyrolysis.\n\n Parameters\n ----------\n wood = wood concentration, kg/m^3\n gas = gas concentation, kg/m^3\n tar = tar concentation, kg/m^3\n char = char concentation, kg/m^3\n T = temperature, K\n dt = time step, s\n\n Returns\n -------\n nwood = new wood concentration, kg/m^3\n ngas = new gas concentration, kg/m^3\n ntar = new tar concentration, kg/m^3\n nchar = new char concentration, kg/m^3\n \"\"\"\n # A = pre-factor (1/s) and E = activation energy (kJ/mol)\n A1 = 1.44e4; E1 = 88.6 # wood -> gas\n A2 = 4.13e6; E2 = 112.7 # wood -> tar\n A3 = 7.38e5; E3 = 106.5 # wood -> char\n R = 0.008314 # universal gas constant, kJ/mol*K\n\n # reaction rate constant for each reaction, 1/s\n K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas\n K2 = A2 * np.exp(-E2 / (R * T)) # wood -> tar\n K3 = A3 * np.exp(-E3 / (R * T)) # wood -> char\n\n # primary reactions only\n rw = -(K1+K2+K3) * wood # wood rate\n rg = K1 * wood # gas rate\n rt = K2 * wood # tar rate\n rc = K3 * wood # char rate\n nwood = wood + rw*dt # update wood concentration\n ngas = gas + rg*dt # update gas concentration\n ntar = tar + rt*dt # update tar concentration\n nchar = char + rc*dt # update char concentration\n\n # return new wood, char, gas, tar as mass concentrations, kg/m^3\n return nwood, ngas, ntar, nchar\n"
] | [
[
"numpy.exp"
],
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"numpy.ones",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.grid",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ion",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sethusaim/Automatic-Number-Plate-Recognition | [
"8b26008f8511e52600b150157901079e0fd0ebfe",
"8b26008f8511e52600b150157901079e0fd0ebfe",
"8b26008f8511e52600b150157901079e0fd0ebfe"
] | [
"base2designs/utils/vrd_evaluation.py",
"base2designs/utils/np_box_list_ops_test.py",
"base2designs/utils/np_mask_ops.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Evaluator class for Visual Relations Detection.\n\nVRDDetectionEvaluator is a class which manages ground truth information of a\nvisual relations detection (vrd) dataset, and computes frequently used detection\nmetrics such as Precision, Recall, Recall@k, of the provided vrd detection\nresults.\nIt supports the following operations:\n1) Adding ground truth information of images sequentially.\n2) Adding detection results of images sequentially.\n3) Evaluating detection metrics on already inserted detection results.\n\nNote1: groundtruth should be inserted before evaluation.\nNote2: This module operates on numpy boxes and box lists.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom abc import abstractmethod\nimport collections\nimport logging\nimport numpy as np\nimport six\nfrom six.moves import range\n\nfrom object_detection.core import standard_fields\nfrom object_detection.utils import metrics\nfrom object_detection.utils import object_detection_evaluation\nfrom object_detection.utils import per_image_vrd_evaluation\n\n# Below standard input numpy datatypes are defined:\n# box_data_type - datatype of the groundtruth visual relations box annotations;\n# this datatype consists of two named boxes: subject bounding box and object\n# bounding box. 
Each box is of the format [y_min, x_min, y_max, x_max], each\n# coordinate being of type float32.\n# label_data_type - corresponding datatype of the visual relations label\n# annotaions; it consists of three numerical class labels: subject class label,\n# object class label and relation class label, each class label being of type\n# int32.\nvrd_box_data_type = np.dtype([(\"subject\", \"f4\", (4,)), (\"object\", \"f4\", (4,))])\nsingle_box_data_type = np.dtype([(\"box\", \"f4\", (4,))])\nlabel_data_type = np.dtype([(\"subject\", \"i4\"), (\"object\", \"i4\"), (\"relation\", \"i4\")])\n\n\nclass VRDDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):\n \"\"\"A class to evaluate VRD detections.\n\n This class serves as a base class for VRD evaluation in two settings:\n - phrase detection\n - relation detection.\n \"\"\"\n\n def __init__(self, matching_iou_threshold=0.5, metric_prefix=None):\n \"\"\"Constructor.\n\n Args:\n matching_iou_threshold: IOU threshold to use for matching groundtruth\n boxes to detection boxes.\n metric_prefix: (optional) string prefix for metric name; if None, no\n prefix is used.\n\n \"\"\"\n super(VRDDetectionEvaluator, self).__init__([])\n self._matching_iou_threshold = matching_iou_threshold\n self._evaluation = _VRDDetectionEvaluation(\n matching_iou_threshold=self._matching_iou_threshold\n )\n self._image_ids = set([])\n self._metric_prefix = (metric_prefix + \"_\") if metric_prefix else \"\"\n self._evaluatable_labels = {}\n self._negative_labels = {}\n\n @abstractmethod\n def _process_groundtruth_boxes(self, groundtruth_box_tuples):\n \"\"\"Pre-processes boxes before adding them to the VRDDetectionEvaluation.\n\n Phrase detection and Relation detection subclasses re-implement this method\n depending on the task.\n\n Args:\n groundtruth_box_tuples: A numpy array of structures with the shape\n [M, 1], each structure containing the same number of named bounding\n boxes. Each box is of the format [y_min, x_min, y_max, x_max] (see\n datatype vrd_box_data_type, single_box_data_type above).\n \"\"\"\n raise NotImplementedError(\n \"_process_groundtruth_boxes method should be implemented in subclasses\"\n \"of VRDDetectionEvaluator.\"\n )\n\n @abstractmethod\n def _process_detection_boxes(self, detections_box_tuples):\n \"\"\"Pre-processes boxes before adding them to the VRDDetectionEvaluation.\n\n Phrase detection and Relation detection subclasses re-implement this method\n depending on the task.\n\n Args:\n detections_box_tuples: A numpy array of structures with the shape\n [M, 1], each structure containing the same number of named bounding\n boxes. 
Each box is of the format [y_min, x_min, y_max, x_max] (see\n datatype vrd_box_data_type, single_box_data_type above).\n \"\"\"\n raise NotImplementedError(\n \"_process_detection_boxes method should be implemented in subclasses\"\n \"of VRDDetectionEvaluator.\"\n )\n\n def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):\n \"\"\"Adds groundtruth for a single image to be used for evaluation.\n\n Args:\n image_id: A unique string/integer identifier for the image.\n groundtruth_dict: A dictionary containing -\n standard_fields.InputDataFields.groundtruth_boxes: A numpy array\n of structures with the shape [M, 1], representing M tuples, each tuple\n containing the same number of named bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max] (see\n datatype vrd_box_data_type, single_box_data_type above).\n standard_fields.InputDataFields.groundtruth_classes: A numpy array of\n structures shape [M, 1], representing the class labels of the\n corresponding bounding boxes and possibly additional classes (see\n datatype label_data_type above).\n standard_fields.InputDataFields.groundtruth_image_classes: numpy array\n of shape [K] containing verified labels.\n Raises:\n ValueError: On adding groundtruth for an image more than once.\n \"\"\"\n if image_id in self._image_ids:\n raise ValueError(\"Image with id {} already added.\".format(image_id))\n\n groundtruth_class_tuples = groundtruth_dict[\n standard_fields.InputDataFields.groundtruth_classes\n ]\n groundtruth_box_tuples = groundtruth_dict[\n standard_fields.InputDataFields.groundtruth_boxes\n ]\n\n self._evaluation.add_single_ground_truth_image_info(\n image_key=image_id,\n groundtruth_box_tuples=self._process_groundtruth_boxes(\n groundtruth_box_tuples\n ),\n groundtruth_class_tuples=groundtruth_class_tuples,\n )\n self._image_ids.update([image_id])\n all_classes = []\n for field in groundtruth_box_tuples.dtype.fields:\n all_classes.append(groundtruth_class_tuples[field])\n groudtruth_positive_classes = np.unique(np.concatenate(all_classes))\n verified_labels = groundtruth_dict.get(\n standard_fields.InputDataFields.groundtruth_image_classes,\n np.array([], dtype=int),\n )\n self._evaluatable_labels[image_id] = np.unique(\n np.concatenate((verified_labels, groudtruth_positive_classes))\n )\n\n self._negative_labels[image_id] = np.setdiff1d(\n verified_labels, groudtruth_positive_classes\n )\n\n def add_single_detected_image_info(self, image_id, detections_dict):\n \"\"\"Adds detections for a single image to be used for evaluation.\n\n Args:\n image_id: A unique string/integer identifier for the image.\n detections_dict: A dictionary containing -\n standard_fields.DetectionResultFields.detection_boxes: A numpy array of\n structures with shape [N, 1], representing N tuples, each tuple\n containing the same number of named bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max] (as an example\n see datatype vrd_box_data_type, single_box_data_type above).\n standard_fields.DetectionResultFields.detection_scores: float32 numpy\n array of shape [N] containing detection scores for the boxes.\n standard_fields.DetectionResultFields.detection_classes: A numpy array\n of structures shape [N, 1], representing the class labels of the\n corresponding bounding boxes and possibly additional classes (see\n datatype label_data_type above).\n \"\"\"\n if image_id not in self._image_ids:\n logging.warning(\"No groundtruth for the image with id %s.\", image_id)\n # Since for the correct work of evaluator it is 
assumed that groundtruth\n # is inserted first we make sure to break the code if is it not the case.\n self._image_ids.update([image_id])\n self._negative_labels[image_id] = np.array([])\n self._evaluatable_labels[image_id] = np.array([])\n\n num_detections = detections_dict[\n standard_fields.DetectionResultFields.detection_boxes\n ].shape[0]\n detection_class_tuples = detections_dict[\n standard_fields.DetectionResultFields.detection_classes\n ]\n detection_box_tuples = detections_dict[\n standard_fields.DetectionResultFields.detection_boxes\n ]\n negative_selector = np.zeros(num_detections, dtype=bool)\n selector = np.ones(num_detections, dtype=bool)\n # Only check boxable labels\n for field in detection_box_tuples.dtype.fields:\n # Verify if one of the labels is negative (this is sure FP)\n negative_selector |= np.isin(\n detection_class_tuples[field], self._negative_labels[image_id]\n )\n # Verify if all labels are verified\n selector &= np.isin(\n detection_class_tuples[field], self._evaluatable_labels[image_id]\n )\n selector |= negative_selector\n self._evaluation.add_single_detected_image_info(\n image_key=image_id,\n detected_box_tuples=self._process_detection_boxes(\n detection_box_tuples[selector]\n ),\n detected_scores=detections_dict[\n standard_fields.DetectionResultFields.detection_scores\n ][selector],\n detected_class_tuples=detection_class_tuples[selector],\n )\n\n def evaluate(self, relationships=None):\n \"\"\"Compute evaluation result.\n\n Args:\n relationships: A dictionary of numerical label-text label mapping; if\n specified, returns per-relationship AP.\n\n Returns:\n A dictionary of metrics with the following fields -\n\n summary_metrics:\n 'weightedAP@<matching_iou_threshold>IOU' : weighted average precision\n at the specified IOU threshold.\n 'AP@<matching_iou_threshold>IOU/<relationship>' : AP per relationship.\n 'mAP@<matching_iou_threshold>IOU': mean average precision at the\n specified IOU threshold.\n 'Recall@50@<matching_iou_threshold>IOU': recall@50 at the specified IOU\n threshold.\n 'Recall@100@<matching_iou_threshold>IOU': recall@100 at the specified\n IOU threshold.\n if relationships is specified, returns <relationship> in AP metrics as\n readable names, otherwise the names correspond to class numbers.\n \"\"\"\n (\n weighted_average_precision,\n mean_average_precision,\n average_precisions,\n _,\n _,\n recall_50,\n recall_100,\n _,\n _,\n ) = self._evaluation.evaluate()\n\n vrd_metrics = {\n (\n self._metric_prefix\n + \"weightedAP@{}IOU\".format(self._matching_iou_threshold)\n ): weighted_average_precision,\n self._metric_prefix\n + \"mAP@{}IOU\".format(self._matching_iou_threshold): mean_average_precision,\n self._metric_prefix\n + \"Recall@50@{}IOU\".format(self._matching_iou_threshold): recall_50,\n self._metric_prefix\n + \"Recall@100@{}IOU\".format(self._matching_iou_threshold): recall_100,\n }\n if relationships:\n for key, average_precision in six.iteritems(average_precisions):\n vrd_metrics[\n self._metric_prefix\n + \"AP@{}IOU/{}\".format(\n self._matching_iou_threshold, relationships[key]\n )\n ] = average_precision\n else:\n for key, average_precision in six.iteritems(average_precisions):\n vrd_metrics[\n self._metric_prefix\n + \"AP@{}IOU/{}\".format(self._matching_iou_threshold, key)\n ] = average_precision\n\n return vrd_metrics\n\n def clear(self):\n \"\"\"Clears the state to prepare for a fresh evaluation.\"\"\"\n self._evaluation = _VRDDetectionEvaluation(\n matching_iou_threshold=self._matching_iou_threshold\n )\n 
self._image_ids.clear()\n self._negative_labels.clear()\n self._evaluatable_labels.clear()\n\n\nclass VRDRelationDetectionEvaluator(VRDDetectionEvaluator):\n \"\"\"A class to evaluate VRD detections in relations setting.\n\n Expected groundtruth box datatype is vrd_box_data_type, expected groudtruth\n labels datatype is label_data_type.\n Expected detection box datatype is vrd_box_data_type, expected detection\n labels\n datatype is label_data_type.\n \"\"\"\n\n def __init__(self, matching_iou_threshold=0.5):\n super(VRDRelationDetectionEvaluator, self).__init__(\n matching_iou_threshold=matching_iou_threshold,\n metric_prefix=\"VRDMetric_Relationships\",\n )\n\n def _process_groundtruth_boxes(self, groundtruth_box_tuples):\n \"\"\"Pre-processes boxes before adding them to the VRDDetectionEvaluation.\n\n Args:\n groundtruth_box_tuples: A numpy array of structures with the shape\n [M, 1], each structure containing the same number of named bounding\n boxes. Each box is of the format [y_min, x_min, y_max, x_max].\n\n Returns:\n Unchanged input.\n \"\"\"\n\n return groundtruth_box_tuples\n\n def _process_detection_boxes(self, detections_box_tuples):\n \"\"\"Pre-processes boxes before adding them to the VRDDetectionEvaluation.\n\n Phrase detection and Relation detection subclasses re-implement this method\n depending on the task.\n\n Args:\n detections_box_tuples: A numpy array of structures with the shape\n [M, 1], each structure containing the same number of named bounding\n boxes. Each box is of the format [y_min, x_min, y_max, x_max] (see\n datatype vrd_box_data_type, single_box_data_type above).\n Returns:\n Unchanged input.\n \"\"\"\n return detections_box_tuples\n\n\nclass VRDPhraseDetectionEvaluator(VRDDetectionEvaluator):\n \"\"\"A class to evaluate VRD detections in phrase setting.\n\n Expected groundtruth box datatype is vrd_box_data_type, expected groudtruth\n labels datatype is label_data_type.\n Expected detection box datatype is single_box_data_type, expected detection\n labels datatype is label_data_type.\n \"\"\"\n\n def __init__(self, matching_iou_threshold=0.5):\n super(VRDPhraseDetectionEvaluator, self).__init__(\n matching_iou_threshold=matching_iou_threshold,\n metric_prefix=\"VRDMetric_Phrases\",\n )\n\n def _process_groundtruth_boxes(self, groundtruth_box_tuples):\n \"\"\"Pre-processes boxes before adding them to the VRDDetectionEvaluation.\n\n In case of phrase evaluation task, evaluation expects exactly one bounding\n box containing all objects in the phrase. This bounding box is computed\n as an enclosing box of all groundtruth boxes of a phrase.\n\n Args:\n groundtruth_box_tuples: A numpy array of structures with the shape\n [M, 1], each structure containing the same number of named bounding\n boxes. Each box is of the format [y_min, x_min, y_max, x_max]. See\n vrd_box_data_type for an example of structure.\n\n Returns:\n result: A numpy array of structures with the shape [M, 1], each\n structure containing exactly one named bounding box. 
i-th output\n structure corresponds to the result of processing i-th input structure,\n where the named bounding box is computed as an enclosing bounding box\n of all bounding boxes of the i-th input structure.\n \"\"\"\n first_box_key = next(six.iterkeys(groundtruth_box_tuples.dtype.fields))\n miny = groundtruth_box_tuples[first_box_key][:, 0]\n minx = groundtruth_box_tuples[first_box_key][:, 1]\n maxy = groundtruth_box_tuples[first_box_key][:, 2]\n maxx = groundtruth_box_tuples[first_box_key][:, 3]\n for fields in groundtruth_box_tuples.dtype.fields:\n miny = np.minimum(groundtruth_box_tuples[fields][:, 0], miny)\n minx = np.minimum(groundtruth_box_tuples[fields][:, 1], minx)\n maxy = np.maximum(groundtruth_box_tuples[fields][:, 2], maxy)\n maxx = np.maximum(groundtruth_box_tuples[fields][:, 3], maxx)\n data_result = []\n for i in range(groundtruth_box_tuples.shape[0]):\n data_result.append(([miny[i], minx[i], maxy[i], maxx[i]],))\n result = np.array(data_result, dtype=[(\"box\", \"f4\", (4,))])\n return result\n\n def _process_detection_boxes(self, detections_box_tuples):\n \"\"\"Pre-processes boxes before adding them to the VRDDetectionEvaluation.\n\n In case of phrase evaluation task, evaluation expects exactly one bounding\n box containing all objects in the phrase. This bounding box is computed\n as an enclosing box of all groundtruth boxes of a phrase.\n\n Args:\n detections_box_tuples: A numpy array of structures with the shape\n [M, 1], each structure containing the same number of named bounding\n boxes. Each box is of the format [y_min, x_min, y_max, x_max]. See\n vrd_box_data_type for an example of this structure.\n\n Returns:\n result: A numpy array of structures with the shape [M, 1], each\n structure containing exactly one named bounding box. i-th output\n structure corresponds to the result of processing i-th input structure,\n where the named bounding box is computed as an enclosing bounding box\n of all bounding boxes of the i-th input structure.\n \"\"\"\n first_box_key = next(six.iterkeys(detections_box_tuples.dtype.fields))\n miny = detections_box_tuples[first_box_key][:, 0]\n minx = detections_box_tuples[first_box_key][:, 1]\n maxy = detections_box_tuples[first_box_key][:, 2]\n maxx = detections_box_tuples[first_box_key][:, 3]\n for fields in detections_box_tuples.dtype.fields:\n miny = np.minimum(detections_box_tuples[fields][:, 0], miny)\n minx = np.minimum(detections_box_tuples[fields][:, 1], minx)\n maxy = np.maximum(detections_box_tuples[fields][:, 2], maxy)\n maxx = np.maximum(detections_box_tuples[fields][:, 3], maxx)\n data_result = []\n for i in range(detections_box_tuples.shape[0]):\n data_result.append(([miny[i], minx[i], maxy[i], maxx[i]],))\n result = np.array(data_result, dtype=[(\"box\", \"f4\", (4,))])\n return result\n\n\nVRDDetectionEvalMetrics = collections.namedtuple(\n \"VRDDetectionEvalMetrics\",\n [\n \"weighted_average_precision\",\n \"mean_average_precision\",\n \"average_precisions\",\n \"precisions\",\n \"recalls\",\n \"recall_50\",\n \"recall_100\",\n \"median_rank_50\",\n \"median_rank_100\",\n ],\n)\n\n\nclass _VRDDetectionEvaluation(object):\n \"\"\"Performs metric computation for the VRD task. 
This class is internal.\n \"\"\"\n\n def __init__(self, matching_iou_threshold=0.5):\n \"\"\"Constructor.\n\n Args:\n matching_iou_threshold: IOU threshold to use for matching groundtruth\n boxes to detection boxes.\n \"\"\"\n self._per_image_eval = per_image_vrd_evaluation.PerImageVRDEvaluation(\n matching_iou_threshold=matching_iou_threshold\n )\n\n self._groundtruth_box_tuples = {}\n self._groundtruth_class_tuples = {}\n self._num_gt_instances = 0\n self._num_gt_imgs = 0\n self._num_gt_instances_per_relationship = {}\n\n self.clear_detections()\n\n def clear_detections(self):\n \"\"\"Clears detections.\"\"\"\n self._detection_keys = set()\n self._scores = []\n self._relation_field_values = []\n self._tp_fp_labels = []\n self._average_precisions = {}\n self._precisions = []\n self._recalls = []\n\n def add_single_ground_truth_image_info(\n self, image_key, groundtruth_box_tuples, groundtruth_class_tuples\n ):\n \"\"\"Adds groundtruth for a single image to be used for evaluation.\n\n Args:\n image_key: A unique string/integer identifier for the image.\n groundtruth_box_tuples: A numpy array of structures with the shape\n [M, 1], representing M tuples, each tuple containing the same number\n of named bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max].\n groundtruth_class_tuples: A numpy array of structures shape [M, 1],\n representing the class labels of the corresponding bounding boxes and\n possibly additional classes.\n \"\"\"\n if image_key in self._groundtruth_box_tuples:\n logging.warning(\n \"image %s has already been added to the ground truth database.\",\n image_key,\n )\n return\n\n self._groundtruth_box_tuples[image_key] = groundtruth_box_tuples\n self._groundtruth_class_tuples[image_key] = groundtruth_class_tuples\n\n self._update_groundtruth_statistics(groundtruth_class_tuples)\n\n def add_single_detected_image_info(\n self, image_key, detected_box_tuples, detected_scores, detected_class_tuples\n ):\n \"\"\"Adds detections for a single image to be used for evaluation.\n\n Args:\n image_key: A unique string/integer identifier for the image.\n detected_box_tuples: A numpy array of structures with shape [N, 1],\n representing N tuples, each tuple containing the same number of named\n bounding boxes.\n Each box is of the format [y_min, x_min, y_max, x_max].\n detected_scores: A float numpy array of shape [N, 1], representing\n the confidence scores of the detected N object instances.\n detected_class_tuples: A numpy array of structures shape [N, 1],\n representing the class labels of the corresponding bounding boxes and\n possibly additional classes.\n \"\"\"\n self._detection_keys.add(image_key)\n if image_key in self._groundtruth_box_tuples:\n groundtruth_box_tuples = self._groundtruth_box_tuples[image_key]\n groundtruth_class_tuples = self._groundtruth_class_tuples[image_key]\n else:\n groundtruth_box_tuples = np.empty(\n shape=[0, 4], dtype=detected_box_tuples.dtype\n )\n groundtruth_class_tuples = np.array([], dtype=detected_class_tuples.dtype)\n\n scores, tp_fp_labels, mapping = self._per_image_eval.compute_detection_tp_fp(\n detected_box_tuples=detected_box_tuples,\n detected_scores=detected_scores,\n detected_class_tuples=detected_class_tuples,\n groundtruth_box_tuples=groundtruth_box_tuples,\n groundtruth_class_tuples=groundtruth_class_tuples,\n )\n\n self._scores += [scores]\n self._tp_fp_labels += [tp_fp_labels]\n self._relation_field_values += [detected_class_tuples[mapping][\"relation\"]]\n\n def _update_groundtruth_statistics(self, 
groundtruth_class_tuples):\n \"\"\"Updates grouth truth statistics.\n\n Args:\n groundtruth_class_tuples: A numpy array of structures shape [M, 1],\n representing the class labels of the corresponding bounding boxes and\n possibly additional classes.\n \"\"\"\n self._num_gt_instances += groundtruth_class_tuples.shape[0]\n self._num_gt_imgs += 1\n for relation_field_value in np.unique(groundtruth_class_tuples[\"relation\"]):\n if relation_field_value not in self._num_gt_instances_per_relationship:\n self._num_gt_instances_per_relationship[relation_field_value] = 0\n self._num_gt_instances_per_relationship[relation_field_value] += np.sum(\n groundtruth_class_tuples[\"relation\"] == relation_field_value\n )\n\n def evaluate(self):\n \"\"\"Computes evaluation result.\n\n Returns:\n A named tuple with the following fields -\n average_precision: a float number corresponding to average precision.\n precisions: an array of precisions.\n recalls: an array of recalls.\n recall@50: recall computed on 50 top-scoring samples.\n recall@100: recall computed on 100 top-scoring samples.\n median_rank@50: median rank computed on 50 top-scoring samples.\n median_rank@100: median rank computed on 100 top-scoring samples.\n \"\"\"\n if self._num_gt_instances == 0:\n logging.warning(\"No ground truth instances\")\n\n if not self._scores:\n scores = np.array([], dtype=float)\n tp_fp_labels = np.array([], dtype=bool)\n else:\n scores = np.concatenate(self._scores)\n tp_fp_labels = np.concatenate(self._tp_fp_labels)\n relation_field_values = np.concatenate(self._relation_field_values)\n\n for relation_field_value, _ in six.iteritems(\n self._num_gt_instances_per_relationship\n ):\n precisions, recalls = metrics.compute_precision_recall(\n scores[relation_field_values == relation_field_value],\n tp_fp_labels[relation_field_values == relation_field_value],\n self._num_gt_instances_per_relationship[relation_field_value],\n )\n self._average_precisions[\n relation_field_value\n ] = metrics.compute_average_precision(precisions, recalls)\n\n self._mean_average_precision = np.mean(list(self._average_precisions.values()))\n\n self._precisions, self._recalls = metrics.compute_precision_recall(\n scores, tp_fp_labels, self._num_gt_instances\n )\n self._weighted_average_precision = metrics.compute_average_precision(\n self._precisions, self._recalls\n )\n\n self._recall_50 = metrics.compute_recall_at_k(\n self._tp_fp_labels, self._num_gt_instances, 50\n )\n self._median_rank_50 = metrics.compute_median_rank_at_k(self._tp_fp_labels, 50)\n self._recall_100 = metrics.compute_recall_at_k(\n self._tp_fp_labels, self._num_gt_instances, 100\n )\n self._median_rank_100 = metrics.compute_median_rank_at_k(\n self._tp_fp_labels, 100\n )\n\n return VRDDetectionEvalMetrics(\n self._weighted_average_precision,\n self._mean_average_precision,\n self._average_precisions,\n self._precisions,\n self._recalls,\n self._recall_50,\n self._recall_100,\n self._median_rank_50,\n self._median_rank_100,\n )\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.utils.np_box_list_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom object_detection.utils import np_box_list\nfrom object_detection.utils import np_box_list_ops\n\n\nclass AreaRelatedTest(tf.test.TestCase):\n def setUp(self):\n boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=float)\n boxes2 = np.array(\n [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],\n dtype=float,\n )\n self.boxlist1 = np_box_list.BoxList(boxes1)\n self.boxlist2 = np_box_list.BoxList(boxes2)\n\n def test_area(self):\n areas = np_box_list_ops.area(self.boxlist1)\n expected_areas = np.array([6.0, 5.0], dtype=float)\n self.assertAllClose(expected_areas, areas)\n\n def test_intersection(self):\n intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)\n expected_intersection = np.array(\n [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]], dtype=float\n )\n self.assertAllClose(intersection, expected_intersection)\n\n def test_iou(self):\n iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)\n expected_iou = np.array(\n [[2.0 / 16.0, 0.0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]],\n dtype=float,\n )\n self.assertAllClose(iou, expected_iou)\n\n def test_ioa(self):\n boxlist1 = np_box_list.BoxList(\n np.array(\n [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32\n )\n )\n boxlist2 = np_box_list.BoxList(\n np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)\n )\n ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)\n expected_ioa21 = np.array([[0.5, 0.0], [1.0, 1.0]], dtype=np.float32)\n self.assertAllClose(ioa21, expected_ioa21)\n\n def test_scale(self):\n boxlist = np_box_list.BoxList(\n np.array(\n [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32\n )\n )\n boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)\n expected_boxlist_scaled = np_box_list.BoxList(\n np.array([[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32)\n )\n self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())\n\n def test_clip_to_window(self):\n boxlist = np_box_list.BoxList(\n np.array(\n [\n [0.25, 0.25, 0.75, 0.75],\n [0.0, 0.0, 0.5, 0.75],\n [-0.2, -0.3, 0.7, 1.5],\n ],\n dtype=np.float32,\n )\n )\n boxlist_clipped = np_box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0])\n expected_boxlist_clipped = np_box_list.BoxList(\n np.array(\n [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [0.0, 0.0, 0.7, 1.0]],\n dtype=np.float32,\n )\n )\n self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())\n\n def test_prune_outside_window(self):\n boxlist = np_box_list.BoxList(\n np.array(\n [\n [0.25, 0.25, 0.75, 0.75],\n [0.0, 0.0, 0.5, 
0.75],\n [-0.2, -0.3, 0.7, 1.5],\n ],\n dtype=np.float32,\n )\n )\n boxlist_pruned, _ = np_box_list_ops.prune_outside_window(\n boxlist, [0.0, 0.0, 1.0, 1.0]\n )\n expected_boxlist_pruned = np_box_list.BoxList(\n np.array(\n [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32\n )\n )\n self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())\n\n def test_concatenate(self):\n boxlist1 = np_box_list.BoxList(\n np.array(\n [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32\n )\n )\n boxlist2 = np_box_list.BoxList(\n np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)\n )\n boxlists = [boxlist1, boxlist2]\n boxlist_concatenated = np_box_list_ops.concatenate(boxlists)\n boxlist_concatenated_expected = np_box_list.BoxList(\n np.array(\n [\n [0.25, 0.25, 0.75, 0.75],\n [0.0, 0.0, 0.5, 0.75],\n [0.5, 0.25, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n ],\n dtype=np.float32,\n )\n )\n self.assertAllClose(\n boxlist_concatenated_expected.get(), boxlist_concatenated.get()\n )\n\n def test_change_coordinate_frame(self):\n boxlist = np_box_list.BoxList(\n np.array(\n [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32\n )\n )\n boxlist_coord = np_box_list_ops.change_coordinate_frame(\n boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32)\n )\n expected_boxlist_coord = np_box_list.BoxList(\n np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32)\n )\n self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())\n\n def test_filter_scores_greater_than(self):\n boxlist = np_box_list.BoxList(\n np.array(\n [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32\n )\n )\n boxlist.add_field(\"scores\", np.array([0.8, 0.2], np.float32))\n boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)\n\n expected_boxlist_greater = np_box_list.BoxList(\n np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32)\n )\n\n self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())\n\n\nclass GatherOpsTest(tf.test.TestCase):\n def setUp(self):\n boxes = np.array(\n [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],\n dtype=float,\n )\n self.boxlist = np_box_list.BoxList(boxes)\n self.boxlist.add_field(\"scores\", np.array([0.5, 0.7, 0.9], dtype=float))\n self.boxlist.add_field(\n \"labels\",\n np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),\n )\n\n def test_gather_with_out_of_range_indices(self):\n indices = np.array([3, 1], dtype=int)\n boxlist = self.boxlist\n with self.assertRaises(ValueError):\n np_box_list_ops.gather(boxlist, indices)\n\n def test_gather_with_invalid_multidimensional_indices(self):\n indices = np.array([[0, 1], [1, 2]], dtype=int)\n boxlist = self.boxlist\n with self.assertRaises(ValueError):\n np_box_list_ops.gather(boxlist, indices)\n\n def test_gather_without_fields_specified(self):\n indices = np.array([2, 0, 1], dtype=int)\n boxlist = self.boxlist\n subboxlist = np_box_list_ops.gather(boxlist, indices)\n\n expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)\n self.assertAllClose(expected_scores, subboxlist.get_field(\"scores\"))\n\n expected_boxes = np.array(\n [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],\n dtype=float,\n )\n self.assertAllClose(expected_boxes, subboxlist.get())\n\n expected_labels = np.array(\n [[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int\n )\n self.assertAllClose(expected_labels, subboxlist.get_field(\"labels\"))\n\n def 
test_gather_with_invalid_field_specified(self):\n indices = np.array([2, 0, 1], dtype=int)\n boxlist = self.boxlist\n\n with self.assertRaises(ValueError):\n np_box_list_ops.gather(boxlist, indices, \"labels\")\n\n with self.assertRaises(ValueError):\n np_box_list_ops.gather(boxlist, indices, [\"objectness\"])\n\n def test_gather_with_fields_specified(self):\n indices = np.array([2, 0, 1], dtype=int)\n boxlist = self.boxlist\n subboxlist = np_box_list_ops.gather(boxlist, indices, [\"labels\"])\n\n self.assertFalse(subboxlist.has_field(\"scores\"))\n\n expected_boxes = np.array(\n [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],\n dtype=float,\n )\n self.assertAllClose(expected_boxes, subboxlist.get())\n\n expected_labels = np.array(\n [[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int\n )\n self.assertAllClose(expected_labels, subboxlist.get_field(\"labels\"))\n\n\nclass SortByFieldTest(tf.test.TestCase):\n def setUp(self):\n boxes = np.array(\n [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],\n dtype=float,\n )\n self.boxlist = np_box_list.BoxList(boxes)\n self.boxlist.add_field(\"scores\", np.array([0.5, 0.9, 0.4], dtype=float))\n self.boxlist.add_field(\n \"labels\",\n np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),\n )\n\n def test_with_invalid_field(self):\n with self.assertRaises(ValueError):\n np_box_list_ops.sort_by_field(self.boxlist, \"objectness\")\n with self.assertRaises(ValueError):\n np_box_list_ops.sort_by_field(self.boxlist, \"labels\")\n\n def test_with_invalid_sorting_order(self):\n with self.assertRaises(ValueError):\n np_box_list_ops.sort_by_field(self.boxlist, \"scores\", \"Descending\")\n\n def test_with_descending_sorting(self):\n sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, \"scores\")\n\n expected_boxes = np.array(\n [[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0], [0.0, 0.0, 20.0, 20.0]],\n dtype=float,\n )\n self.assertAllClose(expected_boxes, sorted_boxlist.get())\n\n expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)\n self.assertAllClose(expected_scores, sorted_boxlist.get_field(\"scores\"))\n\n def test_with_ascending_sorting(self):\n sorted_boxlist = np_box_list_ops.sort_by_field(\n self.boxlist, \"scores\", np_box_list_ops.SortOrder.ASCEND\n )\n\n expected_boxes = np.array(\n [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],],\n dtype=float,\n )\n self.assertAllClose(expected_boxes, sorted_boxlist.get())\n\n expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)\n self.assertAllClose(expected_scores, sorted_boxlist.get_field(\"scores\"))\n\n\nclass NonMaximumSuppressionTest(tf.test.TestCase):\n def setUp(self):\n self._boxes = np.array(\n [\n [0, 0, 1, 1],\n [0, 0.1, 1, 1.1],\n [0, -0.1, 1, 0.9],\n [0, 10, 1, 11],\n [0, 10.1, 1, 11.1],\n [0, 100, 1, 101],\n ],\n dtype=float,\n )\n self._boxlist = np_box_list.BoxList(self._boxes)\n\n def test_with_no_scores_field(self):\n boxlist = np_box_list.BoxList(self._boxes)\n max_output_size = 3\n iou_threshold = 0.5\n\n with self.assertRaises(ValueError):\n np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)\n\n def test_nms_disabled_max_output_size_equals_three(self):\n boxlist = np_box_list.BoxList(self._boxes)\n boxlist.add_field(\n \"scores\", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)\n )\n max_output_size = 3\n iou_threshold = 1.0 # No NMS\n\n expected_boxes = np.array(\n [[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]], dtype=float\n )\n 
nms_boxlist = np_box_list_ops.non_max_suppression(\n boxlist, max_output_size, iou_threshold\n )\n self.assertAllClose(nms_boxlist.get(), expected_boxes)\n\n def test_select_from_three_clusters(self):\n boxlist = np_box_list.BoxList(self._boxes)\n boxlist.add_field(\n \"scores\", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)\n )\n max_output_size = 3\n iou_threshold = 0.5\n\n expected_boxes = np.array(\n [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float\n )\n nms_boxlist = np_box_list_ops.non_max_suppression(\n boxlist, max_output_size, iou_threshold\n )\n self.assertAllClose(nms_boxlist.get(), expected_boxes)\n\n def test_select_at_most_two_from_three_clusters(self):\n boxlist = np_box_list.BoxList(self._boxes)\n boxlist.add_field(\n \"scores\", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)\n )\n max_output_size = 2\n iou_threshold = 0.5\n\n expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)\n nms_boxlist = np_box_list_ops.non_max_suppression(\n boxlist, max_output_size, iou_threshold\n )\n self.assertAllClose(nms_boxlist.get(), expected_boxes)\n\n def test_select_at_most_thirty_from_three_clusters(self):\n boxlist = np_box_list.BoxList(self._boxes)\n boxlist.add_field(\n \"scores\", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)\n )\n max_output_size = 30\n iou_threshold = 0.5\n\n expected_boxes = np.array(\n [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float\n )\n nms_boxlist = np_box_list_ops.non_max_suppression(\n boxlist, max_output_size, iou_threshold\n )\n self.assertAllClose(nms_boxlist.get(), expected_boxes)\n\n def test_select_from_ten_indentical_boxes(self):\n boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)\n boxlist = np_box_list.BoxList(boxes)\n boxlist.add_field(\"scores\", np.array(10 * [0.8]))\n iou_threshold = 0.5\n max_output_size = 3\n expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)\n nms_boxlist = np_box_list_ops.non_max_suppression(\n boxlist, max_output_size, iou_threshold\n )\n self.assertAllClose(nms_boxlist.get(), expected_boxes)\n\n def test_different_iou_threshold(self):\n boxes = np.array(\n [\n [0, 0, 20, 100],\n [0, 0, 20, 80],\n [200, 200, 210, 300],\n [200, 200, 210, 250],\n ],\n dtype=float,\n )\n boxlist = np_box_list.BoxList(boxes)\n boxlist.add_field(\"scores\", np.array([0.9, 0.8, 0.7, 0.6]))\n max_output_size = 4\n\n iou_threshold = 0.4\n expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],], dtype=float)\n nms_boxlist = np_box_list_ops.non_max_suppression(\n boxlist, max_output_size, iou_threshold\n )\n self.assertAllClose(nms_boxlist.get(), expected_boxes)\n\n iou_threshold = 0.5\n expected_boxes = np.array(\n [[0, 0, 20, 100], [200, 200, 210, 300], [200, 200, 210, 250]], dtype=float\n )\n nms_boxlist = np_box_list_ops.non_max_suppression(\n boxlist, max_output_size, iou_threshold\n )\n self.assertAllClose(nms_boxlist.get(), expected_boxes)\n\n iou_threshold = 0.8\n expected_boxes = np.array(\n [\n [0, 0, 20, 100],\n [0, 0, 20, 80],\n [200, 200, 210, 300],\n [200, 200, 210, 250],\n ],\n dtype=float,\n )\n nms_boxlist = np_box_list_ops.non_max_suppression(\n boxlist, max_output_size, iou_threshold\n )\n self.assertAllClose(nms_boxlist.get(), expected_boxes)\n\n def test_multiclass_nms(self):\n boxlist = np_box_list.BoxList(\n np.array(\n [[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],\n dtype=np.float32,\n )\n )\n scores = np.array(\n [\n [-0.2, 0.1, 0.5, -0.4, 0.3],\n [0.7, -0.7, 0.6, 0.2, -0.9],\n [0.4, 0.34, -0.9, 0.2, 0.31],\n ],\n 
dtype=np.float32,\n )\n boxlist.add_field(\"scores\", scores)\n boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(\n boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3\n )\n\n scores_clean = boxlist_clean.get_field(\"scores\")\n classes_clean = boxlist_clean.get_field(\"classes\")\n boxes = boxlist_clean.get()\n expected_scores = np.array([0.7, 0.6, 0.34, 0.31])\n expected_classes = np.array([0, 2, 1, 4])\n expected_boxes = np.array(\n [\n [0.4, 0.2, 0.8, 0.8],\n [0.4, 0.2, 0.8, 0.8],\n [0.6, 0.0, 1.0, 1.0],\n [0.6, 0.0, 1.0, 1.0],\n ],\n dtype=np.float32,\n )\n self.assertAllClose(scores_clean, expected_scores)\n self.assertAllClose(classes_clean, expected_classes)\n self.assertAllClose(boxes, expected_boxes)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Operations for [N, height, width] numpy arrays representing masks.\n\nExample mask operations that are supported:\n * Areas: compute mask areas\n * IOU: pairwise intersection-over-union scores\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nEPSILON = 1e-7\n\n\ndef area(masks):\n \"\"\"Computes area of masks.\n\n Args:\n masks: Numpy array with shape [N, height, width] holding N masks. Masks\n values are of type np.uint8 and values are in {0,1}.\n\n Returns:\n a numpy array with shape [N*1] representing mask areas.\n\n Raises:\n ValueError: If masks.dtype is not np.uint8\n \"\"\"\n if masks.dtype != np.uint8:\n raise ValueError(\"Masks type should be np.uint8\")\n return np.sum(masks, axis=(1, 2), dtype=np.float32)\n\n\ndef intersection(masks1, masks2):\n \"\"\"Compute pairwise intersection areas between masks.\n\n Args:\n masks1: a numpy array with shape [N, height, width] holding N masks. Masks\n values are of type np.uint8 and values are in {0,1}.\n masks2: a numpy array with shape [M, height, width] holding M masks. Masks\n values are of type np.uint8 and values are in {0,1}.\n\n Returns:\n a numpy array with shape [N*M] representing pairwise intersection area.\n\n Raises:\n ValueError: If masks1 and masks2 are not of type np.uint8.\n \"\"\"\n if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:\n raise ValueError(\"masks1 and masks2 should be of type np.uint8\")\n n = masks1.shape[0]\n m = masks2.shape[0]\n answer = np.zeros([n, m], dtype=np.float32)\n for i in np.arange(n):\n for j in np.arange(m):\n answer[i, j] = np.sum(np.minimum(masks1[i], masks2[j]), dtype=np.float32)\n return answer\n\n\ndef iou(masks1, masks2):\n \"\"\"Computes pairwise intersection-over-union between mask collections.\n\n Args:\n masks1: a numpy array with shape [N, height, width] holding N masks. Masks\n values are of type np.uint8 and values are in {0,1}.\n masks2: a numpy array with shape [M, height, width] holding N masks. 
Masks\n values are of type np.uint8 and values are in {0,1}.\n\n Returns:\n a numpy array with shape [N, M] representing pairwise iou scores.\n\n Raises:\n ValueError: If masks1 and masks2 are not of type np.uint8.\n \"\"\"\n if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:\n raise ValueError(\"masks1 and masks2 should be of type np.uint8\")\n intersect = intersection(masks1, masks2)\n area1 = area(masks1)\n area2 = area(masks2)\n union = np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) - intersect\n return intersect / np.maximum(union, EPSILON)\n\n\ndef ioa(masks1, masks2):\n \"\"\"Computes pairwise intersection-over-area between box collections.\n\n Intersection-over-area (ioa) between two masks, mask1 and mask2 is defined as\n their intersection area over mask2's area. Note that ioa is not symmetric,\n that is, IOA(mask1, mask2) != IOA(mask2, mask1).\n\n Args:\n masks1: a numpy array with shape [N, height, width] holding N masks. Masks\n values are of type np.uint8 and values are in {0,1}.\n masks2: a numpy array with shape [M, height, width] holding N masks. Masks\n values are of type np.uint8 and values are in {0,1}.\n\n Returns:\n a numpy array with shape [N, M] representing pairwise ioa scores.\n\n Raises:\n ValueError: If masks1 and masks2 are not of type np.uint8.\n \"\"\"\n if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:\n raise ValueError(\"masks1 and masks2 should be of type np.uint8\")\n intersect = intersection(masks1, masks2)\n areas = np.expand_dims(area(masks2), axis=0)\n return intersect / (areas + EPSILON)\n"
] | [
[
"numpy.minimum",
"numpy.maximum",
"numpy.unique",
"numpy.setdiff1d",
"numpy.dtype",
"numpy.ones",
"numpy.concatenate",
"numpy.empty",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.isin"
],
[
"numpy.array",
"tensorflow.test.main"
],
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"numpy.arange",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
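For reference, a minimal NumPy sketch of the pairwise box IoU that the np_box_list_ops tests in the record above exercise (my own illustration, not part of the scraped file; boxes use the same [y_min, x_min, y_max, x_max] convention, and the printed values match test_intersection/test_iou):

import numpy as np

def pairwise_iou(boxes1, boxes2):
    """Returns an [N, M] array of IoU scores between two box collections."""
    # Per-box areas: (y_max - y_min) * (x_max - x_min).
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    # Pairwise overlap of the y- and x-ranges, clipped at zero.
    ih = np.maximum(
        np.minimum(boxes1[:, None, 2], boxes2[None, :, 2]) -
        np.maximum(boxes1[:, None, 0], boxes2[None, :, 0]), 0.0)
    iw = np.maximum(
        np.minimum(boxes1[:, None, 3], boxes2[None, :, 3]) -
        np.maximum(boxes1[:, None, 1], boxes2[None, :, 1]), 0.0)
    intersect = ih * iw
    union = area1[:, None] + area2[None, :] - intersect
    return intersect / np.maximum(union, 1e-7)

boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
                   [0.0, 0.0, 20.0, 20.0]])
print(pairwise_iou(boxes1, boxes2))  # [[2/16, 0, 6/400], [1/16, 0, 5/400]]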
nuannuanhcc/ps_reppoint | [
"abf9a82f53e5812936d0eed46f417c4500ffe151"
] | [
"mmdetection/mmdet/models/anchor_heads/anchor_head.py"
] | [
"from __future__ import division\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import normal_init\n\nfrom mmdet.core import (AnchorGenerator, anchor_target, delta2bbox, force_fp32,\n multi_apply, multiclass_nms)\nfrom ..builder import build_loss\nfrom ..registry import HEADS\n\n\[email protected]_module\nclass AnchorHead(nn.Module):\n \"\"\"Anchor-based head (RPN, RetinaNet, SSD, etc.).\n\n Args:\n in_channels (int): Number of channels in the input feature map.\n feat_channels (int): Number of channels of the feature map.\n anchor_scales (Iterable): Anchor scales.\n anchor_ratios (Iterable): Anchor aspect ratios.\n anchor_strides (Iterable): Anchor strides.\n anchor_base_sizes (Iterable): Anchor base sizes.\n target_means (Iterable): Mean values of regression targets.\n target_stds (Iterable): Std values of regression targets.\n loss_cls (dict): Config of classification loss.\n loss_bbox (dict): Config of localization loss.\n \"\"\" # noqa: W605\n\n def __init__(self,\n num_classes,\n in_channels,\n feat_channels=256,\n anchor_scales=[8, 16, 32],\n anchor_ratios=[0.5, 1.0, 2.0],\n anchor_strides=[4, 8, 16, 32, 64],\n anchor_base_sizes=None,\n target_means=(.0, .0, .0, .0),\n target_stds=(1.0, 1.0, 1.0, 1.0),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n loss_bbox=dict(\n type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)):\n super(AnchorHead, self).__init__()\n self.in_channels = in_channels\n self.num_classes = num_classes\n self.feat_channels = feat_channels\n self.anchor_scales = anchor_scales\n self.anchor_ratios = anchor_ratios\n self.anchor_strides = anchor_strides\n self.anchor_base_sizes = list(\n anchor_strides) if anchor_base_sizes is None else anchor_base_sizes\n self.target_means = target_means\n self.target_stds = target_stds\n\n self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC']\n if self.use_sigmoid_cls:\n self.cls_out_channels = num_classes - 1\n else:\n self.cls_out_channels = num_classes\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox = build_loss(loss_bbox)\n self.fp16_enabled = False\n\n self.anchor_generators = []\n for anchor_base in self.anchor_base_sizes:\n self.anchor_generators.append(\n AnchorGenerator(anchor_base, anchor_scales, anchor_ratios))\n\n self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)\n self._init_layers()\n\n def _init_layers(self):\n self.conv_cls = nn.Conv2d(self.feat_channels,\n self.num_anchors * self.cls_out_channels, 1)\n self.conv_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)\n\n def init_weights(self):\n normal_init(self.conv_cls, std=0.01)\n normal_init(self.conv_reg, std=0.01)\n\n def forward_single(self, x):\n cls_score = self.conv_cls(x)\n bbox_pred = self.conv_reg(x)\n return cls_score, bbox_pred\n\n def forward(self, feats):\n return multi_apply(self.forward_single, feats)\n\n def get_anchors(self, featmap_sizes, img_metas):\n \"\"\"Get anchors according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n img_metas (list[dict]): Image meta info.\n\n Returns:\n tuple: anchors of each image, valid flags of each image\n \"\"\"\n num_imgs = len(img_metas)\n num_levels = len(featmap_sizes)\n\n # since feature map sizes of all images are the same, we only compute\n # anchors for one time\n multi_level_anchors = []\n for i in range(num_levels):\n anchors = self.anchor_generators[i].grid_anchors(\n featmap_sizes[i], 
self.anchor_strides[i])\n multi_level_anchors.append(anchors)\n anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n # for each image, we compute valid flags of multi level anchors\n valid_flag_list = []\n for img_id, img_meta in enumerate(img_metas):\n multi_level_flags = []\n for i in range(num_levels):\n anchor_stride = self.anchor_strides[i]\n feat_h, feat_w = featmap_sizes[i]\n h, w, _ = img_meta['pad_shape']\n valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)\n valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)\n flags = self.anchor_generators[i].valid_flags(\n (feat_h, feat_w), (valid_feat_h, valid_feat_w))\n multi_level_flags.append(flags)\n valid_flag_list.append(multi_level_flags)\n\n return anchor_list, valid_flag_list\n\n def loss_single(self, cls_score, bbox_pred, labels, label_weights,\n bbox_targets, bbox_weights, num_total_samples, cfg):\n # classification loss\n if labels.dim() == 3:\n all_labels = labels.clone()\n labels = all_labels[:, :, 0]\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n cls_score = cls_score.permute(0, 2, 3,\n 1).reshape(-1, self.cls_out_channels)\n loss_cls = self.loss_cls(\n cls_score, labels, label_weights, avg_factor=num_total_samples)\n # regression loss\n bbox_targets = bbox_targets.reshape(-1, 4)\n bbox_weights = bbox_weights.reshape(-1, 4)\n bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n loss_bbox = self.loss_bbox(\n bbox_pred,\n bbox_targets,\n bbox_weights,\n avg_factor=num_total_samples)\n return loss_cls, loss_bbox\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n def loss(self,\n cls_scores,\n bbox_preds,\n gt_bboxes,\n gt_labels,\n img_metas,\n cfg,\n gt_bboxes_ignore=None):\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == len(self.anchor_generators)\n\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, img_metas)\n label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n cls_reg_targets = anchor_target(\n anchor_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n self.target_means,\n self.target_stds,\n cfg,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n label_channels=label_channels,\n sampling=self.sampling)\n if cls_reg_targets is None:\n return None\n (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n num_total_pos, num_total_neg) = cls_reg_targets\n num_total_samples = (\n num_total_pos + num_total_neg if self.sampling else num_total_pos)\n losses_cls, losses_bbox = multi_apply(\n self.loss_single,\n cls_scores,\n bbox_preds,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n bbox_weights_list,\n num_total_samples=num_total_samples,\n cfg=cfg)\n return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg,\n rescale=False):\n assert len(cls_scores) == len(bbox_preds)\n num_levels = len(cls_scores)\n\n mlvl_anchors = [\n self.anchor_generators[i].grid_anchors(cls_scores[i].size()[-2:],\n self.anchor_strides[i])\n for i in range(num_levels)\n ]\n result_list = []\n for img_id in range(len(img_metas)):\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ]\n bbox_pred_list = [\n bbox_preds[i][img_id].detach() for i in range(num_levels)\n ]\n img_shape = img_metas[img_id]['img_shape']\n scale_factor = img_metas[img_id]['scale_factor']\n proposals = self.get_bboxes_single(cls_score_list, 
bbox_pred_list,\n mlvl_anchors, img_shape,\n scale_factor, cfg, rescale)\n result_list.append(proposals)\n return result_list\n\n def get_bboxes_single(self,\n cls_scores,\n bbox_preds,\n mlvl_anchors,\n img_shape,\n scale_factor,\n cfg,\n rescale=False):\n assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)\n mlvl_bboxes = []\n mlvl_scores = []\n for cls_score, bbox_pred, anchors in zip(cls_scores, bbox_preds,\n mlvl_anchors):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n cls_score = cls_score.permute(1, 2,\n 0).reshape(-1, self.cls_out_channels)\n if self.use_sigmoid_cls:\n scores = cls_score.sigmoid()\n else:\n scores = cls_score.softmax(-1)\n bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n nms_pre = cfg.get('nms_pre', -1)\n if nms_pre > 0 and scores.shape[0] > nms_pre:\n if self.use_sigmoid_cls:\n max_scores, _ = scores.max(dim=1)\n else:\n max_scores, _ = scores[:, 1:].max(dim=1)\n _, topk_inds = max_scores.topk(nms_pre)\n anchors = anchors[topk_inds, :]\n bbox_pred = bbox_pred[topk_inds, :]\n scores = scores[topk_inds, :]\n bboxes = delta2bbox(anchors, bbox_pred, self.target_means,\n self.target_stds, img_shape)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n if rescale:\n mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n mlvl_scores = torch.cat(mlvl_scores)\n if self.use_sigmoid_cls:\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)\n det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,\n cfg.score_thr, cfg.nms,\n cfg.max_per_img)\n return det_bboxes, det_labels\n"
] | [
[
"numpy.ceil",
"torch.nn.Conv2d",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
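A hedged illustration of the (dx, dy, dw, dh) decoding that delta2bbox performs for the AnchorHead in the record above (not the mmdetection implementation; the function name and test values are my own, and boxes are [x1, y1, x2, y2]):

import numpy as np

def decode_deltas(anchors, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
    """Applies center-shift / log-size regression deltas to anchor boxes."""
    deltas = deltas * np.asarray(stds) + np.asarray(means)  # undo target normalization
    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]
    ctr_x = anchors[:, 0] + 0.5 * widths
    ctr_y = anchors[:, 1] + 0.5 * heights
    pred_ctr_x = ctr_x + deltas[:, 0] * widths      # shift the center
    pred_ctr_y = ctr_y + deltas[:, 1] * heights
    pred_w = widths * np.exp(deltas[:, 2])          # rescale the size
    pred_h = heights * np.exp(deltas[:, 3])
    return np.stack([pred_ctr_x - 0.5 * pred_w, pred_ctr_y - 0.5 * pred_h,
                     pred_ctr_x + 0.5 * pred_w, pred_ctr_y + 0.5 * pred_h], axis=1)

anchors = np.array([[0.0, 0.0, 16.0, 16.0]])
deltas = np.array([[0.0, 0.0, 0.0, 0.0]])   # zero deltas reproduce the anchor
print(decode_deltas(anchors, deltas))       # [[ 0.  0. 16. 16.]]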
gongzhitaao/adversarial-classifier | [
"ded40b5b319fe13e8eb40147113e9fced53433ed"
] | [
"src/figure_1.py"
] | [
"import os\n# supress tensorflow logging other than errors\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom keras import backend as K\nfrom keras.datasets import mnist\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nfrom attacks.fgsm import fgsm\n\n\nimg_rows = 28\nimg_cols = 28\nimg_chas = 1\ninput_shape = (img_rows, img_cols, img_chas)\nnb_classes = 10\n\n\nprint('\\nLoading mnist')\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = X_train.astype('float32') / 255.\nX_test = X_test.astype('float32') / 255.\n\nX_train = X_train.reshape(-1, img_rows, img_cols, img_chas)\nX_test = X_test.reshape(-1, img_rows, img_cols, img_chas)\n\n# one hot encoding\ny_train = np_utils.to_categorical(y_train, nb_classes)\nz0 = y_test.copy()\ny_test = np_utils.to_categorical(y_test, nb_classes)\n\n\nsess = tf.InteractiveSession()\nK.set_session(sess)\n\n\nif False:\n print('\\nLoading model')\n model = load_model('model/figure_1.h5')\nelse:\n print('\\nBuilding model')\n model = Sequential([\n Convolution2D(32, 3, 3, input_shape=input_shape),\n Activation('relu'),\n Convolution2D(32, 3, 3),\n Activation('relu'),\n MaxPooling2D(pool_size=(2, 2)),\n Dropout(0.25),\n Flatten(),\n Dense(128),\n Activation('relu'),\n Dropout(0.5),\n Dense(10),\n Activation('softmax')])\n\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n print('\\nTraining model')\n model.fit(X_train, y_train, nb_epoch=10)\n\n print('\\nSaving model')\n os.makedirs('model', exist_ok=True)\n model.save('model/figure_1.h5')\n\n\nx = tf.placeholder(tf.float32, (None, img_rows, img_cols, img_chas))\nx_adv = fgsm(model, x, nb_epoch=9, eps=0.02)\n\n\nprint('\\nTest against clean data')\nscore = model.evaluate(X_test, y_test)\nprint('\\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))\n\n\nif False:\n print('\\nLoading adversarial data')\n X_adv = np.load('data/figure_1.npy')\nelse:\n print('\\nGenerating adversarial data')\n nb_sample = X_test.shape[0]\n batch_size = 128\n nb_batch = int(np.ceil(nb_sample/batch_size))\n X_adv = np.empty(X_test.shape)\n for batch in range(nb_batch):\n print('batch {0}/{1}'.format(batch+1, nb_batch), end='\\r')\n start = batch * batch_size\n end = min(nb_sample, start+batch_size)\n tmp = sess.run(x_adv, feed_dict={x: X_test[start:end],\n K.learning_phase(): 0})\n X_adv[start:end] = tmp\n\n os.makedirs('data', exist_ok=True)\n np.save('data/figure_1.npy', X_adv)\n\n\nprint('\\nTest against adversarial data')\nscore = model.evaluate(X_adv, y_test)\nprint('\\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))\n\n\nprint('\\nMake predictions')\ny1 = model.predict(X_test)\nz1 = np.argmax(y1, axis=1)\ny2 = model.predict(X_adv)\nz2 = np.argmax(y2, axis=1)\n\nprint('\\nSelecting figures')\nX_tmp = np.empty((2, 10, 28, 28))\ny_proba = np.empty((2, 10, 10))\nfor i in range(10):\n print('Target {0}'.format(i))\n ind, = np.where(np.all([z0==i, z1==i, z2!=i], axis=0))\n cur = np.random.choice(ind)\n X_tmp[0][i] = np.squeeze(X_test[cur])\n X_tmp[1][i] = np.squeeze(X_adv[cur])\n y_proba[0][i] = y1[cur]\n y_proba[1][i] = y2[cur]\n\n\nprint('\\nPlotting results')\nfig = plt.figure(figsize=(10, 3))\ngs = gridspec.GridSpec(2, 10, wspace=0.1, 
hspace=0.1)\n\nlabel = np.argmax(y_proba, axis=2)\nproba = np.max(y_proba, axis=2)\nfor i in range(10):\n for j in range(2):\n ax = fig.add_subplot(gs[j, i])\n ax.imshow(X_tmp[j][i], cmap='gray', interpolation='none')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xlabel('{0} ({1:.2f})'.format(label[j][i],\n proba[j][i]),\n fontsize=12)\n\nprint('\\nSaving figure')\ngs.tight_layout(fig)\nos.makedirs('img', exist_ok=True)\nplt.savefig('img/figure_1.pdf')\n"
] | [
[
"tensorflow.InteractiveSession",
"numpy.random.choice",
"matplotlib.use",
"numpy.squeeze",
"tensorflow.placeholder",
"matplotlib.pyplot.savefig",
"numpy.save",
"numpy.all",
"numpy.max",
"numpy.ceil",
"numpy.argmax",
"matplotlib.gridspec.GridSpec",
"numpy.load",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
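A minimal sketch of the FGSM perturbation that figure_1.py obtains from attacks.fgsm (that helper may differ, and the script iterates it with nb_epoch=9; this TF1-style graph function is my own illustration and assumes the model outputs softmax probabilities):

import tensorflow as tf

def fgsm_step(model, x, eps=0.02):
    """Builds x_adv = clip(x + eps * sign(d loss / d x)) as a graph node."""
    probs = model(x)                                  # assumed softmax outputs
    # Use the model's own predictions as the target to avoid label leaking.
    labels = tf.one_hot(tf.argmax(probs, axis=1), tf.shape(probs)[1])
    loss = -tf.reduce_sum(labels * tf.log(probs + 1e-12), axis=1)
    grad, = tf.gradients(loss, x)
    x_adv = tf.stop_gradient(x + eps * tf.sign(grad))
    return tf.clip_by_value(x_adv, 0.0, 1.0)          # keep pixels in [0, 1]

# Usage mirrors the script above:
#   x = tf.placeholder(tf.float32, (None, 28, 28, 1))
#   x_adv = fgsm_step(model, x, eps=0.02)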
dreadbird06/tso_vace_wpe | [
"3e9d9d9b0ebe3d3e360e678af5960ff23d57eae2"
] | [
"run.py"
] | [
"\"\"\"\nExample codes for speech dereverberation based on the WPE variants.\n\nauthor: Joon-Young Yang (E-mail: [email protected])\n\"\"\"\nimport os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu_id\n\nimport numpy as np\nimport soundfile as sf\n\nimport torch\ntorch.set_printoptions(precision=10)\n\nfrom torch_custom.torch_utils import load_checkpoint, to_arr\nfrom torch_custom.iterative_wpe import IterativeWPE\nfrom torch_custom.neural_wpe import NeuralWPE\n\nfrom bldnn_4M62 import LstmDnnNet as LPSEstimator\nfrom gcunet4c_4M4390 import VACENet\nfrom vace_wpe import VACEWPE\n\n\n## ----------------------------------------------------- ##\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint('device = {}'.format(device))\n# device = \"cpu\"\n\nstft_opts_torch = dict(\n n_fft=1024, hop_length=256, win_length=1024, win_type='hanning', \n symmetric=True)\nfft_bins = stft_opts_torch['n_fft']//2 + 1\n\n# mfcc_opts_torch = dict(\n# fs=16000, nfft=1024, lowfreq=20., maxfreq=7600., \n# nlinfilt=0, nlogfilt=40, nceps=40, \n# lifter_type='sinusoidal', lift=-22.0) # only useful during fine-tuning\n\ndelay, taps1, taps2 = 3, 30, 15\n## ----------------------------------------------------- ##\n\n\ndef apply_drc(audio, drc_ratio=0.25, n_pop=100, dtype='float32'):\n normalized = (max(audio.max(), abs(audio.min())) <= 1.0)\n normalizer = 1.0 if normalized else float(2**15)\n ## compute MMD\n audio_sorted = np.sort(audio.squeeze(), axis=-1) # either (N,) or (D, N)\n audio_mmd = audio_sorted[..., -1:-n_pop-1:-1].mean(dtype=dtype) \\\n - audio_sorted[..., :n_pop].mean(dtype=dtype)\n drange_gain = 2 * (normalizer/audio_mmd) * drc_ratio\n return (audio * drange_gain).astype(dtype), drange_gain.astype(dtype)\n\n\ndef run_vace_wpe(wpath, prefix, pretrain_opt='late', simp_opt='b'):\n assert pretrain_opt == 'late' # VACENet should be pretrained to estimate late reverberation components\n assert simp_opt == 'b' # Simplified VACE-WPE architecture is used\n print(f'Running \"{prefix}-VACE-WPE\"...')\n\n ## Saved checkpoint file\n prefix = prefix.lower()\n if prefix == 'drv': # Drv-VACE-WPE\n ckpt_file = 'models/20210615-183131/ckpt-ep60'\n elif prefix == 'dns': # Dns-VACE-WPE\n ckpt_file = 'models/20210615-221501/ckpt-ep60'\n elif prefix == 'dr-tsoc': # DR-TSO_\\mathcal{C}-VACE-WPE\n ckpt_file = 'models/20210617-095331/ckpt-ep30'\n elif prefix == 'tsoc': # TSO_\\mathcal{C}-VACE-WPE\n ckpt_file = 'models/20210617-103601/ckpt-ep30'\n elif prefix == 'tson': # TSO_\\mathcal{N}-VACE-WPE\n ckpt_file = 'models/20210617-125831/ckpt-ep30'\n\n ## ------------------------------------------------- ##\n\n ## VACENet\n fake_scale = 2.0\n vacenet = VACENet(\n input_dim=fft_bins, stft_opts=stft_opts_torch, \n input_norm='globalmvn', # loaded from the saved checkpoint\n scope='vace_unet', fake_scale=fake_scale)\n vacenet = vacenet.to(device)\n vacenet.eval()\n # print('VACENet size = {:.2f}M'.format(vacenet.size))\n # vacenet.check_trainable_parameters()\n\n ## LPSNet\n lpsnet = LPSEstimator(\n input_dim=fft_bins, \n stft_opts=stft_opts_torch, \n input_norm='globalmvn', # loaded from the saved checkpoint\n scope='ldnn_lpseir_ns')\n lpsnet = lpsnet.to(device)\n lpsnet.eval()\n # lpsnet.freeze() # should be frozen when fine-tuning the VACENet\n # print('LPSNet size = {:.2f}M'.format(lpsnet.size))\n # lpsnet.check_trainable_parameters()\n\n ## VACE-WPE\n dnn_vwpe = VACEWPE(\n stft_opts=stft_opts_torch, \n lpsnet=lpsnet, vacenet=vacenet)#, \n # mfcc_opts=mfcc_opts_torch) # only useful 
when fine-tuning the VACENet\n dnn_vwpe, *_ = load_checkpoint(dnn_vwpe, checkpoint=ckpt_file, strict=False)\n dnn_vwpe.to(device)\n dnn_vwpe.eval()\n # print('VACE-WPE size = {:.2f}M'.format(dnn_vwpe.size))\n # dnn_vwpe.check_trainable_parameters()\n\n ## ------------------------------------------------- ##\n\n ## Load audio and apply DRC\n aud, fs = sf.read(wpath) # (t,), 16000\n aud, drc_gain = apply_drc(aud) # (t,), ()\n\n ## Perform dereverberation\n aud = torch.from_numpy(aud)[None] # (batch, samples)\n with torch.no_grad():\n ## The input audio is in shape (batch, samples) (always assume #channels == 1)\n enh = dnn_vwpe.dereverb(\n aud.to(device), delay=delay, taps=taps2) # (t,)\n ## Save\n output_wav_path = f'data/{prefix}-vace_wpe_taps{taps2}.wav'\n sf.write(output_wav_path, data=enh, samplerate=fs)\n\n\ndef run_neural_wpe(wpath, chs='single', dtype=torch.float64):\n print(f'Running \"Neural-WPE-{chs}\"...')\n\n ckpt_file = np.random.choice([\n 'models/20210615-183131/ckpt-ep60', # Drv-VACE-WPE\n 'models/20210615-221501/ckpt-ep60', # Dns-VACE-WPE\n 'models/20210617-095331/ckpt-ep30', # TSON-VACE-WPE\n 'models/20210617-103601/ckpt-ep30', # TSOC-VACE-WPE\n 'models/20210617-125831/ckpt-ep30', # DR-TSOC-VACE-WPE\n ]) # the VACE-WPE variants share the same LPSNet model for PSD estimation\n\n ## ------------------------------------------------- ##\n\n ## LPSNet\n lpsnet = LPSEstimator(\n input_dim=fft_bins, \n stft_opts=stft_opts_torch, \n input_norm='globalmvn', # loaded from the saved checkpoint\n scope='ldnn_lpseir_ns')\n lpsnet = lpsnet.to(device)\n # lpsnet.freeze() # should be frozen when fine-tuning the VACENet\n lpsnet.eval()\n # print('LPSNet size = {:.2f}M'.format(lpsnet.size))\n # lpsnet.check_trainable_parameters()\n\n ## Neural WPE\n dnn_wpe = NeuralWPE(\n stft_opts=stft_opts_torch, \n lpsnet=lpsnet)\n dnn_wpe, *_ = load_checkpoint(dnn_wpe, checkpoint=ckpt_file, strict=False)\n dnn_wpe.to(device)\n dnn_wpe.eval()\n # print('Neural WPE size = {:.2f}M'.format(dnn_wpe.size))\n # dnn_wpe.check_trainable_parameters()\n\n ## ------------------------------------------------- ##\n\n ## Load audio and apply DRC\n aud, fs = sf.read(wpath) # (t,), 16000\n aud, drc_gain = apply_drc(aud) # (t,), ()\n\n if chs == 'single':\n aud = aud[None] # (channels=1, samples)\n taps = taps1\n if chs == 'dual':\n aud2, fs2 = sf.read(sample_wav2, dtype='float32')\n aud2 = aud2 * drc_gain\n aud = np.stack((aud, aud2), axis=0) # (channels=2, samples)\n taps = taps2\n\n ## Perform dereverberation\n aud = torch.from_numpy(aud)[None] # (batch, channels, samples)\n with torch.no_grad():\n ## The input audio must be in shape (batch, channels, samples)\n enh = dnn_wpe(\n aud.to(device), delay=delay, taps=taps, dtype=dtype) # (t,)\n enh = to_arr(enh).squeeze() # convert to numpy array and squeeze\n ## Save\n if chs == 'dual':\n enh = enh[0] # only save the first channel\n # print(enh.sum())\n output_wav_path = f'data/nwpe_{chs}_taps{taps}.wav'\n sf.write(output_wav_path, data=enh, samplerate=fs)\n\n\ndef run_iterative_wpe(wpath, chs='single', n_iter=1, dtype=torch.float64):\n print(f'Running \"Iterative-WPE-{chs}\"...')\n\n ## IterativeWPE WPE\n iter_wpe = IterativeWPE(\n stft_opts=stft_opts_torch)\n\n ## Load audio and apply DRC\n aud, fs = sf.read(wpath) # (t,), 16000\n aud, drc_gain = apply_drc(aud) # (t,), ()\n\n if chs == 'single':\n aud = aud[None] # (channels=1, samples)\n taps = taps1\n if chs == 'dual':\n aud2, fs2 = sf.read(sample_wav2, dtype='float32')\n aud2 = aud2 * drc_gain\n aud = np.stack((aud, 
aud2), axis=0) # (channels=2, samples)\n taps = taps2\n\n ## Perform dereverberation\n aud = torch.from_numpy(aud)[None] # (batch, channels, samples)\n with torch.no_grad():\n ## The input audio must be in shape (batch, channels, samples)\n enh = iter_wpe(\n aud.to(device), delay=delay, taps=taps, dtype=dtype) # (t,)\n enh = to_arr(enh).squeeze() # convert to numpy array and squeeze\n ## Save\n if chs == 'dual':\n enh = enh[0] # only save the first channel\n output_wav_path = f'data/iwpe_{chs}_taps{taps}_iter{n_iter}.wav'\n sf.write(output_wav_path, data=enh, samplerate=fs)\n\n\n\nif __name__==\"__main__\":\n dtype = torch.float64\n\n sample_wav = 'data/AMI_WSJ20-Array1-1_T10c0201.wav'\n # sample_wav2 = 'data/AMI_WSJ20-Array1-2_T10c0201.wav'\n\n sample_wav = 'data/VOiCES_2019_Challenge_SID_eval_1327.wav' # babble noise\n # sample_wav = 'data/VOiCES_2019_Challenge_SID_eval_8058.wav' # ambient noise\n # sample_wav = 'data/VOiCES_2019_Challenge_SID_eval_11391.wav' # music + vocal\n\n\n\n ## Save DRC-applied raw signal\n aud, fs = sf.read(sample_wav) # (t,), 16000\n aud, drc_gain = apply_drc(aud) # (t,), ()\n sample_wav_drc = 'data/raw_signal.wav'\n sf.write(sample_wav_drc, data=aud, samplerate=fs)\n\n # ## Iterative WPE\n # run_iterative_wpe('single', n_iter=1, dtype=dtype)\n # ### run_iterative_wpe('dual', n_iter=1, dtype=dtype)\n\n ## Neural WPE\n run_neural_wpe(sample_wav, 'single', dtype=dtype)\n ### run_neural_wpe('dual', dtype=dtype)\n\n ## VACE-WPE\n run_vace_wpe(sample_wav, prefix='drv')\n run_vace_wpe(sample_wav, prefix='dns')\n run_vace_wpe(sample_wav, prefix='tson')\n run_vace_wpe(sample_wav, prefix='tsoc')\n run_vace_wpe(sample_wav, prefix='dr-tsoc')\n"
] | [
[
"numpy.random.choice",
"torch.set_printoptions",
"torch.from_numpy",
"numpy.stack",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
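For orientation, a single-channel, single-frequency-bin sketch of the weighted prediction error (WPE) step behind the NeuralWPE/IterativeWPE modules that run.py calls: predict late reverberation from frames at least `delay` frames in the past using `taps` filter coefficients, weighting each frame by an estimated power, and subtract the prediction. Illustration only with my own names; the actual modules work on full multi-channel STFTs, and the neural variant takes the power estimate from LPSNet.

import numpy as np

def wpe_one_bin(y, delay=3, taps=10, power=None, eps=1e-8):
    """One WPE pass over a complex STFT bin y of shape (T,)."""
    T = len(y)
    power = np.maximum(np.abs(y) ** 2 if power is None else power, eps)
    # Stack delayed past frames: Y[k, t] = y[t - delay - k].
    Y = np.zeros((taps, T), dtype=complex)
    for k in range(taps):
        shift = delay + k
        Y[k, shift:] = y[:T - shift]
    # Power-weighted correlation statistics and filter solve.
    R = (Y / power) @ Y.conj().T          # (taps, taps)
    r = (Y / power) @ y.conj()            # (taps,)
    g = np.linalg.solve(R + eps * np.eye(taps), r)
    # Subtract the predicted late reverberation tail.
    return y - g.conj() @ Y

rng = np.random.default_rng(0)
y = rng.standard_normal(200) + 1j * rng.standard_normal(200)
print(wpe_one_bin(y, delay=3, taps=15).shape)   # (200,), matching taps2 above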
faridsaud/ML-Snippets | [
"4e846e7dde63480c34f51329f261aef73040b0d6"
] | [
"UnsupervisedLearning/HerarchicalClustering/Hierarchical Clustering Lab.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Hierarchical Clustering Lab\n# In this notebook, we will be using sklearn to conduct hierarchical clustering on the [Iris dataset](https://archive.ics.uci.edu/ml/datasets/iris) which contains 4 dimensions/attributes and 150 samples. Each sample is labeled as one of the three type of Iris flowers.\n# \n# In this exercise, we'll ignore the labeling and cluster based on the attributes, then we'll compare the results of different hierarchical clustering techniques with the original labels to see which one does a better job in this scenario. We'll then proceed to visualize the resulting cluster hierarchies.\n# \n# ## 1. Importing the Iris dataset\n# \n\n# In[1]:\n\n\nfrom sklearn import datasets\n\niris = datasets.load_iris()\n\n\n# A look at the first 10 samples in the dataset\n\n# In[2]:\n\n\niris.data[:10]\n\n\n# ```iris.target``` contains the labels that indicate which type of Iris flower each sample is\n\n# In[3]:\n\n\niris.target\n\n\n# ## 2. Clustering\n# Let's now use sklearn's ```AgglomerativeClustering``` to conduct the heirarchical clustering\n\n# In[4]:\n\n\nfrom sklearn.cluster import AgglomerativeClustering\n\n# Hierarchical clustering\n# Ward is the default linkage algorithm, so we'll start with that\nward = AgglomerativeClustering(n_clusters=3)\nward_pred = ward.fit_predict(iris.data)\n\n\n# Let's also try complete and average linkages\n# \n# **Exercise**:\n# * Conduct hierarchical clustering with complete linkage, store the predicted labels in the variable ```complete_pred```\n# * Conduct hierarchical clustering with average linkage, store the predicted labels in the variable ```avg_pred```\n\n# In[5]:\n\n\n# Hierarchical clustering using complete linkage\n# TODO: Create an instance of AgglomerativeClustering with the appropriate parameters\ncomplete = AgglomerativeClustering(n_clusters=3, linkage=\"complete\")\n# Fit & predict\n# TODO: Make AgglomerativeClustering fit the dataset and predict the cluster labels\ncomplete_pred = complete.fit_predict(iris.data)\n\n# Hierarchical clustering using average linkage\n# TODO: Create an instance of AgglomerativeClustering with the appropriate parameters\navg = AgglomerativeClustering(n_clusters=3, linkage=\"average\")\n# Fit & predict\n# TODO: Make AgglomerativeClustering fit the dataset and predict the cluster labels\navg_pred = avg.fit_predict(iris.data)\n\n\n# To determine which clustering result better matches the original labels of the samples, we can use ```adjusted_rand_score``` which is an *external cluster validation index* which results in a score between -1 and 1, where 1 means two clusterings are identical of how they grouped the samples in a dataset (regardless of what label is assigned to each cluster).\n# \n# Cluster validation indices are discussed later in the course.\n\n# In[6]:\n\n\nfrom sklearn.metrics import adjusted_rand_score\n\nward_ar_score = adjusted_rand_score(iris.target, ward_pred)\n\n\n# **Exercise**:\n# * Calculate the Adjusted Rand score of the clusters resulting from complete linkage and average linkage\n\n# In[7]:\n\n\n# TODO: Calculated the adjusted Rand score for the complete linkage clustering labels\ncomplete_ar_score = adjusted_rand_score(iris.target, complete_pred)\n\n# TODO: Calculated the adjusted Rand score for the average linkage clustering labels\navg_ar_score = adjusted_rand_score(iris.target, avg_pred)\n\n\n# Which algorithm results in the higher Adjusted Rand Score?\n\n# In[8]:\n\n\nprint( \"Scores: \\nWard:\", ward_ar_score,\"\\nComplete: \", 
complete_ar_score, \"\\nAverage: \", avg_ar_score)\n\n\n# ## 3. The Effect of Normalization on Clustering\n# \n# Can we improve on this clustering result?\n# \n# Let's take another look at the dataset\n\n# In[9]:\n\n\niris.data[:15]\n\n\n# Looking at this, we can see that the forth column has smaller values than the rest of the columns, and so its variance counts for less in the clustering process (since clustering is based on distance). Let us [normalize](https://en.wikipedia.org/wiki/Feature_scaling) the dataset so that each dimension lies between 0 and 1, so they have equal weight in the clustering process.\n# \n# This is done by subtracting the minimum from each column then dividing the difference by the range.\n# \n# sklearn provides us with a useful utility called ```preprocessing.normalize()``` that can do that for us\n\n# In[9]:\n\n\nfrom sklearn import preprocessing\n\nnormalized_X = preprocessing.normalize(iris.data)\nnormalized_X[:10]\n\n\n# Now all the columns are in the range between 0 and 1. Would clustering the dataset after this transformation lead to a better clustering? (one that better matches the original labels of the samples)\n\n# In[10]:\n\n\nward = AgglomerativeClustering(n_clusters=3)\nward_pred = ward.fit_predict(normalized_X)\n\ncomplete = AgglomerativeClustering(n_clusters=3, linkage=\"complete\")\ncomplete_pred = complete.fit_predict(normalized_X)\n\navg = AgglomerativeClustering(n_clusters=3, linkage=\"average\")\navg_pred = avg.fit_predict(normalized_X)\n\n\nward_ar_score = adjusted_rand_score(iris.target, ward_pred)\ncomplete_ar_score = adjusted_rand_score(iris.target, complete_pred)\navg_ar_score = adjusted_rand_score(iris.target, avg_pred)\n\nprint( \"Scores: \\nWard:\", ward_ar_score,\"\\nComplete: \", complete_ar_score, \"\\nAverage: \", avg_ar_score)\n\n\n# ## 4. Dendrogram visualization with scipy\n# \n# Let's visualize the highest scoring clustering result. \n# \n# To do that, we'll need to use Scipy's [```linkage```](https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html) function to perform the clusteirng again so we can obtain the linkage matrix it will later use to visualize the hierarchy\n\n# In[11]:\n\n\n# Import scipy's linkage function to conduct the clustering\nfrom scipy.cluster.hierarchy import linkage\n\n# Specify the linkage type. Scipy accepts 'ward', 'complete', 'average', as well as other values\n# Pick the one that resulted in the highest Adjusted Rand Score\nlinkage_type = 'ward'\n\nlinkage_matrix = linkage(normalized_X, linkage_type)\n\n\n# Plot using scipy's [dendrogram](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.cluster.hierarchy.dendrogram.html) function\n\n# In[13]:\n\n\nfrom scipy.cluster.hierarchy import dendrogram\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(22,18))\n\n# plot using 'dendrogram()'\ndendrogram(linkage_matrix)\n\nplt.show()\n\n\n# ## 5. Visualization with Seaborn's ```clustermap``` \n# \n# The [seaborn](http://seaborn.pydata.org/index.html) plotting library for python can plot a [clustermap](http://seaborn.pydata.org/generated/seaborn.clustermap.html), which is a detailed dendrogram which also visualizes the dataset in more detail. 
It conducts the clustering as well, so we only need to pass it the dataset and the linkage type we want, and it will use scipy internally to conduct the clustering\n\n# In[ ]:\n\n\nimport seaborn as sns\n\nsns.clustermap(normalized_X, figsize=(12,18), method=linkage_type, cmap='viridis')\n\n# Expand figsize to a value like (18, 50) if you want the sample labels to be readable\n# Draw back is that you'll need more scrolling to observe the dendrogram\n\nplt.show()\n\n\n# Looking at the colors of the dimensions can you observe how they differ between the three type of flowers? You should at least be able to notice how one is vastly different from the two others (in the top third of the image).\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"scipy.cluster.hierarchy.dendrogram",
"sklearn.datasets.load_iris",
"sklearn.preprocessing.normalize",
"scipy.cluster.hierarchy.linkage",
"sklearn.metrics.adjusted_rand_score",
"sklearn.cluster.AgglomerativeClustering",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
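The clustering walkthrough embedded in the row above describes min-max scaling (subtract the column minimum, divide by the range) but calls ```preprocessing.normalize()```, which rescales each sample (row) to unit norm instead. The following is a minimal illustrative sketch, not code from the row itself, contrasting the two transformations on the same iris data:

# Hedged sketch: compare row-wise unit-norm scaling with per-column min-max
# scaling before agglomerative clustering, scoring each against the true labels.
from sklearn import datasets, preprocessing
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import adjusted_rand_score

iris = datasets.load_iris()

# preprocessing.normalize(): every sample (row) is rescaled to unit L2 norm
unit_norm_X = preprocessing.normalize(iris.data)

# MinMaxScaler: every feature (column) is rescaled to the [0, 1] range
minmax_X = preprocessing.MinMaxScaler().fit_transform(iris.data)

for name, X in [("unit-norm", unit_norm_X), ("min-max", minmax_X)]:
    pred = AgglomerativeClustering(n_clusters=3, linkage="ward").fit_predict(X)
    print(name, adjusted_rand_score(iris.target, pred))

Either transformation keeps a single feature from dominating the distance computation; which one scores better on the adjusted Rand index depends on the data.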
aassumpcao/tseresearch | [
"8c46a81fddee1f2a18b35a28a32dfe0a1f294750"
] | [
"scripts/10_tse_sentence_classification.py"
] | [
"### electoral crime and performance paper\n# judicial decisions script\n# this script uses the trained models to predict sentence categories. i\n# use the textual info in the sentences to determine the (class)\n# allegations against individual candidates running for office.\n# author: andre assumpcao\n# by [email protected]\n\n# import statements\nfrom sklearn.svm import LinearSVC\nfrom sklearn.externals import joblib\nimport pickle, csv\nimport pandas as pd\nimport scipy.sparse as sparse\n\n# define function to load the data\ndef load_tse():\n kwargs = {'index_col': False, 'encoding': 'utf-8'}\n df = pd.read_csv('data/tsePredictions.csv', **kwargs)\n df['classID'] = df['class'].factorize()[0]\n df = df.sort_values('classID').reset_index(drop = True)\n return df\n\n# define function to save candidateIDs\ndef save_candidateID(df):\n split = len(df[df['classID'] == -1])\n return (\n df.loc[split:, 'candidateID'].reset_index(drop = True),\n df.loc[:(split-1), 'candidateID'].reset_index(drop = True)\n )\n\n# define function to split validation and classification samples\ndef split_labels_tse(df):\n split = len(df[df['classID'] == -1])\n return (\n df.loc[split:, 'classID'].reset_index(drop = True),\n df.loc[:(split-1), 'classID'].reset_index(drop = True)\n )\n\n# define function to load features into python\ndef load_features():\n features_cv = sparse.load_npz('data/features_tfidf_cv.npz').toarray()\n features_pr = sparse.load_npz('data/features_tfidf_pr.npz').toarray()\n return features_cv, features_pr\n\n# define main program block\ndef main():\n\n # load dataset and split labels for validation and classification\n tse = load_tse()\n\n # load features for validation and classification\n labels_cv, labels_pr = split_labels_tse(tse)\n\n # save candidateIDs\n id_cv, id_pr = save_candidateID(tse)\n\n # load features for validation and classification\n features_cv, features_pr = load_features()\n\n # load linear SVC model\n model = joblib.load('data/LinearSVC.pkl')\n\n # predict classes\n y_pred = model.predict(features_pr)\n\n # check dimensions of all prediction files\n len(labels_pr) == len(id_pr) == len(features_pr) == len(y_pred)\n\n # create new datasets with observed and predicted classes\n tseObserved = pd.DataFrame({'class': labels_cv, 'candidateID': id_cv})\n tsePredicted = pd.DataFrame({'class': y_pred, 'candidateID': id_pr})\n\n # create new dataset with the class probability from dnn model\n tseClasses = pd.concat([tseObserved, tsePredicted], ignore_index = True)\n\n # save to file\n kwargs = {'index': False, 'quoting': csv.QUOTE_ALL}\n tseClasses.to_csv('data/tseClasses.csv', **kwargs)\n\n# define main program block\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"scipy.sparse.load_npz",
"pandas.DataFrame",
"sklearn.externals.joblib.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
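The prediction script in the row above imports joblib through ```sklearn.externals.joblib``` (removed in recent scikit-learn releases) and checks the prediction dimensions with a bare comparison whose result is discarded. Below is a minimal hedged sketch of the same persist/reload/predict pattern, using hypothetical in-memory data rather than the repository's files:

# Hedged sketch: persist a LinearSVC with the standalone joblib package and
# enforce the dimension check with an assert. The texts and labels below are
# invented for illustration only.
import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC

texts = ["guilty of electoral fraud", "acquitted of all charges",
         "convicted of vote buying", "case dismissed by the court"]
labels = [1, 0, 1, 0]

features = TfidfVectorizer().fit_transform(texts)   # sparse TF-IDF matrix
model = LinearSVC().fit(features, labels)

joblib.dump(model, "LinearSVC.pkl")                 # persist the trained model
restored = joblib.load("LinearSVC.pkl")             # reload it later
y_pred = restored.predict(features)

assert len(y_pred) == features.shape[0]             # fails loudly if shapes disagree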
tylertownsend/bingo | [
"0aeebe03df71a632f833c56ceb9c697dddbe78fc",
"0aeebe03df71a632f833c56ceb9c697dddbe78fc"
] | [
"tests/Base/test_continuous_local_optimization.py",
"tests/island_benchmarks.py"
] | [
"# Ignoring some linting rules in tests\n# pylint: disable=redefined-outer-name\n# pylint: disable=missing-docstring\nimport pytest\nimport numpy as np\n\nfrom bingo.Base.FitnessFunction import FitnessFunction, VectorBasedFunction\nfrom bingo.Base.ContinuousLocalOptimization import ContinuousLocalOptimization\nfrom bingo.Base.MultipleFloats import MultipleFloatChromosome\n\nNUM_VALS = 10\nNUM_OPT = 3\n\n\nclass MultipleFloatValueFitnessFunction(FitnessFunction):\n def __call__(self, individual):\n print(individual)\n return np.linalg.norm(individual.values)\n\n\nclass FloatVectorFitnessFunction(VectorBasedFunction):\n def _evaluate_fitness_vector(self, individual):\n vals = individual.values\n return [x - 0 for x in vals]\n\n\[email protected]\ndef opt_individual():\n vals = [1. for _ in range(NUM_VALS)]\n return MultipleFloatChromosome(vals, [1, 3, 4])\n\n\[email protected]\ndef reg_individual():\n vals = [1. for _ in range(NUM_VALS)]\n return MultipleFloatChromosome(vals)\n\n\[email protected](\"algorithm\", [\n 'Nelder-Mead',\n 'Powell',\n 'CG',\n 'BFGS',\n # 'Newton-CG',\n 'L-BFGS-B',\n # 'TNC',\n # 'COBYLA',\n 'SLSQP'\n # 'trust-constr'\n # 'dogleg',\n # 'trust-ncg',\n # 'trust-exact',\n # 'trust-krylov'\n])\ndef test_optimize_params(opt_individual, reg_individual, algorithm):\n fitness_function = MultipleFloatValueFitnessFunction()\n local_opt_fitness_function = ContinuousLocalOptimization(\n fitness_function, algorithm)\n opt_indv_fitness = local_opt_fitness_function(opt_individual)\n reg_indv_fitness = local_opt_fitness_function(reg_individual)\n assert opt_indv_fitness == pytest.approx(np.sqrt(NUM_VALS - NUM_OPT))\n assert reg_indv_fitness == pytest.approx(np.sqrt(NUM_VALS))\n\n\[email protected](\"algorithm\", [\n # 'hybr',\n 'lm'\n # 'broyden1',\n # 'broyden2',\n # 'anderson',\n # 'linearmixing',\n # 'diagbroyden',\n # 'excitingmixing',\n # 'krylov',\n # 'df-sane'\n])\ndef test_optimize_fitness_vector(opt_individual, reg_individual, algorithm):\n reg_list = [1. for _ in range(NUM_VALS)]\n opt_list = [1. 
for _ in range(NUM_VALS)]\n opt_list[:3] = [0., 0., 0.]\n fitness_function = FloatVectorFitnessFunction()\n local_opt_fitness_function = ContinuousLocalOptimization(\n fitness_function, algorithm)\n opt_indv_fitness = local_opt_fitness_function(opt_individual)\n reg_indv_fitness = local_opt_fitness_function(reg_individual)\n assert opt_indv_fitness == pytest.approx(np.mean(opt_list))\n assert reg_indv_fitness == pytest.approx(np.mean(reg_list))\n\n\ndef test_valid_fitness_function():\n fitness_function = MultipleFloatValueFitnessFunction()\n with pytest.raises(TypeError):\n ContinuousLocalOptimization(fitness_function, algorithm='lm')\n\n\ndef test_not_valid_algorithm():\n fitness_function = MultipleFloatValueFitnessFunction()\n with pytest.raises(KeyError):\n ContinuousLocalOptimization(fitness_function,\n algorithm='Dwayne - The Rock - Johnson')\n\n\ndef test_get_eval_count_pass_through():\n fitness_function = MultipleFloatValueFitnessFunction()\n fitness_function.eval_count = 123\n local_opt_fitness_function = \\\n ContinuousLocalOptimization(fitness_function, \"Powell\")\n assert local_opt_fitness_function.eval_count == 123\n\n\ndef test_set_eval_count_pass_through():\n fitness_function = MultipleFloatValueFitnessFunction()\n local_opt_fitness_function = \\\n ContinuousLocalOptimization(fitness_function, \"Powell\")\n local_opt_fitness_function.eval_count = 123\n assert fitness_function.eval_count == 123\n\n\ndef test_get_training_data_pass_through():\n fitness_function = MultipleFloatValueFitnessFunction()\n fitness_function.training_data = 123\n local_opt_fitness_function = \\\n ContinuousLocalOptimization(fitness_function, \"Powell\")\n assert local_opt_fitness_function.training_data == 123\n\n\ndef test_set_training_data_pass_through():\n fitness_function = MultipleFloatValueFitnessFunction()\n local_opt_fitness_function = \\\n ContinuousLocalOptimization(fitness_function, \"Powell\")\n local_opt_fitness_function.training_data = 123\n assert fitness_function.training_data == 123\n",
"import timeit\n\nimport numpy as np\n\nfrom bingo.SymbolicRegression.AGraph.AGraphCrossover import AGraphCrossover\nfrom bingo.SymbolicRegression.AGraph.AGraphMutation import AGraphMutation\nfrom bingo.SymbolicRegression.AGraph.AGraphGenerator import AGraphGenerator\nfrom bingo.SymbolicRegression.AGraph.ComponentGenerator \\\n import ComponentGenerator\nfrom bingo.SymbolicRegression.ExplicitRegression import ExplicitRegression, \\\n ExplicitTrainingData\nfrom bingo.Base.AgeFitnessEA import AgeFitnessEA\nfrom bingo.Base.Evaluation import Evaluation\nfrom bingo.Base.Island import Island\nfrom bingo.Base.ContinuousLocalOptimization import ContinuousLocalOptimization\nfrom performance_benchmarks import StatsPrinter\n\nPOP_SIZE = 128\nSTACK_SIZE = 64\nMUTATION_PROBABILITY = 0.4\nCROSSOVER_PROBABILITY = 0.4\nNUM_POINTS = 100\nSTART = -10\nSTOP = 10\nERROR_TOLERANCE = 10e-9\nSEED = 20\n\ndef init_x_vals(start, stop, num_points):\n return np.linspace(start, stop, num_points).reshape([-1, 1])\n\ndef equation_eval(x):\n return x**2 + 3.5*x**3\n\ndef init_island():\n np.random.seed(15)\n x = init_x_vals(START, STOP, NUM_POINTS)\n y = equation_eval(x)\n training_data = ExplicitTrainingData(x, y)\n\n component_generator = ComponentGenerator(x.shape[1])\n component_generator.add_operator(2)\n component_generator.add_operator(3)\n component_generator.add_operator(4)\n\n crossover = AGraphCrossover()\n mutation = AGraphMutation(component_generator)\n\n agraph_generator = AGraphGenerator(STACK_SIZE, component_generator)\n\n fitness = ExplicitRegression(training_data=training_data)\n local_opt_fitness = ContinuousLocalOptimization(fitness, algorithm='lm')\n evaluator = Evaluation(local_opt_fitness)\n\n ea_algorithm = AgeFitnessEA(evaluator, agraph_generator, crossover,\n mutation, MUTATION_PROBABILITY,\n CROSSOVER_PROBABILITY, POP_SIZE)\n\n island = Island(ea_algorithm, agraph_generator, POP_SIZE)\n return island\n\nTEST_ISLAND = init_island()\n\nclass IslandStatsPrinter(StatsPrinter):\n def __init__(self):\n super().__init__()\n self._output = [\"-\"*24+\":::: REGRESSION BENCHMARKS ::::\" + \"-\"*23,\n self._header_format_string.format(\"NAME\", \"MEAN\",\n \"STD\", \"MIN\", \"MAX\"),\n \"-\"*78]\n\ndef explicit_regression_benchmark():\n island = init_island()\n while island.best_individual().fitness > ERROR_TOLERANCE:\n island.execute_generational_step()\n\ndef do_benchmarking():\n printer = IslandStatsPrinter()\n printer.add_stats(\"Explicit Regression\",\n timeit.repeat(explicit_regression_benchmark,\n number=10,\n repeat=10))\n printer.print()\n\nif __name__ == \"__main__\":\n do_benchmarking()"
] | [
[
"numpy.sqrt",
"numpy.mean",
"numpy.linalg.norm"
],
[
"numpy.random.seed",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
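The tests in the row above parametrize ContinuousLocalOptimization over scipy-style method names ('Nelder-Mead', 'Powell', 'BFGS', ..., plus 'lm' for vector-valued fitness). As a hedged sketch of the two underlying scipy families, assuming only plain scipy and not bingo's own wrapper:

# Hedged sketch: a scalar objective handled by scipy.optimize.minimize and a
# residual vector handled by scipy.optimize.root(method='lm').
import numpy as np
from scipy.optimize import minimize, root

def scalar_fitness(params):
    # scalar objective, analogous to a scalar fitness function
    return np.linalg.norm(params)

def residual_vector(params):
    # vector of residuals, analogous to a vector-based fitness function
    return params - 0.0

x0 = np.ones(10)
res_scalar = minimize(scalar_fitness, x0, method="Powell")
res_vector = root(residual_vector, x0, method="lm")

print(res_scalar.fun)          # close to 0 after local optimization
print(np.mean(res_vector.x))   # close to 0 after root finding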
ruizca/astromatch | [
"14fa56768149d0d292b939248560210c17d6d3b1"
] | [
"astromatch/priors.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nastromatch module for calculation of magnitude priors.\n\n@author: A.Ruiz\n\"\"\"\nimport os\nimport warnings\n\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.table import Table\nfrom astropy.utils.exceptions import AstropyUserWarning\nfrom scipy.interpolate import interp1d\nfrom scipy.ndimage import convolve\n\n\nfrom .catalogues import Catalogue\n\n\nclass Prior(object):\n \"\"\"\n Class for probability priors.\n \"\"\"\n\n def __init__(\n self,\n pcat=None,\n scat=None,\n rndcat=True,\n radius=5*u.arcsec,\n magmin='auto',\n magmax='auto',\n magbinsize='auto',\n match_mags=None,\n prior_dict=None\n ):\n \"\"\"\n Estimates the prior probability distribution for a source in the\n primary catalogue `pcat` having a counterpart in the secondary\n catalogue `scat` with magnitude m.\n\n The a priori probability is determined as follows. First, we estimate\n the magnitude distribution of the spurious matches and it is scaled\n to the area within which we search for counterparts. This is then\n subtracted from the magnitude distribution of all counterparts in the\n secondary catalogue to determine the magnitude distribution of the\n true associations.\n\n Parameters\n ----------\n pcat, scat : ``Catalogue``\n rndcat : `boolean`, optional\n We implemented two methods for estimating the magnitude distribution\n of spurious matches: If 'rndcat' is ``False``, it removes all sources\n in the secondary catalogue within one arcmin of the positions of the\n primary sources. The magnitude distribution of the remaining sources,\n divided by the remaining catalogue area, corresponds to the\n probability distribution of a spurious match per magnitude and per\n square degree.\n If 'rndcat' is ``True``, it generates a catalogue of random\n positions away from the primary sources and searchs for all available\n counterparts in the secondary catalogue. The magnitude distribution\n of these sources corresponds to the probability distribution of a\n spurious match.\n radius : Astropy ``Quantity``, optional\n Distance limit used for searching counterparts in the secondary\n catalogue in angular units. 
Default to 5 arcsec.\n magmin : `float` or 'auto', optional\n Lower magnitude limit when estimating magnitude distributions.\n Default to 'auto'.\n magmax : `float` or 'auto', optional\n Upper magnitude limit when estimating magnitude distributions.\n Default to 'auto'.\n magbinsize : `float` or 'auto', optional\n Magnitude bin width when estimating magnitude distributions.\n Default to 'auto'.\n \"\"\"\n if prior_dict is None:\n self._from_catalogues(pcat, scat, match_mags, rndcat,\n radius, magmin, magmax, magbinsize)\n else:\n self.prior_dict = prior_dict\n self.rndcat = None\n\n def _from_catalogues(self, pcat, scat, match_mags, rndcat,\n radius, magmin, magmax, magbinsize):\n if None in [pcat, scat]:\n raise ValueError('Two Catalogues must be passed!')\n\n if rndcat is True:\n self.rndcat = pcat.randomise()\n\n elif isinstance(rndcat, Catalogue):\n self.rndcat = rndcat\n\n else:\n message = 'Using mask method for the prior calculation.'\n warnings.warn(message, AstropyUserWarning)\n self.rndcat = None\n\n if match_mags is None:\n match_mags = self._get_match_mags(pcat, scat, radius)\n\n self.prior_dict = self._calc_prior_dict(\n pcat, scat, radius, match_mags, magmin, magmax, magbinsize\n )\n\n @property\n def magnames(self):\n return list(self.prior_dict.keys())\n\n @classmethod\n def from_nway_hists(cls, cat, renorm_factors, path='.'):\n \"\"\"\n Create a ``Prior`` object using nway histogram files.\n \"\"\"\n prior_dict = {}\n for mag in cat.mags.colnames:\n filename = '{}_{}_fit.txt'.format(cat.name, mag)\n filename = os.path.join(path, filename)\n prior_dict[mag] = cls._from_nway_maghist(filename, renorm_factors[mag])\n\n return cls(prior_dict=prior_dict)\n\n @classmethod\n def from_table(cls, priors_table, magnames):\n \"\"\"\n Create a ``Prior`` object using an Astropy Table.\n\n Parameters\n ----------\n priors_table : `str` or ``Table``\n Astropy table with the prior values or, alternatively,\n a file path containing a table in a format readable by Astropy.\n Note: If the table does not include priors for \"field\" sources,\n they are set to zero.\n magnames : `list`\n Magnitude names.\n \"\"\"\n if not isinstance(priors_table, Table):\n priors_table = Table.read(priors_table)\n\n # TODO: how to do this when magbinsize is 'auto'\n bins = cls._midvals_to_bins(priors_table['MAG'])\n\n prior_dict = {}\n for mag in magnames:\n maghist = {}\n maghist['bins'] = bins\n maghist['target'] = priors_table['PRIOR_' + mag].data\n\n try:\n maghist['field'] = priors_table['PRIOR_BKG_' + mag].data\n except KeyError:\n maghist['field'] = np.zeros_like(bins)\n\n message = 'Field prior for {} set to zero.'.format(mag)\n warnings.warn(message, AstropyUserWarning)\n\n prior_dict[mag] = maghist\n\n return cls(prior_dict=prior_dict)\n\n def to_nway_hists(self, output_path=None):\n \"\"\"\n Returns a dictionary with the prior histograms in\n a format compatible with nway. 
If `output_path` is not ``None``,\n a text file is created with a formatting compatible with nway.\n \"\"\"\n nhists = []\n for magcol in self.magnames:\n if output_path is not None:\n filename = '{}_{}_fit.txt'.format(self.scat.name, magcol)\n filename = os.path.join(output_path, filename)\n else:\n filename = None\n\n maghist = self._to_nway_maghist(self.prior_dict[magcol], filename)\n nhists.append(maghist)\n\n return nhists\n\n def interp(self, mags, magcol):\n \"\"\"\n Return the prior at magnitude values `mags` for magnitude `magcol`.\n\n Parameters\n ----------\n \"\"\"\n if magcol not in self.prior_dict:\n raise ValueError('Unknown magcol: {}'.format(magcol))\n\n bins = self.bins_midvals(magcol)\n prior = self.prior_dict[magcol]\n\n itp = interp1d(\n bins, prior['target'], kind='nearest', fill_value=0, bounds_error=False\n )\n pvals = itp(mags)\n\n return pvals\n\n def qcap(self, magcol):\n \"\"\"\n Overall identification ratio for magnitude `magcol`\n between the two catalogues used to build the prior.\n \"\"\"\n if magcol not in self.prior_dict:\n raise ValueError('Unknown magcol: {}'.format(magcol))\n\n prior = self.prior_dict[magcol]\n\n # Whatch out prior is dN/dm,\n # i.e. I have divided by dm so it is probability density and\n # Sum(dN/dm*dm)=Q ie the overall identification ratio (not 1)\n return np.sum(prior['target'] * np.diff(prior['bins']))\n\n def bins_midvals(self, magcol):\n if magcol not in self.prior_dict:\n raise ValueError('Unknown magcol: {}'.format(magcol))\n\n edges = self.prior_dict[magcol]['bins']\n\n return (edges[1:] + edges[:-1])/2\n\n def to_table(self, include_bkg_priors=False):\n \"\"\"\n Dump prior data into an Astropy Table.\n \"\"\"\n # TODO: how to build this table when magbinsize is 'auto'\n priors_table = Table()\n priors_table['MAG'] = self.bins_midvals(self.magnames[0])\n for mag in self.magnames:\n priors_table['PRIOR_' + mag] = self.prior_dict[mag]['target']\n\n if include_bkg_priors:\n priors_table['PRIOR_BKG_' + mag] = self.prior_dict[mag]['field']\n\n return priors_table\n\n def plot(self, magname, filename=None):\n \"\"\"\n Plot priors for magnitude `magname`.\n\n Parameters\n \"\"\"\n import matplotlib.pyplot as plt\n\n mbins = self.bins_midvals(magname)\n prior = self.prior_dict[magname]\n\n plt.plot(mbins, prior['target'])\n plt.plot(mbins, prior['field'])\n plt.title(magname)\n\n if filename is None:\n plt.show()\n else:\n plt.savefig(filename)\n plt.close()\n\n\n def _get_match_mags(self, pcat, scat, radius):\n _, idx_near, _, _ = scat.coords.search_around_sky(pcat.coords, radius)\n\n return scat.mags[idx_near]\n\n def _calc_prior_dict(\n self,\n pcat,\n scat,\n radius,\n match_mags,\n magmin,\n magmax,\n magbinsize,\n mask_radius=1*u.arcmin\n ):\n if self.rndcat is None:\n field_cat = self._field_sources(pcat, scat, mask_radius)\n else:\n field_cat = self._random_sources(scat, radius)\n\n renorm_factor = len(pcat) * np.pi*radius**2 / field_cat.area # area_match / area field\n\n prior_dict = {}\n for magcol in match_mags.colnames:\n target_mags = match_mags[magcol]\n field_mags = field_cat.mags[magcol]\n prior_dict[magcol] = self._mag_hist(\n len(pcat), target_mags, field_mags, renorm_factor, magmin, magmax, magbinsize\n )\n return prior_dict\n\n def _field_sources(self, pcat, scat, mask_radius):\n # Find sources within the mask_radius\n pcoords = pcat.coords\n scoords = scat.coords\n _, idx_near, _, _ = scoords.search_around_sky(pcoords, mask_radius)\n\n # Select all sources but those within the mask_radius\n idx_all = 
range(len(scat))\n idx_far = list(set(idx_all) - set(idx_near))\n field_cat = scat[idx_far]\n\n # Area covered by the new catalogue\n field_cat.area = scat.area - len(pcat)*np.pi*mask_radius**2\n field_cat.moc = None\n\n return field_cat\n\n def _random_sources(self, scat, radius):\n assert self.rndcat is not None\n\n # Select sources from the secondary catalogue within radius of random sources\n pcoords = self.rndcat.coords\n scoords = scat.coords\n _, sidx, _, _ = scoords.search_around_sky(pcoords, radius)\n rnd_scat = scat[sidx]\n\n # Area covered by the new catalogue\n rnd_scat.area = len(self.rndcat)*np.pi*radius**2\n rnd_scat.moc = None\n\n return rnd_scat\n\n def _mag_hist(\n self,\n pcat_nsources,\n target_mags,\n field_mags,\n renorm_factor,\n magmin,\n magmax,\n magbinsize\n ):\n if magmin == 'auto':\n magmin = np.nanmin(target_mags)\n\n if magmax == 'auto':\n magmax = np.nanmax(target_mags)\n\n bins, magrange = _define_magbins(magmin, magmax, magbinsize)\n target_counts, bins = np.histogram(target_mags, range=magrange, bins=bins)\n field_counts, _ = np.histogram(field_mags, range=magrange, bins=bins)\n magbinsize = np.diff(bins)\n\n target_prior = target_counts - field_counts * renorm_factor\n target_prior[target_prior < 0] = 0.0\n # TODO: calculate general values for the convolution parameters\n # (magbinsize dependent)\n target_prior = convolve(target_prior, [0.25, 0.5, 0.25])\n # target_prior = convolve(target_prior, [magbinsize[0]/2., magbinsize[0], magbinsize[0]/2.])\n\n # renormalise here to 0.999 in case\n # prior sums to a value above unit\n # Not unit because then zeros in Reliability\n # estimation, i.e. (1-QCAP) term\n# test = target_prior.sum() / len(self.pcat)\n# if test > 1:\n# target_prior = 0.999 * target_prior / test\n\n maghist = {\n 'bins': bins,\n 'target': target_prior / pcat_nsources / magbinsize,\n 'field': 1.0*field_counts / len(field_mags) / magbinsize,\n }\n\n return maghist\n\n @staticmethod\n def _to_nway_maghist(maghist, filename=None):\n nrows = maghist['target'].size\n\n hist_data = np.zeros((nrows, 4))\n hist_data[:, 0] = maghist['bins'][:-1]\n hist_data[:, 1] = maghist['bins'][1:]\n hist_data[:, 2] = (\n maghist['target'] / np.sum(maghist['target'] * np.diff(maghist['bins']))\n )\n hist_data[:, 3] = maghist['field']\n\n if filename is not None:\n header = '{}\\nlo hi selected others'.format(filename)\n np.savetxt(filename, hist_data, fmt='%10.5f', header=header)\n\n return [row for row in hist_data.T]\n\n @staticmethod\n def _from_nway_maghist(filename, renorm_factor):\n hist_data = Table.read(filename, format='ascii')\n\n maghist = {\n 'bins': np.concatenate((hist_data['lo'], [hist_data['hi'][-1]])),\n 'target': renorm_factor * hist_data['selected'].data,\n 'field': hist_data['others'].data,\n }\n\n return maghist\n\n # @staticmethod\n # def _midvals_to_bins(midvals):\n # dbins = np.diff(midvals) / 2\n # bins_lo = set(midvals[:-1] - dbins)\n # bins_hi = set(midvals[1:] + dbins)\n # bins = np.array(list(bins_lo.union(bins_hi)))\n # bins.sort()\n\n # return bins\n\n @staticmethod\n def _midvals_to_bins(midvals):\n dbins = np.diff(midvals) / 2\n bins = np.zeros_like(midvals)\n bins[1:] = midvals[1:] + dbins\n bins[0] = midvals[0] - dbins[0]\n\n return bins\n\n\nclass BKGpdf(object):\n\n def __init__(self, cat, magmin='auto', magmax='auto', magbinsize='auto'):\n \"\"\"\n Magnitude probability distribution of sources in ``Catalogue`` 'cat'.\n\n Parameters\n ----------\n cat : ``Catalogue``\n ``Catalogue`` object.\n magmin : `float` or 'auto', 
optional\n Lower magnitude limit when estimating magnitude distributions.\n Default to 'auto'.\n magmax : `float` or 'auto', optional\n Upper magnitude limit when estimating magnitude distributions.\n Default to 'auto'.\n magbinsize : `float` or 'auto', optional\n Magnitude bin width when estimating magnitude distributions.\n Default to 'auto'.\n\n Return\n ------\n bkg : Astropy ``Table``\n Table with the background probability distribution for each\n available magnitude in the secondary catalogue.\n \"\"\"\n if cat.mags is None:\n raise ValueError('No magnitudes defined in the catalogue!')\n\n #self.magnames = self._set_magnames(cat)\n self.pdf_dict = self._calc_pdf(cat, magmin, magmax, magbinsize)\n\n @property\n def magnames(self):\n return list(self.pdf_dict.keys())\n\n def bins_midvals(self, magcol):\n edges = self.pdf_dict[magcol]['bins']\n\n return (edges[1:] + edges[:-1])/2\n\n def interp(self, mags, magcol):\n assert magcol in self.pdf_dict\n\n bins = self.bins_midvals(magcol)\n pdf = self.pdf_dict[magcol]['pdf']\n itp = interp1d(\n bins, pdf, kind='nearest', fill_value=np.inf, bounds_error=False\n )\n # We use inf as fill_value because these results are mostly used\n # as divisor (e.g. LR method), this way we avoid dividing by zero.\n\n return itp(mags)\n\n def to_table(self):\n # TODO: how to build this table when magbinsize is 'auto'\n pdf_table = Table()\n pdf_table['MAG'] = self.bins_midvals(self.magnames[0])\n for mag in self.magnames:\n pdf_table['BKG_' + mag] = self.pdf_dict[mag]['pdf']\n\n return pdf_table\n\n def _set_magnames(self, cat):\n return cat.mags.colnames\n\n def _calc_pdf(self, cat, magmin, magmax, magbinsize):\n mags = cat.mags\n area = cat.area.to(u.arcsec**2)\n\n pdf_dict = {}\n for magcol in mags.colnames:\n pdf_dict[magcol] = self._mag_hist(\n mags[magcol], area, magmin, magmax, magbinsize\n )\n\n return pdf_dict\n\n def _mag_hist(self, mags, area, magmin, magmax, magbinsize):\n\n if magmin == 'auto':\n magmin = np.nanmin(mags)\n\n if magmax == 'auto':\n magmax = np.nanmax(mags)\n\n bins, magrange = _define_magbins(magmin, magmax, magbinsize)\n counts, bins = np.histogram(mags, range=magrange, bins=bins)\n magbinsize = np.diff(bins)\n\n maghist = {}\n maghist['bins'] = bins\n maghist['pdf'] = counts / magbinsize / area ## in arcsec**2!!!\n\n return maghist\n\n\ndef _define_magbins(magmin, magmax, magbinsize):\n if magbinsize == 'auto':\n bins = 'auto'\n else:\n nbins = 1 + int((magmax - magmin)/magbinsize)\n bins = np.linspace(magmin, magmax, num=nbins)\n\n limits = (magmin, magmax)\n\n return bins, limits"
] | [
[
"numpy.nanmax",
"matplotlib.pyplot.title",
"numpy.linspace",
"numpy.nanmin",
"scipy.ndimage.convolve",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"numpy.savetxt",
"matplotlib.pyplot.show",
"numpy.histogram",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
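The prior construction in the row above (the _mag_hist step) histograms the magnitudes of matched counterparts and of field sources, subtracts the field contribution rescaled to the search area, clips negative bins, smooths the result, and divides by the number of primary sources and the bin width. A minimal sketch of those steps on synthetic magnitudes (all numbers below are invented for illustration):

# Hedged sketch of the magnitude-prior bookkeeping with synthetic data.
import numpy as np
from scipy.ndimage import convolve

rng = np.random.default_rng(0)
target_mags = rng.normal(20.0, 1.0, 500)     # magnitudes of matched counterparts
field_mags = rng.uniform(16.0, 24.0, 5000)   # magnitudes of unrelated field sources
n_primary = 400                              # number of primary sources (assumed)
renorm = 0.05                                # assumed search-area / field-area ratio

bins = np.linspace(16, 24, 33)
target_counts, _ = np.histogram(target_mags, bins=bins)
field_counts, _ = np.histogram(field_mags, bins=bins)

prior = target_counts - field_counts * renorm
prior[prior < 0] = 0.0
prior = convolve(prior, [0.25, 0.5, 0.25])   # light smoothing, as in the source
prior = prior / n_primary / np.diff(bins)    # probability density per magnitude

print(prior.sum() * np.diff(bins)[0])        # overall identification fraction (the qcap value)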
predictive-analytics-lab/pgmpy | [
"6c2a31641adc72793acd130d007190fdb1632271",
"6c2a31641adc72793acd130d007190fdb1632271",
"6c2a31641adc72793acd130d007190fdb1632271"
] | [
"pgmpy/factors/discrete/JointProbabilityDistribution.py",
"pgmpy/estimators/CITests.py",
"pgmpy/factors/continuous/discretize.py"
] | [
"import itertools\nfrom operator import mul\n\nimport numpy as np\n\nfrom pgmpy.factors.discrete import DiscreteFactor\nfrom pgmpy.independencies import Independencies\nfrom pgmpy.extern.six.moves import range, zip\nfrom pgmpy.extern import six\n\n\nclass JointProbabilityDistribution(DiscreteFactor):\n \"\"\"\n Base class for Joint Probability Distribution\n\n Public Methods\n --------------\n conditional_distribution(values)\n create_bayesian_model()\n get_independencies()\n pmap()\n marginal_distribution(variables)\n minimal_imap()\n is_imap(model)\n \"\"\"\n\n def __init__(self, variables, cardinality, values):\n \"\"\"\n Initialize a Joint Probability Distribution class.\n\n Defined above, we have the following mapping from variable\n assignments to the index of the row vector in the value field:\n\n +-----+-----+-----+-------------------------+\n | x1 | x2 | x3 | P(x1, x2, x2) |\n +-----+-----+-----+-------------------------+\n | x1_0| x2_0| x3_0| P(x1_0, x2_0, x3_0) |\n +-----+-----+-----+-------------------------+\n | x1_1| x2_0| x3_0| P(x1_1, x2_0, x3_0) |\n +-----+-----+-----+-------------------------+\n | x1_0| x2_1| x3_0| P(x1_0, x2_1, x3_0) |\n +-----+-----+-----+-------------------------+\n | x1_1| x2_1| x3_0| P(x1_1, x2_1, x3_0) |\n +-----+-----+-----+-------------------------+\n | x1_0| x2_0| x3_1| P(x1_0, x2_0, x3_1) |\n +-----+-----+-----+-------------------------+\n | x1_1| x2_0| x3_1| P(x1_1, x2_0, x3_1) |\n +-----+-----+-----+-------------------------+\n | x1_0| x2_1| x3_1| P(x1_0, x2_1, x3_1) |\n +-----+-----+-----+-------------------------+\n | x1_1| x2_1| x3_1| P(x1_1, x2_1, x3_1) |\n +-----+-----+-----+-------------------------+\n\n Parameters\n ----------\n variables: list\n List of scope of Joint Probability Distribution.\n cardinality: list, array_like\n List of cardinality of each variable\n value: list, array_like\n List or array of values of factor.\n A Joint Probability Distribution's values are stored in a row\n vector in the value using an ordering such that the left-most\n variables as defined in the variable field cycle through their\n values the fastest.\n\n Examples\n --------\n >>> import numpy as np\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution\n >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)\n >>> print(prob)\n x1 x2 x3 P(x1,x2,x3)\n ---- ---- ---- -------------\n x1_0 x2_0 x3_0 0.1250\n x1_0 x2_0 x3_1 0.1250\n x1_0 x2_1 x3_0 0.1250\n x1_0 x2_1 x3_1 0.1250\n x1_1 x2_0 x3_0 0.1250\n x1_1 x2_0 x3_1 0.1250\n x1_1 x2_1 x3_0 0.1250\n x1_1 x2_1 x3_1 0.1250\n \"\"\"\n if np.isclose(np.sum(values), 1):\n super(JointProbabilityDistribution, self).__init__(variables, cardinality, values)\n else:\n raise ValueError(\"The probability values doesn't sum to 1.\")\n\n def __repr__(self):\n var_card = \", \".join(\n [\n \"{var}:{card}\".format(var=var, card=card)\n for var, card in zip(self.variables, self.cardinality)\n ]\n )\n return \"<Joint Distribution representing P({var_card}) at {address}>\".format(\n address=hex(id(self)), var_card=var_card\n )\n\n def __str__(self):\n if six.PY2:\n return self._str(phi_or_p=\"P\", tablefmt=\"pqsl\")\n else:\n return self._str(phi_or_p=\"P\")\n\n def marginal_distribution(self, variables, inplace=True):\n \"\"\"\n Returns the marginal distribution over variables.\n\n Parameters\n ----------\n variables: string, list, tuple, set, dict\n Variable or list of variables over which marginal distribution needs\n to be calculated\n inplace: Boolean (default True)\n If 
False return a new instance of JointProbabilityDistribution\n\n Examples\n --------\n >>> import numpy as np\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution\n >>> values = np.random.rand(12)\n >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))\n >>> prob.marginal_distribution(['x1', 'x2'])\n >>> print(prob)\n x1 x2 P(x1,x2)\n ---- ---- ----------\n x1_0 x2_0 0.1502\n x1_0 x2_1 0.1626\n x1_0 x2_2 0.1197\n x1_1 x2_0 0.2339\n x1_1 x2_1 0.1996\n x1_1 x2_2 0.1340\n \"\"\"\n return self.marginalize(\n list(\n set(list(self.variables))\n - set(variables if isinstance(variables, (list, set, dict, tuple)) else [variables])\n ),\n inplace=inplace,\n )\n\n def check_independence(self, event1, event2, event3=None, condition_random_variable=False):\n \"\"\"\n Check if the Joint Probability Distribution satisfies the given independence condition.\n\n Parameters\n ----------\n event1: list\n random variable whose independence is to be checked.\n event2: list\n random variable from which event1 is independent.\n values: 2D array or list like or 1D array or list like\n A 2D list of tuples of the form (variable_name, variable_state).\n A 1D list or array-like to condition over randome variables (condition_random_variable must be True)\n The values on which to condition the Joint Probability Distribution.\n condition_random_variable: Boolean (Default false)\n If true and event3 is not None than will check independence condition over random variable.\n\n For random variables say X, Y, Z to check if X is independent of Y given Z.\n event1 should be either X or Y.\n event2 should be either Y or X.\n event3 should Z.\n\n Examples\n --------\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution as JPD\n >>> prob = JPD(['I','D','G'],[2,2,3],\n [0.126,0.168,0.126,0.009,0.045,0.126,0.252,0.0224,0.0056,0.06,0.036,0.024])\n >>> prob.check_independence(['I'], ['D'])\n True\n >>> prob.check_independence(['I'], ['D'], [('G', 1)]) # Conditioning over G_1\n False\n >>> # Conditioning over random variable G\n >>> prob.check_independence(['I'], ['D'], ('G',), condition_random_variable=True)\n False\n \"\"\"\n JPD = self.copy()\n if isinstance(event1, six.string_types):\n raise TypeError(\"Event 1 should be a list or array-like structure\")\n\n if isinstance(event2, six.string_types):\n raise TypeError(\"Event 2 should be a list or array-like structure\")\n\n if event3:\n if isinstance(event3, six.string_types):\n raise TypeError(\"Event 3 cannot of type string\")\n\n elif condition_random_variable:\n if not all(isinstance(var, six.string_types) for var in event3):\n raise TypeError(\"Event3 should be a 1d list of strings\")\n event3 = list(event3)\n # Using the definition of conditional independence\n # If P(X,Y|Z) = P(X|Z)*P(Y|Z)\n # This can be expanded to P(X,Y,Z)*P(Z) == P(X,Z)*P(Y,Z)\n phi_z = JPD.marginal_distribution(event3, inplace=False).to_factor()\n for variable_pair in itertools.product(event1, event2):\n phi_xyz = JPD.marginal_distribution(\n event3 + list(variable_pair), inplace=False\n ).to_factor()\n phi_xz = JPD.marginal_distribution(\n event3 + [variable_pair[0]], inplace=False\n ).to_factor()\n phi_yz = JPD.marginal_distribution(\n event3 + [variable_pair[1]], inplace=False\n ).to_factor()\n if phi_xyz * phi_z != phi_xz * phi_yz:\n return False\n return True\n else:\n JPD.conditional_distribution(event3)\n\n for variable_pair in itertools.product(event1, event2):\n if JPD.marginal_distribution(variable_pair, inplace=False) != 
JPD.marginal_distribution(\n variable_pair[0], inplace=False\n ) * JPD.marginal_distribution(variable_pair[1], inplace=False):\n return False\n return True\n\n def get_independencies(self, condition=None):\n \"\"\"\n Returns the independent variables in the joint probability distribution.\n Returns marginally independent variables if condition=None.\n Returns conditionally independent variables if condition!=None\n\n Parameter\n ---------\n condition: array_like\n Random Variable on which to condition the Joint Probability Distribution.\n\n Examples\n --------\n >>> import numpy as np\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution\n >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)\n >>> prob.get_independencies()\n (x1 _|_ x2)\n (x1 _|_ x3)\n (x2 _|_ x3)\n \"\"\"\n JPD = self.copy()\n if condition:\n JPD.conditional_distribution(condition)\n independencies = Independencies()\n for variable_pair in itertools.combinations(list(JPD.variables), 2):\n if JPD.marginal_distribution(variable_pair, inplace=False) == JPD.marginal_distribution(\n variable_pair[0], inplace=False\n ) * JPD.marginal_distribution(variable_pair[1], inplace=False):\n independencies.add_assertions(variable_pair)\n return independencies\n\n def conditional_distribution(self, values, inplace=True):\n \"\"\"\n Returns Conditional Probability Distribution after setting values to 1.\n\n Parameters\n ----------\n values: list or array_like\n A list of tuples of the form (variable_name, variable_state).\n The values on which to condition the Joint Probability Distribution.\n inplace: Boolean (default True)\n If False returns a new instance of JointProbabilityDistribution\n\n Examples\n --------\n >>> import numpy as np\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution\n >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)\n >>> prob.conditional_distribution([('x1', 1)])\n >>> print(prob)\n x2 x3 P(x2,x3)\n ---- ---- ----------\n x2_0 x3_0 0.2500\n x2_0 x3_1 0.2500\n x2_1 x3_0 0.2500\n x2_1 x3_1 0.2500\n \"\"\"\n JPD = self if inplace else self.copy()\n JPD.reduce(values)\n JPD.normalize()\n if not inplace:\n return JPD\n\n def copy(self):\n \"\"\"\n Returns A copy of JointProbabilityDistribution object\n\n Examples\n ---------\n >>> import numpy as np\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution\n >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)\n >>> prob_copy = prob.copy()\n >>> prob_copy.values == prob.values\n True\n >>> prob_copy.variables == prob.variables\n True\n >>> prob_copy.variables[1] = 'y'\n >>> prob_copy.variables == prob.variables\n False\n \"\"\"\n return JointProbabilityDistribution(self.scope(), self.cardinality, self.values)\n\n def minimal_imap(self, order):\n \"\"\"\n Returns a Bayesian Model which is minimal IMap of the Joint Probability Distribution\n considering the order of the variables.\n\n Parameters\n ----------\n order: array-like\n The order of the random variables.\n\n Examples\n --------\n >>> import numpy as np\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution\n >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)\n >>> bayesian_model = prob.minimal_imap(order=['x2', 'x1', 'x3'])\n >>> bayesian_model\n <pgmpy.models.models.models at 0x7fd7440a9320>\n >>> bayesian_model.edges()\n [('x1', 'x3'), ('x2', 'x3')]\n \"\"\"\n from pgmpy.models import BayesianModel\n\n def 
get_subsets(u):\n for r in range(len(u) + 1):\n for i in itertools.combinations(u, r):\n yield i\n\n G = BayesianModel()\n for variable_index in range(len(order)):\n u = order[:variable_index]\n for subset in get_subsets(u):\n if len(subset) < len(u) and self.check_independence(\n [order[variable_index]], set(u) - set(subset), subset, True\n ):\n G.add_edges_from([(variable, order[variable_index]) for variable in subset])\n return G\n\n def is_imap(self, model):\n \"\"\"\n Checks whether the given BayesianModel is Imap of JointProbabilityDistribution\n\n Parameters\n -----------\n model : An instance of BayesianModel Class, for which you want to\n check the Imap\n\n Returns\n --------\n boolean : True if given bayesian model is Imap for Joint Probability Distribution\n False otherwise\n Examples\n --------\n >>> from pgmpy.models import BayesianModel\n >>> from pgmpy.factors.discrete import TabularCPD\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution\n >>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')])\n >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])\n >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])\n >>> grade_cpd = TabularCPD('grade', 3,\n ... [[0.1,0.1,0.1,0.1,0.1,0.1],\n ... [0.1,0.1,0.1,0.1,0.1,0.1],\n ... [0.8,0.8,0.8,0.8,0.8,0.8]],\n ... evidence=['diff', 'intel'],\n ... evidence_card=[2, 3])\n >>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd)\n >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,\n 0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]\n >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)\n >>> JPD.is_imap(bm)\n True\n \"\"\"\n from pgmpy.models import BayesianModel\n\n if not isinstance(model, BayesianModel):\n raise TypeError(\"model must be an instance of BayesianModel\")\n factors = [cpd.to_factor() for cpd in model.get_cpds()]\n factor_prod = six.moves.reduce(mul, factors)\n JPD_fact = DiscreteFactor(self.variables, self.cardinality, self.values)\n if JPD_fact == factor_prod:\n return True\n else:\n return False\n\n def to_factor(self):\n \"\"\"\n Returns JointProbabilityDistribution as a DiscreteFactor object\n\n Examples\n --------\n >>> import numpy as np\n >>> from pgmpy.factors.discrete import JointProbabilityDistribution\n >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)\n >>> phi = prob.to_factor()\n >>> type(phi)\n pgmpy.factors.DiscreteFactor.DiscreteFactor\n \"\"\"\n return DiscreteFactor(self.variables, self.cardinality, self.values)\n\n def pmap(self):\n pass\n",
"from warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy import stats\n\n\ndef chi_square(X, Y, Z, data, **kwargs):\n \"\"\"\n Chi-square conditional independence test.\n Tests the null hypothesis that X is independent from Y given Zs.\n\n This is done by comparing the observed frequencies with the expected\n frequencies if X,Y were conditionally independent, using a chisquare\n deviance statistic. The expected frequencies given independence are\n `P(X,Y,Zs) = P(X|Zs)*P(Y|Zs)*P(Zs)`. The latter term can be computed\n as `P(X,Zs)*P(Y,Zs)/P(Zs).\n\n Parameters\n ----------\n X: int, string, hashable object\n A variable name contained in the data set\n Y: int, string, hashable object\n A variable name contained in the data set, different from X\n Zs: list of variable names\n A list of variable names contained in the data set, different from X and Y.\n This is the separating set that (potentially) makes X and Y independent.\n Default: []\n\n Returns\n -------\n chi2: float\n The chi2 test statistic.\n p_value: float\n The p_value, i.e. the probability of observing the computed chi2\n statistic (or an even higher value), given the null hypothesis\n that X _|_ Y | Zs.\n sufficient_data: bool\n A flag that indicates if the sample size is considered sufficient.\n As in [4], require at least 5 samples per parameter (on average).\n That is, the size of the data set must be greater than\n `5 * (c(X) - 1) * (c(Y) - 1) * prod([c(Z) for Z in Zs])`\n (c() denotes the variable cardinality).\n\n\n References\n ----------\n [1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009\n Section 18.2.2.3 (page 789)\n [2] Neapolitan, Learning Bayesian Networks, Section 10.3 (page 600ff)\n http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf\n [3] Chi-square test https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test#Test_of_independence\n [4] Tsamardinos et al., The max-min hill-climbing BN structure learning algorithm, 2005, Section 4\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> from pgmpy.estimators import ConstraintBasedEstimator\n >>> data = pd.DataFrame(np.random.randint(0, 2, size=(50000, 4)), columns=list('ABCD'))\n >>> data['E'] = data['A'] + data['B'] + data['C']\n >>> c = ConstraintBasedEstimator(data)\n >>> print(c.test_conditional_independence('A', 'C')) # independent\n (0.95035644482050263, 0.8132617142699442, True)\n >>> print(c.test_conditional_independence('A', 'B', 'D')) # independent\n (5.5227461320130899, 0.59644169242588885, True)\n >>> print(c.test_conditional_independence('A', 'B', ['D', 'E'])) # dependent\n (9192.5172226063387, 0.0, True)\n \"\"\"\n\n if isinstance(Z, (frozenset, list, set, tuple)):\n Z = list(Z)\n else:\n Z = [Z]\n\n state_names = kwargs[\"state_names\"]\n num_params = (\n (len(state_names[X]) - 1)\n * (len(state_names[Y]) - 1)\n * np.prod([len(state_names[z]) for z in Z])\n )\n sufficient_data = len(data) >= num_params * 5\n\n if not sufficient_data:\n warn(\n \"Insufficient data for testing {0} _|_ {1} | {2}. 
\".format(X, Y, Z)\n + \"At least {0} samples recommended, {1} present.\".format(5 * num_params, len(data))\n )\n\n # compute actual frequency/state_count table:\n # = P(X,Y,Zs)\n XYZ_state_counts = pd.crosstab(index=data[X], columns=[data[Y]] + [data[z] for z in Z])\n # reindex to add missing rows & columns (if some values don't appear in data)\n row_index = state_names[X]\n column_index = pd.MultiIndex.from_product(\n [state_names[Y]] + [state_names[z] for z in Z], names=[Y] + Z\n )\n if not isinstance(XYZ_state_counts.columns, pd.MultiIndex):\n XYZ_state_counts.columns = pd.MultiIndex.from_arrays([XYZ_state_counts.columns])\n XYZ_state_counts = XYZ_state_counts.reindex(index=row_index, columns=column_index).fillna(0)\n\n # compute the expected frequency/state_count table if X _|_ Y | Zs:\n # = P(X|Zs)*P(Y|Zs)*P(Zs) = P(X,Zs)*P(Y,Zs)/P(Zs)\n if Z:\n XZ_state_counts = XYZ_state_counts.sum(axis=1, level=Z) # marginalize out Y\n YZ_state_counts = XYZ_state_counts.sum().unstack(Z) # marginalize out X\n else:\n XZ_state_counts = XYZ_state_counts.sum(axis=1)\n YZ_state_counts = XYZ_state_counts.sum()\n Z_state_counts = YZ_state_counts.sum() # marginalize out both\n\n XYZ_expected = pd.DataFrame(index=XYZ_state_counts.index, columns=XYZ_state_counts.columns)\n for X_val in XYZ_expected.index:\n if Z:\n for Y_val in XYZ_expected.columns.levels[0]:\n XYZ_expected.loc[X_val, Y_val] = (\n XZ_state_counts.loc[X_val] * YZ_state_counts.loc[Y_val] / Z_state_counts\n ).values\n else:\n for Y_val in XYZ_expected.columns:\n XYZ_expected.loc[X_val, Y_val] = (\n XZ_state_counts.loc[X_val] * YZ_state_counts.loc[Y_val] / float(Z_state_counts)\n )\n\n observed = XYZ_state_counts.values.flatten()\n expected = XYZ_expected.fillna(0).values.flatten()\n # remove elements where the expected value is 0;\n # this also corrects the degrees of freedom for chisquare\n observed, expected = zip(*((o, e) for o, e in zip(observed, expected) if not e == 0))\n\n chi2, significance_level = stats.chisquare(observed, expected)\n\n return chi2, significance_level\n\n\ndef pearsonr(X, Y, Z, data):\n \"\"\"\n Computes Pearson correlation coefficient and p-value for testing non-correlation. Should be used\n only on continuous data. In case when :math:`Z != \\null` uses linear regression and computes pearson\n coefficient on residuals.\n\n Parameters\n ----------\n X: str\n The first variable for testing the independence condition X _|_ Y | Z\n\n Y: str\n The second variable for testing the independence condition X _|_ Y | Z\n\n Z: list/array-like\n A list of conditional variable for testing the condition X _|_ Y | Z\n\n data: pandas.DataFrame\n The dataset in which to test the indepenedence condition.\n\n Returns\n -------\n Pearson's correlation coefficient: float\n p-value: float\n\n References\n ----------\n [1] https://en.wikipedia.org/wiki/Pearson_correlation_coefficient\n [2] https://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression\n \"\"\"\n # Step 1: Test if the inputs are correct\n if not hasattr(Z, \"__iter__\"):\n raise ValueError(\"Variable Z. Expected type: iterable. Got type: {t}\".format(t=type(Z)))\n else:\n Z = list(Z)\n\n if not isinstance(data, pd.DataFrame):\n raise ValueError(\n \"Variable data. Expected type: pandas.DataFrame. 
Got type: {t}\".format(t=type(data))\n )\n\n # Step 2: If Z is empty compute a non-conditional test.\n if len(Z) == 0:\n return stats.pearsonr(data.loc[:, X], data.loc[:, Y])\n\n # Step 3: If Z is non-empty, use linear regression to compute residuals and test independence on it.\n else:\n X_coef = np.linalg.lstsq(data.loc[:, Z], data.loc[:, X], rcond=None)[0]\n Y_coef = np.linalg.lstsq(data.loc[:, Z], data.loc[:, Y], rcond=None)[0]\n\n residual_X = data.loc[:, X] - data.loc[:, Z].dot(X_coef)\n residual_Y = data.loc[:, Y] - data.loc[:, Z].dot(Y_coef)\n\n return stats.pearsonr(residual_X, residual_Y)\n",
"from __future__ import division\n\nfrom six import with_metaclass\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nfrom scipy import integrate\n\n\nclass BaseDiscretizer(with_metaclass(ABCMeta)):\n \"\"\"\n Base class for the discretizer classes in pgmpy. The discretizer\n classes are used to discretize a continuous random variable\n distribution into discrete probability masses.\n\n Parameters\n ----------\n factor: A ContinuousNode or a ContinuousFactor object\n the continuous node or factor representing the distribution\n to be discretized.\n\n low, high: float\n the range over which the function will be discretized.\n\n cardinality: int\n the number of states required in the discretized output.\n\n Examples\n --------\n >>> from scipy.stats import norm\n >>> from pgmpy.factors.continuous import ContinuousNode\n >>> normal = ContinuousNode(norm(0, 1).pdf)\n >>> from pgmpy.discretize import BaseDiscretizer\n >>> class ChildDiscretizer(BaseDiscretizer):\n ... def get_discrete_values(self):\n ... pass\n >>> discretizer = ChildDiscretizer(normal, -3, 3, 10)\n >>> discretizer.factor\n <pgmpy.factors.continuous.ContinuousNode.ContinuousNode object at 0x04C98190>\n >>> discretizer.cardinality\n 10\n >>> discretizer.get_labels()\n ['x=-3.0', 'x=-2.4', 'x=-1.8', 'x=-1.2', 'x=-0.6', 'x=0.0', 'x=0.6', 'x=1.2', 'x=1.8', 'x=2.4']\n\n \"\"\"\n\n def __init__(self, factor, low, high, cardinality):\n self.factor = factor\n self.low = low\n self.high = high\n self.cardinality = cardinality\n\n @abstractmethod\n def get_discrete_values(self):\n \"\"\"\n This method implements the algorithm to discretize the given\n continuous distribution.\n\n It must be implemented by all the subclasses of BaseDiscretizer.\n\n Returns\n -------\n A list of discrete values or a DiscreteFactor object.\n \"\"\"\n pass\n\n def get_labels(self):\n \"\"\"\n Returns a list of strings representing the values about\n which the discretization method calculates the probabilty\n masses.\n\n Default value is the points -\n [low, low+step, low+2*step, ......... , high-step]\n unless the method is overridden by a subclass.\n\n Examples\n --------\n >>> from pgmpy.factors import ContinuousNode\n >>> from pgmpy.discretize import BaseDiscretizer\n >>> class ChildDiscretizer(BaseDiscretizer):\n ... def get_discrete_values(self):\n ... 
pass\n >>> from scipy.stats import norm\n >>> node = ContinuousNode(norm(0).pdf)\n >>> child = ChildDiscretizer(node, -5, 5, 20)\n >>> chld.get_labels()\n ['x=-5.0', 'x=-4.5', 'x=-4.0', 'x=-3.5', 'x=-3.0', 'x=-2.5',\n 'x=-2.0', 'x=-1.5', 'x=-1.0', 'x=-0.5', 'x=0.0', 'x=0.5', 'x=1.0',\n 'x=1.5', 'x=2.0', 'x=2.5', 'x=3.0', 'x=3.5', 'x=4.0', 'x=4.5']\n\n \"\"\"\n step = (self.high - self.low) / self.cardinality\n labels = [\n \"x={i}\".format(i=str(i)) for i in np.round(np.arange(self.low, self.high, step), 3)\n ]\n return labels\n\n\nclass RoundingDiscretizer(BaseDiscretizer):\n \"\"\"\n This class uses the rounding method for discretizing the\n given continuous distribution.\n\n For the rounding method,\n\n The probability mass is,\n cdf(x+step/2)-cdf(x), for x = low\n\n cdf(x+step/2)-cdf(x-step/2), for low < x <= high\n\n where, cdf is the cumulative density function of the distribution\n and step = (high-low)/cardinality.\n\n Examples\n --------\n >>> import numpy as np\n >>> from pgmpy.factors.continuous import ContinuousNode\n >>> from pgmpy.factors.continuous import RoundingDiscretizer\n >>> std_normal_pdf = lambda x : np.exp(-x*x/2) / (np.sqrt(2*np.pi))\n >>> std_normal = ContinuousNode(std_normal_pdf)\n >>> std_normal.discretize(RoundingDiscretizer, low=-3, high=3,\n ... cardinality=12)\n [0.001629865203424451, 0.009244709419989363, 0.027834684208773178,\n 0.065590616803038182, 0.120977578710013, 0.17466632194020804,\n 0.19741265136584729, 0.17466632194020937, 0.12097757871001302,\n 0.065590616803036905, 0.027834684208772664, 0.0092447094199902269]\n \"\"\"\n\n def get_discrete_values(self):\n step = (self.high - self.low) / self.cardinality\n\n # for x=[low]\n discrete_values = [self.factor.cdf(self.low + step / 2) - self.factor.cdf(self.low)]\n\n # for x=[low+step, low+2*step, ........., high-step]\n points = np.linspace(self.low + step, self.high - step, self.cardinality - 1)\n discrete_values.extend(\n [self.factor.cdf(i + step / 2) - self.factor.cdf(i - step / 2) for i in points]\n )\n\n return discrete_values\n\n\nclass UnbiasedDiscretizer(BaseDiscretizer):\n \"\"\"\n This class uses the unbiased method for discretizing the\n given continuous distribution.\n\n The unbiased method for discretization is the matching of the\n first moment method. It involves calculating the first order\n limited moment of the distribution which is done by the _lim_moment\n method.\n\n For this method,\n\n The probability mass is,\n (E(x) - E(x + step))/step + 1 - cdf(x), for x = low\n\n (2 * E(x) - E(x - step) - E(x + step))/step, for low < x < high\n\n (E(x) - E(x - step))/step - 1 + cdf(x), for x = high\n\n where, E(x) is the first limiting moment of the distribution\n about the point x, cdf is the cumulative density function\n and step = (high-low)/cardinality.\n\n Reference\n ---------\n Klugman, S. A., Panjer, H. H. and Willmot, G. 
E.,\n Loss Models, From Data to Decisions, Fourth Edition,\n Wiley, section 9.6.5.2 (Method of local monment matching) and\n exercise 9.41.\n\n Examples\n --------\n >>> import numpy as np\n >>> from pgmpy.factors import ContinuousNode\n >>> from pgmpy.factors.continuous import UnbiasedDiscretizer\n # exponential distribution with rate = 2\n >>> exp_pdf = lambda x: 2*np.exp(-2*x) if x>=0 else 0\n >>> exp_node = ContinuousNode(exp_pdf)\n >>> exp_node.discretize(UnbiasedDiscretizer, low=0, high=5, cardinality=10)\n [0.39627368905806137, 0.4049838434034298, 0.13331784003148325,\n 0.043887287876647259, 0.014447413395300212, 0.0047559685431339703,\n 0.0015656350182896128, 0.00051540201980112557, 0.00016965346326140994,\n 3.7867260839208328e-05]\n\n \"\"\"\n\n def get_discrete_values(self):\n lev = self._lim_moment\n step = (self.high - self.low) / (self.cardinality - 1)\n\n # for x=[low]\n discrete_values = [\n (lev(self.low) - lev(self.low + step)) / step + 1 - self.factor.cdf(self.low)\n ]\n\n # for x=[low+step, low+2*step, ........., high-step]\n points = np.linspace(self.low + step, self.high - step, self.cardinality - 2)\n discrete_values.extend(\n [(2 * lev(i) - lev(i - step) - lev(i + step)) / step for i in points]\n )\n\n # for x=[high]\n discrete_values.append(\n (lev(self.high) - lev(self.high - step)) / step - 1 + self.factor.cdf(self.high)\n )\n\n return discrete_values\n\n def _lim_moment(self, u, order=1):\n \"\"\"\n This method calculates the kth order limiting moment of\n the distribution. It is given by -\n\n E(u) = Integral (-inf to u) [ (x^k)*pdf(x) dx ] + (u^k)(1-cdf(u))\n\n where, pdf is the probability density function and cdf is the\n cumulative density function of the distribution.\n\n Reference\n ---------\n Klugman, S. A., Panjer, H. H. and Willmot, G. E.,\n Loss Models, From Data to Decisions, Fourth Edition,\n Wiley, definition 3.5 and equation 3.8.\n\n Parameters\n ----------\n u: float\n The point at which the moment is to be calculated.\n\n order: int\n The order of the moment, default is first order.\n \"\"\"\n\n def fun(x):\n return np.power(x, order) * self.factor.pdf(x)\n\n return integrate.quad(fun, -np.inf, u)[0] + np.power(u, order) * (1 - self.factor.cdf(u))\n\n def get_labels(self):\n labels = list(\n \"x={i}\".format(i=str(i))\n for i in np.round(np.linspace(self.low, self.high, self.cardinality), 3)\n )\n return labels\n"
] | [
[
"numpy.sum"
],
[
"pandas.crosstab",
"scipy.stats.pearsonr",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame",
"numpy.linalg.lstsq",
"pandas.MultiIndex.from_product",
"scipy.stats.chisquare"
],
[
"scipy.integrate.quad",
"numpy.arange",
"numpy.linspace",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
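The pearsonr conditional-independence test in the row above regresses X and Y on the conditioning set Z with ```np.linalg.lstsq``` and correlates the residuals. A minimal sketch of that idea on synthetic data (the data-generating process below is invented for illustration):

# Hedged sketch: partial correlation via regression residuals.
import numpy as np
import pandas as pd
from scipy import stats

rng = np.random.default_rng(1)
z = rng.normal(size=2000)
x = 2.0 * z + rng.normal(size=2000)    # X depends on Z only
y = -1.5 * z + rng.normal(size=2000)   # Y depends on Z only
data = pd.DataFrame({"X": x, "Y": y, "Z": z})

# remove the linear effect of Z from X and from Y
coef_x = np.linalg.lstsq(data[["Z"]], data["X"], rcond=None)[0]
coef_y = np.linalg.lstsq(data[["Z"]], data["Y"], rcond=None)[0]
res_x = data["X"] - data[["Z"]].dot(coef_x)
res_y = data["Y"] - data[["Z"]].dot(coef_y)

print(stats.pearsonr(data["X"], data["Y"]))   # strong marginal correlation
print(stats.pearsonr(res_x, res_y))           # near zero once Z is accounted for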
Deci-AI/super-gradients | [
"bfed440ecaf485af183570bf965eb5b74cb9f832",
"bfed440ecaf485af183570bf965eb5b74cb9f832",
"658f638389654668a085e23c3b19622241fd9267",
"658f638389654668a085e23c3b19622241fd9267"
] | [
"src/super_gradients/training/losses/r_squared_loss.py",
"src/super_gradients/training/models/classification_models/senet.py",
"src/super_gradients/examples/cifar10_training_torch_objects/cifar10_training_torch_objects_example.py",
"tests/unit_tests/early_stop_test.py"
] | [
"from __future__ import print_function, absolute_import\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.modules.loss import _Loss\n\nfrom super_gradients.training.utils import convert_to_tensor\n\n\nclass RSquaredLoss(_Loss):\n\n def forward(self, output, target):\n # FIXME - THIS NEEDS TO BE CHANGED SUCH THAT THIS CLASS INHERETS FROM _Loss (TAKE A LOOK AT YoLoV3DetectionLoss)\n \"\"\"Computes the R-squared for the output and target values\n :param output: Tensor / Numpy / List\n The prediction\n :param target: Tensor / Numpy / List\n The corresponding lables\n \"\"\"\n # Convert to tensor\n output = convert_to_tensor(output)\n target = convert_to_tensor(target)\n\n criterion_mse = nn.MSELoss()\n return 1 - criterion_mse(output, target).item() / torch.var(target).item()\n",
"'''SENet in PyTorch.\n\nSENet is the winner of ImageNet-2017. The paper is not released yet.\n\nCode adapted from https://github.com/fastai/imagenet-fast/blob/master/cifar10/models/cifar10/senet.py\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom super_gradients.training.models.sg_module import SgModule\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes)\n )\n\n # SE layers\n self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1) # Use nn.Conv2d instead of nn.Linear\n self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n\n # Squeeze\n w = F.avg_pool2d(out, out.size(2))\n w = F.relu(self.fc1(w))\n w = F.sigmoid(self.fc2(w))\n # Excitation\n out = out * w # New broadcasting feature from v0.2!\n\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass PreActBlock(nn.Module):\n def __init__(self, in_planes, planes, stride=1):\n super(PreActBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)\n )\n\n # SE layers\n self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1)\n self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)\n\n def forward(self, x):\n out = F.relu(self.bn1(x))\n shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x\n out = self.conv1(out)\n out = self.conv2(F.relu(self.bn2(out)))\n\n # Squeeze\n w = F.avg_pool2d(out, out.size(2))\n w = F.relu(self.fc1(w))\n w = F.sigmoid(self.fc2(w))\n # Excitation\n out = out * w\n\n out += shortcut\n return out\n\n\nclass SENet(SgModule):\n def __init__(self, block, num_blocks, num_classes=10):\n super(SENet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = 
self.linear(out)\n return out\n\n\ndef SENet18():\n return SENet(PreActBlock, [2, 2, 2, 2])\n\n\ndef test():\n net = SENet18()\n y = net(torch.randn(1, 3, 32, 32))\n print(y.size())\n\n# test()\n",
"\n\"\"\"\nCifar10 training with SuperGradients training with the following initialized torch objects:\n\n DataLoaders\n Optimizers\n Networks (nn.Module)\n Schedulers\n Loss functions\n\nMain purpose is to demonstrate training in SG with minimal abstraction and maximal flexibility\n\"\"\"\n\nfrom super_gradients import SgModel\nfrom super_gradients.training.metrics.classification_metrics import Accuracy, Top5\nfrom super_gradients.training import MultiGPUMode\nfrom torch.optim import ASGD\nfrom torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau\nfrom torch.nn import CrossEntropyLoss\nfrom super_gradients.training.utils.callbacks import Phase, LRSchedulerCallback\nfrom torchvision.datasets import CIFAR10\nfrom torchvision.transforms import ToTensor\nfrom torchvision.models import resnet18\nfrom torch.utils.data import DataLoader\n\n\n# Define any torch DataLoaders, need at least train & valid loaders\ntrain_dataset = CIFAR10(root='data/', download=True, train=True, transform=ToTensor())\nvalid_dataset = CIFAR10(root='data/', download=True, train=False, transform=ToTensor())\n\ntrain_loader = DataLoader(train_dataset, shuffle=True, batch_size=16)\nvalid_loader = DataLoader(valid_dataset, batch_size=32)\n\n# Define any network of type nn.Module\nnet = resnet18(num_classes=len(train_dataset.classes))\n\n# Define any optimizer of type torch.optim.Optimizer (and schedulers)\nlr = 2.5e-4\noptimizer = ASGD(net.parameters(), lr=lr, weight_decay=0.0001)\n\nrop_lr_scheduler = ReduceLROnPlateau(optimizer, mode=\"max\", patience=10, verbose=True)\nstep_lr_scheduler = MultiStepLR(optimizer, milestones=[0, 150, 200], gamma=0.1)\n\n# Define any loss function of type torch.nn.modules.loss._Loss\nloss_fn = CrossEntropyLoss()\n\n# Define phase callbacks\nphase_callbacks = [LRSchedulerCallback(scheduler=rop_lr_scheduler, phase=Phase.VALIDATION_EPOCH_END, metric_name=\"Accuracy\"),\n LRSchedulerCallback(scheduler=step_lr_scheduler, phase=Phase.TRAIN_EPOCH_END)]\n\n# Bring everything together with SgModel and start training\nmodel = SgModel(\"Cifar10_external_objects_example\", multi_gpu=MultiGPUMode.OFF,\n train_loader=train_loader, valid_loader=valid_loader, classes=train_dataset.classes)\nmodel.build_model(net)\n\ntrain_params = {\"max_epochs\": 300,\n \"phase_callbacks\": phase_callbacks,\n \"initial_lr\": lr,\n \"loss\": loss_fn,\n \"criterion_params\": {},\n 'optimizer': optimizer,\n \"train_metrics_list\": [Accuracy(), Top5()],\n \"valid_metrics_list\": [Accuracy(), Top5()],\n \"loss_logging_items_names\": [\"Loss\"], \"metric_to_watch\": \"Accuracy\",\n \"greater_metric_to_watch_is_better\": True,\n \"lr_scheduler_step_type\": \"epoch\"}\n\nmodel.train(training_params=train_params)\n",
"import torch\nimport torch.nn as nn\nimport unittest\n\nfrom super_gradients.training.utils.early_stopping import EarlyStop\nfrom super_gradients.training.utils.callbacks import Phase\nfrom super_gradients.training.sg_model import SgModel\nfrom super_gradients.training.datasets.dataset_interfaces import ClassificationTestDatasetInterface\nfrom super_gradients.training.models.classification_models.resnet import ResNet18\nfrom super_gradients.training.metrics import Accuracy, Top5\nfrom torchmetrics.metric import Metric\n\n\nclass MetricTest(Metric):\n def __init__(self, metric_values):\n super().__init__()\n self.metrics_values = metric_values\n self.count = 0\n\n def update(self, *args, **kwargs) -> None:\n pass\n\n def compute(self):\n value = self.metrics_values[self.count]\n self.count += 1\n return value\n\n\nclass LossTest(nn.Module):\n def __init__(self, loss_values):\n super(LossTest, self).__init__()\n self.loss_values = loss_values\n self.count = 0\n\n def forward(self, pred, label):\n # double the loss values, one step for training and one for validation\n # make returned loss differentiable\n loss = (pred * 0).sum() + self.loss_values[self.count // 2]\n self.count += 1\n return loss, torch.stack([loss]).detach()\n\n\nclass EarlyStopTest(unittest.TestCase):\n def setUp(self) -> None:\n # batch_size is equal to length of dataset, to have only one step per epoch, to ease the test.\n dataset_params = {\"batch_size\": 10}\n self.dataset = ClassificationTestDatasetInterface(dataset_params=dataset_params, batch_size=10)\n self.net = ResNet18(num_classes=5, arch_params={})\n self.max_epochs = 10\n self.train_params = {\"max_epochs\": self.max_epochs, \"lr_updates\": [1], \"lr_decay_factor\": 0.1,\n \"lr_mode\": \"step\",\n \"lr_warmup_epochs\": 0, \"initial_lr\": 0.1, \"loss\": \"cross_entropy\", \"optimizer\": \"SGD\",\n \"criterion_params\": {}, \"optimizer_params\": {\"weight_decay\": 1e-4, \"momentum\": 0.9},\n \"train_metrics_list\": [Accuracy()], \"valid_metrics_list\": [Top5()],\n \"loss_logging_items_names\": [\"Loss\"], \"metric_to_watch\": \"Top5\",\n \"greater_metric_to_watch_is_better\": True, \"average_best_models\": False}\n\n def test_min_mode_patience_metric(self):\n \"\"\"\n Test for mode=min metric, test that training stops after no improvement in metric value for amount of `patience`\n epochs.\n \"\"\"\n model = SgModel(\"early_stop_test\", model_checkpoints_location='local')\n model.connect_dataset_interface(self.dataset)\n model.build_model(self.net)\n\n early_stop_loss = EarlyStop(Phase.VALIDATION_EPOCH_END, monitor=\"Loss\", mode=\"min\", patience=3, verbose=True)\n phase_callbacks = [early_stop_loss]\n\n loss_values = torch.tensor([1., 0.8, 0.81, 0.8, 0.9, 0.2, 0.1, 0.3, 0.05, 0.9])\n fake_loss = LossTest(loss_values)\n train_params = self.train_params.copy()\n train_params.update({\"loss\": fake_loss, \"phase_callbacks\": phase_callbacks})\n\n model.train(train_params)\n\n excepted_end_epoch = 5\n\n # count divided by 2, because loss counter used for both train and eval.\n self.assertEqual(excepted_end_epoch, fake_loss.count // 2)\n\n def test_max_mode_patience_metric(self):\n \"\"\"\n Test for mode=max metric, test that training stops after no improvement in metric value for amount of `patience`\n epochs.\n \"\"\"\n model = SgModel(\"early_stop_test\", model_checkpoints_location='local')\n model.connect_dataset_interface(self.dataset)\n model.build_model(self.net)\n\n early_stop_acc = EarlyStop(Phase.VALIDATION_EPOCH_END, monitor=\"MetricTest\", mode=\"max\", 
patience=3,\n verbose=True)\n phase_callbacks = [early_stop_acc]\n\n metric_values = torch.tensor([0.2, 0.1, 0.3, 0.28, 0.2, 0.1, 0.33, 0.05, 0.9, 0.99])\n fake_metric = MetricTest(metric_values)\n train_params = self.train_params.copy()\n train_params.update(\n {\"valid_metrics_list\": [fake_metric], \"metric_to_watch\": \"MetricTest\", \"phase_callbacks\": phase_callbacks})\n\n model.train(train_params)\n\n excepted_end_epoch = 6\n\n self.assertEqual(excepted_end_epoch, fake_metric.count)\n\n def test_min_mode_threshold_metric(self):\n \"\"\"\n Test for mode=min metric, test that training stops after metric value reaches the `threshold` value.\n \"\"\"\n model = SgModel(\"early_stop_test\", model_checkpoints_location='local')\n model.connect_dataset_interface(self.dataset)\n model.build_model(self.net)\n\n early_stop_loss = EarlyStop(Phase.VALIDATION_EPOCH_END, monitor=\"Loss\", mode=\"min\", threshold=0.1, verbose=True)\n phase_callbacks = [early_stop_loss]\n\n loss_values = torch.tensor([1., 0.8, 0.4, 0.2, 0.09, 0.11, 0.105, 0.3, 0.05, 0.02])\n fake_loss = LossTest(loss_values)\n train_params = self.train_params.copy()\n train_params.update({\"loss\": fake_loss, \"phase_callbacks\": phase_callbacks})\n\n model.train(train_params)\n\n excepted_end_epoch = 5\n # count divided by 2, because loss counter used for both train and eval.\n self.assertEqual(excepted_end_epoch, fake_loss.count // 2)\n\n def test_max_mode_threshold_metric(self):\n \"\"\"\n Test for mode=max metric, test that training stops after metric value reaches the `threshold` value.\n \"\"\"\n model = SgModel(\"early_stop_test\", model_checkpoints_location='local')\n model.connect_dataset_interface(self.dataset)\n model.build_model(self.net)\n\n early_stop_acc = EarlyStop(Phase.VALIDATION_EPOCH_END, monitor=\"MetricTest\", mode=\"max\", threshold=0.94,\n verbose=True)\n phase_callbacks = [early_stop_acc]\n\n metric_values = torch.tensor([0.2, 0.1, 0.6, 0.8, 0.9, 0.92, 0.95, 0.94, 0.948, 0.99])\n fake_metric = MetricTest(metric_values)\n train_params = self.train_params.copy()\n train_params.update(\n {\"valid_metrics_list\": [fake_metric], \"metric_to_watch\": \"MetricTest\", \"phase_callbacks\": phase_callbacks})\n\n model.train(train_params)\n\n excepted_end_epoch = 7\n\n self.assertEqual(excepted_end_epoch, fake_metric.count)\n\n def test_no_finite_stoppage(self):\n \"\"\"\n Test that training stops when monitor value is not a finite number. 
Test case of NaN and Inf values.\n \"\"\"\n # test Nan value\n model = SgModel(\"early_stop_test\", model_checkpoints_location='local')\n model.connect_dataset_interface(self.dataset)\n model.build_model(self.net)\n\n early_stop_loss = EarlyStop(Phase.VALIDATION_EPOCH_END, monitor=\"Loss\", mode=\"min\", check_finite=True,\n verbose=True)\n phase_callbacks = [early_stop_loss]\n\n loss_values = torch.tensor([1., float('nan'), 0.81, 0.8, 0.9, 0.2, 0.1, 0.3, 0.05, 0.9])\n fake_loss = LossTest(loss_values)\n train_params = self.train_params.copy()\n train_params.update({\"loss\": fake_loss, \"phase_callbacks\": phase_callbacks})\n\n model.train(train_params)\n\n excepted_end_epoch = 2\n\n self.assertEqual(excepted_end_epoch, fake_loss.count // 2)\n\n # test Inf value\n model = SgModel(\"early_stop_test\", model_checkpoints_location='local')\n model.connect_dataset_interface(self.dataset)\n model.build_model(self.net)\n\n early_stop_loss = EarlyStop(Phase.VALIDATION_EPOCH_END, monitor=\"Loss\", mode=\"min\", patience=3, verbose=True)\n phase_callbacks = [early_stop_loss]\n\n loss_values = torch.tensor([1., 0.8, float('inf'), 0.8, 0.9, 0.2, 0.1, 0.3, 0.05, 0.9])\n fake_loss = LossTest(loss_values)\n train_params = self.train_params.copy()\n train_params.update({\"loss\": fake_loss, \"phase_callbacks\": phase_callbacks})\n\n model.train(train_params)\n\n excepted_end_epoch = 3\n # count divided by 2, because loss counter used for both train and eval.\n self.assertEqual(excepted_end_epoch, fake_loss.count // 2)\n\n def test_min_delta(self):\n \"\"\"\n Test for `min_delta` argument, metric value is considered an improvement only if\n current_value - min_delta > best_value\n \"\"\"\n model = SgModel(\"early_stop_test\", model_checkpoints_location='local')\n model.connect_dataset_interface(self.dataset)\n model.build_model(self.net)\n\n early_stop_acc = EarlyStop(Phase.VALIDATION_EPOCH_END, monitor=\"MetricTest\", mode=\"max\", patience=2,\n min_delta=0.1, verbose=True)\n phase_callbacks = [early_stop_acc]\n\n metric_values = torch.tensor([0.1, 0.2, 0.305, 0.31, 0.34, 0.42, 0.6, 0.8, 0.9, 0.99])\n fake_metric = MetricTest(metric_values)\n train_params = self.train_params.copy()\n train_params.update(\n {\"valid_metrics_list\": [fake_metric], \"metric_to_watch\": \"MetricTest\", \"phase_callbacks\": phase_callbacks})\n\n model.train(train_params)\n\n excepted_end_epoch = 5\n\n self.assertEqual(excepted_end_epoch, fake_metric.count)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"torch.var",
"torch.nn.MSELoss"
],
[
"torch.nn.Sequential",
"torch.randn",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.BatchNorm2d"
],
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
],
[
"torch.stack",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lucyundead/athena--fork | [
"04a4027299145f61bdc08528548e0b1b398ba0a6"
] | [
"tst/regression/scripts/tests/pgen/hdf5_reader_serial.py"
] | [
"# Serial test script for initializing problem with preexisting array\n\n# Standard modules\nimport sys\n\n# Other modules\nimport logging\nimport numpy as np\nimport h5py\n\n# Athena modules\nimport scripts.utils.athena as athena\nsys.path.insert(0, '../../vis/python')\nimport athena_read # noqa\nathena_read.check_nan_flag = True\nlogger = logging.getLogger('athena' + __name__[7:]) # set logger name based on module\n\n# Parameters\nfilename_input = 'initial_data.hdf5'\nfilename_output = 'from_array.cons.00000.athdf'\ndataset_cons = 'cons'\ndataset_b1 = 'b1'\ndataset_b2 = 'b2'\ndataset_b3 = 'b3'\nnb1 = 4\nnx1 = 4\nnx2 = 6\nnx3 = 4\ngamma = 5.0/3.0\n\n\n# Prepare Athena++\ndef prepare(**kwargs):\n logger.debug('Running test ' + __name__)\n\n # Configure and compile code\n athena.configure('b',\n 'hdf5', 'h5double',\n prob='from_array',\n **kwargs)\n athena.make()\n\n # Calculate initial field values\n b1 = np.empty((nx3, nx2, nb1 * nx1 + 1))\n b1[...] = np.arange(nx2)[None, :, None] - np.arange(nx3)[:, None, None]\n b1_input = np.empty((nb1, nx3, nx2, nx1 + 1))\n b2_input = np.zeros((nb1, nx3, nx2 + 1, nx1))\n b3_input = np.zeros((nb1, nx3 + 1, nx2, nx1))\n for n in range(nb1):\n b1_input[n, ...] = b1[:, :, n*nx1:(n+1)*nx1+1]\n # (second-order accurate assumption)\n b1v = 0.5 * (b1_input[:, :, :, :-1] + b1_input[:, :, :, 1:])\n\n # Calculate initial conserved values\n num_cells = nb1 * nx1 * nx2 * nx3\n density = np.reshape(np.arange(1, num_cells+1), (1, nb1, nx3, nx2, nx1))\n momentum = np.zeros((3, nb1, nx3, nx2, nx1))\n energy = np.ones((1, nb1, nx3, nx2, nx1)) / (gamma - 1.0) + 0.5 * b1v[None, ...] ** 2\n cons_input = np.vstack((density, momentum, energy))\n\n # Write file to be loaded\n with h5py.File('bin/{0}'.format(filename_input), 'w') as f:\n f.create_dataset(dataset_cons, data=cons_input)\n f.create_dataset(dataset_b1, data=b1_input)\n f.create_dataset(dataset_b2, data=b2_input)\n f.create_dataset(dataset_b3, data=b3_input)\n\n\n# Run Athena++\ndef run(**kwargs):\n arguments = ['time/tlim=0',\n 'time/ncycle_out=0',\n 'mesh/nx1={0}'.format(nb1 * nx1),\n 'mesh/nx2={0}'.format(nx2),\n 'mesh/nx3={0}'.format(nx3),\n 'meshblock/nx1={0}'.format(nx1),\n 'meshblock/nx2={0}'.format(nx2),\n 'meshblock/nx3={0}'.format(nx3),\n 'problem/input_filename={0}'.format(filename_input)]\n athena.run('mhd/athinput.from_array', arguments)\n\n\n# Analyze outputs\ndef analyze():\n analyze_status = True\n # Read input data\n with h5py.File('bin/{0}'.format(filename_input), 'r') as f:\n cons_input = f[dataset_cons][:]\n b1_input = f[dataset_b1][:]\n b2_input = f[dataset_b2][:]\n b3_input = f[dataset_b3][:]\n\n # Calculate cell-centered field inputs from face-centered values\n # (second-order accurate assumption)\n b1v = 0.5 * (b1_input[:, :, :, :-1] + b1_input[:, :, :, 1:])\n b2v = 0.5 * (b2_input[:, :, :-1, :] + b2_input[:, :, 1:, :])\n b3v = 0.5 * (b3_input[:, :-1, :, :] + b3_input[:, 1:, :, :])\n\n # Read output data\n with h5py.File('bin/{0}'.format(filename_output), 'r') as f:\n num_vars = f.attrs['NumVariables']\n dataset_names = f.attrs['DatasetNames'].astype('U')\n output_vars = f.attrs['VariableNames'].astype('U')\n cons_output = f['cons'][:]\n field_output = f['B'][:]\n\n # Order conserved output data to match inputs\n index_cons = np.where(dataset_names == 'cons')[0][0]\n num_vars_cons = num_vars[index_cons]\n num_vars_pre_cons = np.sum(num_vars[:index_cons])\n output_vars_cons = output_vars[num_vars_pre_cons:num_vars_pre_cons+num_vars_cons]\n dens = cons_output[np.where(output_vars_cons == 
'dens')[0], ...]\n mom1 = cons_output[np.where(output_vars_cons == 'mom1')[0], ...]\n mom2 = cons_output[np.where(output_vars_cons == 'mom2')[0], ...]\n mom3 = cons_output[np.where(output_vars_cons == 'mom3')[0], ...]\n etot = cons_output[np.where(output_vars_cons == 'Etot')[0], ...]\n cons_output = np.vstack((dens, mom1, mom2, mom3, etot))\n\n # Order field output data to match inputs\n index_field = np.where(dataset_names == 'B')[0][0]\n num_vars_field = num_vars[index_field]\n num_vars_pre_field = np.sum(num_vars[:index_field])\n output_vars_field = output_vars[num_vars_pre_field:num_vars_pre_field+num_vars_field]\n b1_output = field_output[np.where(output_vars_field == 'Bcc1')[0][0], ...]\n b2_output = field_output[np.where(output_vars_field == 'Bcc2')[0][0], ...]\n b3_output = field_output[np.where(output_vars_field == 'Bcc3')[0][0], ...]\n\n # Check that outputs match inputs\n if not np.all(cons_output == cons_input):\n analyze_status = False\n if not np.all(b1_output == b1v):\n analyze_status = False\n if not np.all(b2_output == b2v):\n analyze_status = False\n if not np.all(b3_output == b3v):\n analyze_status = False\n return analyze_status\n"
] | [
[
"numpy.arange",
"numpy.empty",
"numpy.ones",
"numpy.all",
"numpy.where",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cbfinn/ray | [
"18f9fe0e2b85d04f22e3e04907bbfacadca4bc2d",
"18f9fe0e2b85d04f22e3e04907bbfacadca4bc2d",
"18f9fe0e2b85d04f22e3e04907bbfacadca4bc2d"
] | [
"examples/a3c/runner.py",
"python/ray/test/test_functions.py",
"examples/hyperopt/hyperopt_simple.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport numpy as np\nimport tensorflow as tf\nimport six.moves.queue as queue\nimport scipy.signal\nimport threading\n\n\ndef discount(x, gamma):\n return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\n\ndef process_rollout(rollout, gamma, lambda_=1.0):\n \"\"\"Given a rollout, compute its returns and the advantage.\"\"\"\n batch_si = np.asarray(rollout.states)\n batch_a = np.asarray(rollout.actions)\n rewards = np.asarray(rollout.rewards)\n vpred_t = np.asarray(rollout.values + [rollout.r])\n\n rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])\n batch_r = discount(rewards_plus_v, gamma)[:-1]\n delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]\n # This formula for the advantage comes \"Generalized Advantage Estimation\":\n # https://arxiv.org/abs/1506.02438\n batch_adv = discount(delta_t, gamma * lambda_)\n\n features = rollout.features[0]\n return Batch(batch_si, batch_a, batch_adv, batch_r, rollout.terminal,\n features)\n\n\nBatch = namedtuple(\"Batch\", [\"si\", \"a\", \"adv\", \"r\", \"terminal\", \"features\"])\n\n\nclass PartialRollout(object):\n \"\"\"A piece of a complete rollout.\n\n We run our agent, and process its experience once it has processed enough\n steps.\n \"\"\"\n def __init__(self):\n self.states = []\n self.actions = []\n self.rewards = []\n self.values = []\n self.r = 0.0\n self.terminal = False\n self.features = []\n\n def add(self, state, action, reward, value, terminal, features):\n self.states += [state]\n self.actions += [action]\n self.rewards += [reward]\n self.values += [value]\n self.terminal = terminal\n self.features += [features]\n\n def extend(self, other):\n assert not self.terminal\n self.states.extend(other.states)\n self.actions.extend(other.actions)\n self.rewards.extend(other.rewards)\n self.values.extend(other.values)\n self.r = other.r\n self.terminal = other.terminal\n self.features.extend(other.features)\n\n\nclass RunnerThread(threading.Thread):\n \"\"\"This thread interacts with the environment and tells it what to do.\"\"\"\n def __init__(self, env, policy, num_local_steps, visualise=False):\n threading.Thread.__init__(self)\n self.queue = queue.Queue(5)\n self.num_local_steps = num_local_steps\n self.env = env\n self.last_features = None\n self.policy = policy\n self.daemon = True\n self.sess = None\n self.summary_writer = None\n self.visualise = visualise\n\n def start_runner(self, sess, summary_writer):\n self.sess = sess\n self.summary_writer = summary_writer\n self.start()\n\n def run(self):\n with self.sess.as_default():\n self._run()\n\n def _run(self):\n rollout_provider = env_runner(self.env, self.policy, self.num_local_steps,\n self.summary_writer, self.visualise)\n while True:\n # The timeout variable exists because apparently, if one worker dies, the\n # other workers won't die with it, unless the timeout is set to some\n # large number. 
This is an empirical observation.\n self.queue.put(next(rollout_provider), timeout=600.0)\n\n\ndef env_runner(env, policy, num_local_steps, summary_writer, render):\n \"\"\"This impleents the logic of the thread runner.\n\n It continually runs the policy, and as long as the rollout exceeds a certain\n length, the thread runner appends the policy to the queue.\n \"\"\"\n last_state = env.reset()\n last_features = policy.get_initial_features()\n length = 0\n rewards = 0\n rollout_number = 0\n\n while True:\n terminal_end = False\n rollout = PartialRollout()\n\n for _ in range(num_local_steps):\n fetched = policy.act(last_state, *last_features)\n action, value_, features = fetched[0], fetched[1], fetched[2:]\n # Argmax to convert from one-hot.\n state, reward, terminal, info = env.step(action.argmax())\n if render:\n env.render()\n\n # Collect the experience.\n rollout.add(last_state, action, reward, value_, terminal, last_features)\n length += 1\n rewards += reward\n\n last_state = state\n last_features = features\n\n if info:\n summary = tf.Summary()\n for k, v in info.items():\n summary.value.add(tag=k, simple_value=float(v))\n summary_writer.add_summary(summary, rollout_number)\n summary_writer.flush()\n\n timestep_limit = env.spec.tags.get(\"wrapper_config.TimeLimit\"\n \".max_episode_steps\")\n if terminal or length >= timestep_limit:\n terminal_end = True\n if length >= timestep_limit or not env.metadata.get(\"semantics\"\n \".autoreset\"):\n last_state = env.reset()\n last_features = policy.get_initial_features()\n rollout_number += 1\n length = 0\n rewards = 0\n break\n\n if not terminal_end:\n rollout.r = policy.value(last_state, *last_features)\n\n # Once we have enough experience, yield it, and have the ThreadRunner\n # place it on a queue.\n yield rollout\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ray\n\nimport numpy as np\n\n# Test simple functionality\n\n\[email protected](num_return_vals=2)\ndef handle_int(a, b):\n return a + 1, b + 1\n\n# Test timing\n\n\[email protected]\ndef empty_function():\n pass\n\n\[email protected]\ndef trivial_function():\n return 1\n\n# Test keyword arguments\n\n\[email protected]\ndef keyword_fct1(a, b=\"hello\"):\n return \"{} {}\".format(a, b)\n\n\[email protected]\ndef keyword_fct2(a=\"hello\", b=\"world\"):\n return \"{} {}\".format(a, b)\n\n\[email protected]\ndef keyword_fct3(a, b, c=\"hello\", d=\"world\"):\n return \"{} {} {} {}\".format(a, b, c, d)\n\n# Test variable numbers of arguments\n\n\[email protected]\ndef varargs_fct1(*a):\n return \" \".join(map(str, a))\n\n\[email protected]\ndef varargs_fct2(a, *b):\n return \" \".join(map(str, b))\n\n\ntry:\n @ray.remote\n def kwargs_throw_exception(**c):\n return ()\n kwargs_exception_thrown = False\nexcept:\n kwargs_exception_thrown = True\n\ntry:\n @ray.remote\n def varargs_and_kwargs_throw_exception(a, b=\"hi\", *c):\n return \"{} {} {}\".format(a, b, c)\n varargs_and_kwargs_exception_thrown = False\nexcept:\n varargs_and_kwargs_exception_thrown = True\n\n# test throwing an exception\n\n\[email protected]\ndef throw_exception_fct1():\n raise Exception(\"Test function 1 intentionally failed.\")\n\n\[email protected]\ndef throw_exception_fct2():\n raise Exception(\"Test function 2 intentionally failed.\")\n\n\[email protected](num_return_vals=3)\ndef throw_exception_fct3(x):\n raise Exception(\"Test function 3 intentionally failed.\")\n\n# test Python mode\n\n\[email protected]\ndef python_mode_f():\n return np.array([0, 0])\n\n\[email protected]\ndef python_mode_g(x):\n x[0] = 1\n return x\n\n# test no return values\n\n\[email protected]\ndef no_op():\n pass\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport ray\nimport argparse\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport objective\n\nparser = argparse.ArgumentParser(description=\"Run the hyperparameter \"\n \"optimization example.\")\nparser.add_argument(\"--trials\", default=2, type=int,\n help=\"The number of random trials to do.\")\nparser.add_argument(\"--steps\", default=10, type=int,\n help=\"The number of steps of training to do per network.\")\nparser.add_argument(\"--redis-address\", default=None, type=str,\n help=\"The Redis address of the cluster.\")\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n ray.init(redis_address=args.redis_address)\n\n # The number of sets of random hyperparameters to try.\n trials = args.trials\n # The number of training passes over the dataset to use for network.\n steps = args.steps\n\n # Load the mnist data and turn the data into remote objects.\n print(\"Downloading the MNIST dataset. This may take a minute.\")\n mnist = input_data.read_data_sets(\"MNIST_data\", one_hot=True)\n train_images = ray.put(mnist.train.images)\n train_labels = ray.put(mnist.train.labels)\n validation_images = ray.put(mnist.validation.images)\n validation_labels = ray.put(mnist.validation.labels)\n\n # Keep track of the best hyperparameters and the best accuracy.\n best_hyperparamemeters = None\n best_accuracy = 0\n # This list holds the object IDs for all of the experiments that we have\n # launched and that have not yet been processed.\n remaining_ids = []\n # This is a dictionary mapping the object ID of an experiment to the\n # hyerparameters used for that experiment.\n hyperparameters_mapping = {}\n\n # A function for generating random hyperparameters.\n def generate_hyperparameters():\n return {\"learning_rate\": 10 ** np.random.uniform(-5, 5),\n \"batch_size\": np.random.randint(1, 100),\n \"dropout\": np.random.uniform(0, 1),\n \"stddev\": 10 ** np.random.uniform(-5, 5)}\n\n # Randomly generate some hyperparameters, and launch a task for each set.\n for i in range(trials):\n hyperparameters = generate_hyperparameters()\n accuracy_id = objective.train_cnn_and_compute_accuracy.remote(\n hyperparameters, steps, train_images, train_labels, validation_images,\n validation_labels)\n remaining_ids.append(accuracy_id)\n # Keep track of which hyperparameters correspond to this experiment.\n hyperparameters_mapping[accuracy_id] = hyperparameters\n\n # Fetch and print the results of the tasks in the order that they complete.\n for i in range(trials):\n # Use ray.wait to get the object ID of the first task that completes.\n ready_ids, remaining_ids = ray.wait(remaining_ids)\n # Process the output of this task.\n result_id = ready_ids[0]\n hyperparameters = hyperparameters_mapping[result_id]\n accuracy, _ = ray.get(result_id)\n print(\"\"\"We achieve accuracy {:.3}% with\n learning_rate: {:.2}\n batch_size: {}\n dropout: {:.2}\n stddev: {:.2}\n \"\"\".format(100 * accuracy,\n hyperparameters[\"learning_rate\"],\n hyperparameters[\"batch_size\"],\n hyperparameters[\"dropout\"],\n hyperparameters[\"stddev\"]))\n if accuracy > best_accuracy:\n best_hyperparameters = hyperparameters\n best_accuracy = accuracy\n\n # Record the best performing set of hyperparameters.\n print(\"\"\"Best accuracy over {} trials was {:.3} with\n learning_rate: {:.2}\n batch_size: {}\n dropout: {:.2}\n stddev: {:.2}\n \"\"\".format(trials, 100 * best_accuracy,\n 
best_hyperparameters[\"learning_rate\"],\n best_hyperparameters[\"batch_size\"],\n best_hyperparameters[\"dropout\"],\n best_hyperparameters[\"stddev\"]))\n"
] | [
[
"numpy.asarray",
"tensorflow.Summary"
],
[
"numpy.array"
],
[
"numpy.random.uniform",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
perathambkk/ml-techniques | [
"5d6fd122322342c0b47dc65d09c4425fd73f2ea9",
"5d6fd122322342c0b47dc65d09c4425fd73f2ea9",
"5d6fd122322342c0b47dc65d09c4425fd73f2ea9"
] | [
"text/naivebayes.py",
"Bayesian/bayesian_linear_regression.py",
"clustering/kmeanspp.py"
] | [
"\"\"\"\nAuthor: Peratham Wiriyathammabhum\n\n\n\"\"\"\nimport numpy as np \nimport pandas as pd \nimport scipy as sp\nimport matplotlib.pyplot as plt \nimport scipy.sparse.linalg as linalg\n\ndef naivebayes(X):\n\t\"\"\"\n\tPerform spectral clustering on an input row matrix X.\n\tmode \\in {'affinity','neighborhood','gaussian'}\n\tSee: http://www.math.ucsd.edu/~fan/research/revised.html\n\t\thttp://www.math.ucsd.edu/~fan/research/cbms.pdf\n\t\"\"\"\n\tni, nd = X.shape\n\tL = laplacian_graph(X, mode='affinity', knn=knn, eta=eta, sigma=sigma)\n\n\tvals, vecs = linalg.eigs(L, k=k, which='SR')\n\t# ind = np.argsort(vals, axis=0)\n\t# vals = vals[ind]\n\t# vecs = vecs[:, ind]\n\n\tmu = kmeans(vecs, k=k, thres=10**-5, max_iters=max_iters)\n\t\n\tdist = ((vecs[:,None,:] - mu[None,:,:])**2).sum(axis=2)\n\tcidx = np.argmin(dist, axis=1)\n\treturn mu, cidx\n\ndef tfidf():\n\n\treturn\n\ndef main(opts):\n\tk = opts['k']\n\n\t# load data\n\tcategories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']\n\tfrom sklearn.datasets import fetch_20newsgroups\n\tfrom sklearn.feature_extraction.text import CountVectorizer\n\tcount_vect = CountVectorizer()\n\tX_train_counts = count_vect.fit_transform(twenty_train.data)\n\n\t# tf-idf\n\n\t# clustering\n\t_, cidx = spectral_clustering(X, mode=mode, k=k, knn=knn, eta=eta, sigma=sigma, max_iters=max_iters)\n\n\t# plot\n\t\n\treturn\n\nif __name__ == '__main__':\n\timport argparse\n\n\tparser = argparse.ArgumentParser(description='run naivebayes.')\n\tparser.add_argument('--k', dest='k',\n\t\t\t\t\t help='number of clusters',\n\t\t\t\t\t default=2, type=int)\n\targs = parser.parse_args()\n\topts = vars(args)\n\n\tmain(opts)\n",
"\"\"\"\nAuthor: Peratham Wiriyathammabhum\n\n\n\"\"\"\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \nfrom scipy import stats\neps = np.finfo(float).eps\n\nclass BayesLinReg(object):\n\t\"\"\"\n\tBayesian linear regression.\n\tSee: Pattern Recognition and Machine Learning by Christopher Bishop ch.3.\n\t\thttps://www.microsoft.com/en-us/research/people/cmbishop/prml-book/\n\tBlogs: https://maxhalford.github.io/blog/bayesian-linear-regression/\n\t\"\"\"\n\tdef __init__(self, num_feas, alpha, beta):\n\t\tself.num_feas = num_feas\n\t\tself.alpha = alpha\n\t\tself.beta = beta\n\t\tself.mean = np.zeros((num_feas,1))\n\t\tself.invcov_mat = np.identity(num_feas) / alpha\n\t\treturn\n\n\tdef update(self, x, y):\n\t\t\"\"\"\n\t\teq 3.50-3.51 in Bishop\n\t\t\"\"\"\n\t\tinvcov_mat_n = self.invcov_mat + self.beta * np.outer(x, x)\n\t\tmean_n = np.matmul(np.linalg.inv(invcov_mat_n), (np.matmul(self.invcov_mat, self.mean) + self.beta* np.expand_dims(np.dot(y, x), axis=1)))\n\t\tassert mean_n.shape == self.mean.shape\n\t\tself.mean = mean_n\n\t\tself.invcov_mat = invcov_mat_n\n\t\treturn self\n\n\tdef predict(self, x):\n\t\t\"\"\"\n\t\teq 3.58-3.59 in Bishop\n\t\t\"\"\"\n\t\tpred_mean = np.dot(x, self.mean)\n\t\tsigma_squared_x = 1./self.beta + np.dot(np.dot(x, np.linalg.inv(self.invcov_mat)), x.T)\n\t\treturn stats.norm(loc=pred_mean.T, scale=sigma_squared_x ** .5)\n\n\t@property\n\tdef weights_dist(self):\n\t\treturn stats.multivariate_normal(mean=self.mean, cov=np.linalg.inv(self.invcov_mat))\n\ndef main(opts):\n\tfrom sklearn import metrics\n\n\talpha = opts['alpha']\n\tbeta = opts['beta']\n\tfrom sklearn import datasets\n\tdiabetes = datasets.load_diabetes()\n\tX, y = diabetes.data, diabetes.target\n\t# whitening\n\tX = (X - np.mean(X, axis=0)) / np.std(X, axis=0)\n\t\n\tmodel = BayesLinReg(num_feas=X.shape[1], alpha=alpha, beta=beta)\n\n\ty_pred = np.empty(len(y))\n\n\tfor i, (xi, yi) in enumerate(zip(X, y)): # one at a time\n\t\ty_pred[i] = model.predict(xi).mean()\n\t\tmodel.update(xi, yi)\n\n\tprint(metrics.mean_absolute_error(y, y_pred))\n\n\t# plot\n\t# input(\"Press Enter to continue...\")\n\n\n\treturn\n\nif __name__ == '__main__':\n\timport argparse\n\n\tparser = argparse.ArgumentParser(description='run bayesian linear regression.')\n\tparser.add_argument('--alpha', dest='alpha',\n\t\t\t\t\t help='alpha',\n\t\t\t\t\t default=.3, type=float)\n\tparser.add_argument('--beta', dest='beta',\n\t\t\t\t\t help='beta',\n\t\t\t\t\t default=1, type=float)\n\targs = parser.parse_args()\n\topts = vars(args)\n\n\tmain(opts)\n",
"\"\"\"\nAuthor: Peratham Wiriyathammabhum\n\n\n\"\"\"\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \n\ndef kmeanspp(X, k=3, thres=10**-5, max_iters=200):\n\t\"\"\"\n\tPerform k-means++ clustering on an input row matrix X.\n\tSee: http://www.ciml.info/dl/v0_9/ciml-v0_9-ch13.pdf\n\t\"\"\"\n\tni, nd = X.shape\n\tmu = np.zeros((k, nd))\n\tkrows = np.random.choice(ni, 1, replace=False)\n\tnmu = X[krows]\n\tnmu = kmeanspp_init(nmu, X, k)\n\tnite = 0\n\twhile not terminating_cond(mu, nmu, thres, nite, max_iters):\n\t\tmu = nmu\n\n\t\t# e-step: centroid assignments from parameters\n\t\tdist = ((X[:,None,:] - mu[None,:,:])**2).sum(axis=2)\n\t\tprint('[Info] iter: {} SSE: {}'.format(nite, dist.sum()))\n\t\t# m-step: estimate latent parameters (centroids)\n\t\tcidx = np.argmin(dist, axis=1)\n\t\tfor i in range(k):\n\t\t\tnmu[i] = X[cidx==i].mean(axis=0)\n\n\t\tnite += 1\n\t\tif nite % 10 == 0:\n\t\t\tprint('[Info] iter: {}'.format(nite))\n\treturn mu\n\ndef kmeanspp_init(nmu, X, k):\n\tni, nd = X.shape\n\tfor i in range(1, k):\n\t\tdist = ((X[:,None,:] - nmu[None,:,:])**2).sum(axis=2)\n\t\tdist = dist.min(axis=1)\n\t\tdist /= dist.sum()\n\t\tnewm = np.random.choice(ni, 1, replace=False, p = dist)\n\t\tnewrow = X[newm]\n\t\tnmu = np.append(nmu, newrow, axis=0)\n\treturn nmu\n\ndef terminating_cond(mu, nmu, thres, nite, max_iters):\n\tif nite >= max_iters:\n\t\tprint('[Info] terminate at iter: {}'.format(nite))\n\t\treturn True\n\t# elif np.linalg.norm(mu - nmu) < thres:\n\t# \tprint('[Info] terminate at iter: {}'.format(nite))\n\t# \treturn True\n\telse:\n\t\treturn False\n\ndef main(opts):\n\tk = opts['k']\n\tmax_iters = opts['max_iters']\n\tfrom sklearn import datasets\n\tiris = datasets.load_iris()\n\tX = iris.data\n\tmu = kmeanspp(X, k=k, thres=10**-5, max_iters=max_iters)\n\n\t# plot\n\tdist = ((X[:,None,:] - mu[None,:,:])**2).sum(axis=2)\n\tprint('[Info] SSE: {}'.format(dist.sum()))\n\tcidx = np.argmin(dist, axis=1)\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\n\tfor c in range(k):\n\t\tcluster_members = [X[i] for i in range(len(X)) if cidx[i] == c] \n\t\tcluster_members = np.array(cluster_members)\n\n\t\tax.scatter(cluster_members[:,0], cluster_members[:,1], cluster_members[:,2], s= 0.5)\n\tinput(\"Press Enter to continue...\")\n\t\"\"\"\n\tFor details see: \n\thttps://blog.paperspace.com/speed-up-kmeans-numpy-vectorization-broadcasting-profiling/\n\thttps://github.com/siddheshk/Faster-Kmeans\n\t\"\"\"\n\treturn\n\nif __name__ == '__main__':\n\timport argparse\n\n\tparser = argparse.ArgumentParser(description='run k-means.')\n\tparser.add_argument('--k', dest='k',\n\t\t\t\t\t help='number of clusters',\n\t\t\t\t\t default=3, type=int)\n\tparser.add_argument('--max_iters', dest='max_iters',\n\t\t\t\t\t help='number of iterations to train',\n\t\t\t\t\t default=200, type=int)\n\targs = parser.parse_args()\n\topts = vars(args)\n\n\tmain(opts)\n"
] | [
[
"scipy.sparse.linalg.eigs",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.argmin"
],
[
"numpy.dot",
"numpy.linalg.inv",
"sklearn.metrics.mean_absolute_error",
"numpy.matmul",
"sklearn.datasets.load_diabetes",
"numpy.finfo",
"scipy.stats.norm",
"numpy.std",
"numpy.identity",
"numpy.mean",
"numpy.outer",
"numpy.zeros"
],
[
"numpy.random.choice",
"sklearn.datasets.load_iris",
"numpy.append",
"numpy.argmin",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Crypto-TII/syndrome_decoding_estimator | [
"c7d9aaeed83708dbf5db3c45a007c0010a1225c8"
] | [
"sd_estimator/estimator.py"
] | [
"from .theoretical_estimates import *\nfrom math import inf, ceil, log2, comb\nfrom prettytable import PrettyTable\nfrom progress.bar import Bar\nfrom scipy.special import binom as binom_sp\nfrom scipy.optimize import fsolve\nfrom warnings import filterwarnings\n\nfilterwarnings(\"ignore\", category=RuntimeWarning)\n\n\ndef binom(n, k):\n return comb(int(n), int(k))\n\n\ndef __truncate(x, precision):\n \"\"\"\n Truncates a float\n\n INPUT:\n\n - ``x`` -- value to be truncated\n - ``precision`` -- number of decimal places to after which the ``x`` is truncated\n\n \"\"\"\n\n return float(int(x * 10 ** precision) / 10 ** precision)\n\n\ndef __concat_pretty_tables(t1, t2):\n v = t1.split(\"\\n\")\n v2 = t2.split(\"\\n\")\n vnew = \"\"\n for i in range(len(v)):\n vnew += v[i] + v2[i][1:] + \"\\n\"\n return vnew[:-1]\n\n\ndef __round_or_truncate_to_given_precision(T, M, truncate, precision):\n if truncate:\n T, M = __truncate(T, precision), __truncate(M, precision)\n else:\n T, M = round(T, precision), round(M, precision)\n return '{:.{p}f}'.format(T, p=precision), '{:.{p}f}'.format(M, p=precision)\n\n\ndef __memory_access_cost(mem, memory_access):\n if memory_access == 0:\n return 0\n elif memory_access == 1:\n return log2(mem)\n elif memory_access == 2:\n return mem / 2\n elif memory_access == 3:\n return mem / 3\n elif callable(memory_access):\n return memory_access(mem)\n return 0\n\n\ndef _gaussian_elimination_complexity(n, k, r):\n \"\"\"\n Complexity estimate of Gaussian elimination routine\n\n INPUT:\n\n - ``n`` -- Row additons are perfomed on ``n`` coordinates\n - ``k`` -- Matrix consists of ``n-k`` rows\n - ``r`` -- Blocksize of method of the four russian for inversion, default is zero\n\n [Bar07]_ Bard, G.V.: Algorithms for solving linear and polynomial systems of equations over finite fields\n with applications to cryptanalysis. Ph.D. thesis (2007)\n\n [BLP08] Bernstein, D.J., Lange, T., Peters, C.: Attacking and defending the mceliece cryptosystem.\n In: International Workshop on Post-Quantum Cryptography. pp. 31–46. 
Springer (2008)\n\n EXAMPLES::\n\n >>> from .estimator import _gaussian_elimination_complexity\n >>> _gaussian_elimination_complexity(n=100,k=20,r=1) # doctest: +SKIP\n\n \"\"\"\n\n if r != 0:\n return (r ** 2 + 2 ** r + (n - k - r)) * int(((n + r - 1) / r))\n\n return (n - k) ** 2\n\n\ndef _optimize_m4ri(n, k, mem=inf):\n \"\"\"\n Find optimal blocksize for Gaussian elimination via M4RI\n\n INPUT:\n\n - ``n`` -- Row additons are perfomed on ``n`` coordinates\n - ``k`` -- Matrix consists of ``n-k`` rows\n\n \"\"\"\n\n (r, v) = (0, inf)\n for i in range(n - k):\n tmp = log2(_gaussian_elimination_complexity(n, k, i))\n if v > tmp and r < mem:\n r = i\n v = tmp\n return r\n\n\ndef _mem_matrix(n, k, r):\n \"\"\"\n Memory usage of parity check matrix in vector space elements\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``r`` -- block size of M4RI procedure\n\n EXAMPLES::\n\n >>> from .estimator import _mem_matrix\n >>> _mem_matrix(n=100,k=20,r=0) # doctest: +SKIP\n\n \"\"\"\n return n - k + 2 ** r\n\n\ndef _list_merge_complexity(L, l, hmap):\n \"\"\"\n Complexity estimate of merging two lists exact\n\n INPUT:\n\n - ``L`` -- size of lists to be merged\n - ``l`` -- amount of bits used for matching\n - ``hmap`` -- indicates if hashmap is being used (Default 0: no hashmap)\n\n EXAMPLES::\n\n >>> from .estimator import _list_merge_complexity\n >>> _list_merge_complexity(L=2**16,l=16,hmap=1) # doctest: +SKIP\n\n \"\"\"\n\n if L == 1:\n return 1\n if not hmap:\n return max(1, 2 * int(log2(L)) * L + L ** 2 // 2 ** l)\n else:\n return 2 * L + L ** 2 // 2 ** l\n\n\ndef _indyk_motwani_complexity(L, l, w, hmap):\n \"\"\"\n Complexity of Indyk-Motwani nearest neighbor search\n\n INPUT:\n\n - ``L`` -- size of lists to be matched\n - ``l`` -- amount of bits used for matching\n - ``w`` -- target weight\n - ``hmap`` -- indicates if hashmap is being used (Default 0: no hashmap)\n\n EXAMPLES::\n\n >>> from .estimator import _indyk_motwani_complexity\n >>> _indyk_motwani_complexity(L=2**16,l=16,w=2,hmap=1) # doctest: +SKIP\n\n \"\"\"\n\n if w == 0:\n return _list_merge_complexity(L, l, hmap)\n lam = max(0, int(min(ceil(log2(L)), l - 2 * w)))\n return binom(l, lam) // binom(l - w, lam) * _list_merge_complexity(L, lam, hmap)\n\n\ndef _mitm_nn_complexity(L, l, w, hmap):\n \"\"\"\n Complexity of Indyk-Motwani nearest neighbor search\n\n INPUT:\n\n - ``L`` -- size of lists to be matched\n - ``l`` -- amount of bits used for matching\n - ``w`` -- target weight\n - ``hmap`` -- indicates if hashmap is being used (Default 0: no hashmap)\n\n EXAMPLES::\n\n >>> from .estimator import _indyk_motwani_complexity\n >>> _indyk_motwani_complexity(L=2**16,l=16,w=2,hmap=1) # doctest: +SKIP\n\n \"\"\"\n if w == 0:\n return _list_merge_complexity(L, l, hmap)\n L1 = L * binom(l / 2, w / 2)\n return _list_merge_complexity(L1, l, hmap)\n\n\ndef prange_complexity(n, k, w, mem=inf, memory_access=0):\n \"\"\"\n Complexity estimate of Prange's ISD algorithm\n\n [Pra62] Prange, E.: The use of information sets in decoding cyclic codes. 
IRE Transactions\n on Information Theory 8(5), 5–9 (1962)\n\n expected weight distribution::\n\n +--------------------------------+-------------------------------+\n | <----------+ n - k +---------> | <----------+ k +------------> |\n | w | 0 |\n +--------------------------------+-------------------------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2(bits)), default unlimited\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import prange_complexity\n >>> prange_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n\n solutions = max(0, log2(binom(n, w)) - (n - k))\n\n r = _optimize_m4ri(n, k, mem)\n Tp = max(log2(binom(n, w)) - log2(binom(n - k, w)) - solutions, 0)\n Tg = log2(_gaussian_elimination_complexity(n, k, r))\n time = Tp + Tg\n memory = log2(_mem_matrix(n, k, r))\n\n time += __memory_access_cost(memory, memory_access)\n\n params = [r]\n\n par = {\"r\": params[0]}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef stern_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):\n \"\"\"\n Complexity estimate of Stern's ISD algorithm\n\n [Ste88] Stern, J.: A method for finding codewords of small weight. In: International\n Colloquium on Coding Theory and Applications. pp. 106–113. Springer (1988)\n\n [BLP08] Bernstein, D.J., Lange, T., Peters, C.: Attacking and defending the mceliece cryptosystem.\n In: International Workshop on Post-Quantum Cryptography. pp. 31–46. 
Springer (2008)\n\n expected weight distribution::\n\n +-------------------------+---------+-------------+-------------+\n | <----+ n - k - l +----> |<-- l -->|<--+ k/2 +-->|<--+ k/2 +-->|\n | w - 2p | 0 | p | p |\n +-------------------------+---------+-------------+-------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import stern_complexity\n >>> stern_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n\n solutions = max(0, log2(binom(n, w)) - (n - k))\n\n r = _optimize_m4ri(n, k, mem)\n time = inf\n memory = 0\n params = [-1 for i in range(2)]\n i_val = [20]\n i_val_inc = [10]\n k1 = k // 2\n while True:\n stop = True\n for p in range(min(k1, w // 2, i_val[0])):\n L1 = binom(k1, p)\n l_val = int(log2(L1))\n if log2(L1) > time:\n continue\n for l in range(max(l_val - i_val_inc[0], 0), l_val + i_val_inc[0]):\n\n tmp_mem = log2(2 * L1 + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n\n Tp = max(0,\n log2(binom(n, w)) - log2(binom(n - k, w - 2 * p)) - log2(binom(k1, p) ** 2) - solutions)\n\n # We use Indyk-Motwani (IM) taking into account the possibility of multiple existing solutions\n # with correct weight distribution, decreasing the amount of necessary projections\n # remaining_sol denotes the number of expected solutions per permutation\n # l_part_iterations is the expected number of projections need by IM to find one of those solutions\n\n remaining_sol = (binom(n - k, w - 2 * p) * binom(k1, p) ** 2 * binom(n, w) // 2 ** (n - k)) // binom(n,\n w)\n l_part_iterations = binom(n - k, w - 2 * p) // binom(n - k - l, w - 2 * p)\n\n if remaining_sol > 0:\n l_part_iterations //= max(1, remaining_sol)\n l_part_iterations = max(1, l_part_iterations)\n\n Tg = _gaussian_elimination_complexity(n, k, r)\n tmp = Tp + log2(Tg + _list_merge_complexity(L1, l, hmap) * l_part_iterations)\n\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n time = min(time, tmp)\n\n if tmp == time:\n memory = tmp_mem\n params = [p, l]\n\n for i in range(len(i_val)):\n if params[i] == i_val[i] - 1:\n stop = False\n i_val[i] += i_val_inc[i]\n\n if stop:\n break\n\n par = {\"l\": params[1], \"p\": params[0]}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef dumer_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):\n \"\"\"\n Complexity estimate of Dumer's ISD algorithm\n\n [Dum91] Dumer, I.: On minimum distance decoding of linear codes. In: Proc. 5th Joint\n Soviet-Swedish Int. Workshop Inform. Theory. pp. 
50–52 (1991)\n\n expected weight distribution::\n\n +--------------------------+------------------+-------------------+\n | <-----+ n - k - l +----->|<-- (k + l)/2 +-->|<--+ (k + l)/2 +-->|\n | w - 2p | p | p |\n +--------------------------+------------------+-------------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import dumer_complexity\n >>> dumer_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n\n \"\"\"\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k, mem)\n\n i_val = [10, 40]\n i_val_inc = [10, 10]\n params = [-1 for _ in range(2)]\n while True:\n stop = True\n for p in range(min(w // 2, i_val[0])):\n for l in range(min(n - k - (w - p), i_val[1])):\n k1 = (k + l) // 2\n L1 = binom(k1, p)\n if log2(L1) > time:\n continue\n\n tmp_mem = log2(2 * L1 + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n\n Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - log2(binom(k1, p) ** 2) - solutions, 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n tmp = Tp + log2(Tg + _list_merge_complexity(L1, l, hmap))\n\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n time = min(time, tmp)\n if tmp == time:\n memory = tmp_mem\n params = [p, l]\n\n for i in range(len(i_val)):\n if params[i] == i_val[i] - 1:\n stop = False\n i_val[i] += i_val_inc[i]\n\n if stop:\n break\n\n par = {\"l\": params[1], \"p\": params[0]}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef ball_collision_decoding_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):\n \"\"\"\n Complexity estimate of the ball collision decodding algorithm\n\n [BLP11] Bernstein, D.J., Lange, T., Peters, C.: Smaller decoding exponents: ball-collision decoding.\n In: Annual Cryptology Conference. pp. 743–760. 
Springer (2011)\n\n expected weight distribution::\n\n +------------------+---------+---------+-------------+-------------+\n | <-+ n - k - l +->|<- l/2 ->|<- l/2 ->|<--+ k/2 +-->|<--+ k/2 +-->|\n | w - 2p - 2pl | pl | pl | p | p |\n +------------------+---------+---------+-------------+-------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import ball_collision_decoding_complexity\n >>> ball_collision_decoding_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k, mem)\n\n i_val = [10, 80, 4]\n i_val_inc = [10, 10, 10]\n params = [-1 for _ in range(3)]\n k1 = k // 2\n while True:\n stop = True\n for p in range(min(w // 2, i_val[0])):\n for l in range(min(n - k - (w - 2 * p), i_val[1])):\n for pl in range(min(i_val[2], (w - 2 * p) // 2, l // 2 + 1)):\n L1 = binom(k1, p)\n L1 *= max(1, binom(l // 2, pl))\n if log2(L1) > time:\n continue\n\n tmp_mem = log2(2 * L1 + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n\n Tp = max(\n log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p - 2 * pl)) - 2 * log2(\n binom(k1, p)) - 2 * log2(\n binom(l // 2, pl)) - solutions, 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n tmp = Tp + log2(Tg + _list_merge_complexity(L1, l, hmap))\n\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n time = min(time, tmp)\n if tmp == time:\n memory = tmp_mem\n params = [p, pl, l]\n\n for i in range(len(i_val)):\n if params[i] == i_val[i] - 1:\n stop = False\n i_val[i] += i_val_inc[i]\n\n if stop:\n break\n\n par = {\"l\": params[2], \"p\": params[0], \"pl\": params[1]}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef bjmm_complexity(n, k, w, mem=inf, hmap=1, only_depth_two=0, memory_access=0):\n \"\"\"\n Complexity estimate of BJMM algorithm\n\n [MMT11] May, A., Meurer, A., Thomae, E.: Decoding random linear codes in 2^(0.054n). In: International Conference\n on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)\n\n [BJMM12] Becker, A., Joux, A., May, A., Meurer, A.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0\n improves information set decoding. In: Annual international conference on the theory and applications of\n cryptographic techniques. pp. 520–536. 
Springer (2012)\n\n expected weight distribution::\n\n +--------------------------+-------------------+-------------------+\n | <-----+ n - k - l +----->|<--+ (k + l)/2 +-->|<--+ (k + l)/2 +-->|\n | w - 2p | p | p |\n +--------------------------+-------------------+-------------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import bjmm_complexity\n >>> bjmm_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n d2 = bjmm_depth_2_complexity(n, k, w, mem, hmap, memory_access)\n d3 = bjmm_depth_3_complexity(n, k, w, mem, hmap, memory_access)\n return d2 if d2[\"time\"] < d3[\"time\"] or only_depth_two else d3\n\n\ndef bjmm_depth_2_complexity(n, k, w, mem=inf, hmap=1, memory_access=0, mmt=0):\n \"\"\"\n Complexity estimate of BJMM algorithm in depth 2\n\n [MMT11] May, A., Meurer, A., Thomae, E.: Decoding random linear codes in 2^(0.054n). In: International Conference\n on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)\n\n [BJMM12] Becker, A., Joux, A., May, A., Meurer, A.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0\n improves information set decoding. In: Annual international conference on the theory and applications of\n cryptographic techniques. pp. 520–536. Springer (2012)\n\n expected weight distribution::\n\n +--------------------------+-------------------+-------------------+\n | <-----+ n - k - l +----->|<--+ (k + l)/2 +-->|<--+ (k + l)/2 +-->|\n | w - 2p | p | p |\n +--------------------------+-------------------+-------------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n - ``mmt`` -- restrict optimization to use of MMT algorithm (precisely enforce p1=p/2)\n\n EXAMPLES::\n\n >>> from .estimator import bjmm_depth_2_complexity\n >>> bjmm_depth_2_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k, mem)\n\n i_val = [35, 500, 35]\n i_val_inc = [10, 10, 10]\n params = [-1 for _ in range(3)]\n while True:\n stop = True\n for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):\n for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), min(i_val[1], n - k))):\n for p1 in range(max(params[2] - i_val_inc[2] // 2, (p + 1) // 2), min(w, i_val[2])):\n if mmt and p1 != p // 2:\n continue\n k1 = (k + l) // 2\n L1 = binom(k1, p1)\n if log2(L1) > time:\n continue\n\n if k1 - p < p1 - p / 2:\n continue\n reps = (binom(p, p / 2) * binom(k1 - p, p1 - p / 2)) ** 2\n\n l1 = int(ceil(log2(reps)))\n\n if l1 > l:\n continue\n\n L12 = 
max(1, L1 ** 2 // 2 ** l1)\n\n tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n\n Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(\n binom((k + l) // 2, p)) - solutions, 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n T_tree = 2 * _list_merge_complexity(L1, l1, hmap) + _list_merge_complexity(L12,\n l - l1,\n hmap)\n T_rep = int(ceil(2 ** (l1 - log2(reps))))\n\n tmp = Tp + log2(Tg + T_rep * T_tree)\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n time = min(tmp, time)\n if tmp == time:\n memory = tmp_mem\n params = [p, l, p1]\n\n for i in range(len(i_val)):\n if params[i] == i_val[i] - 1:\n stop = False\n i_val[i] += i_val_inc[i]\n\n if stop:\n break\n\n par = {\"l\": params[1], \"p\": params[0], \"p1\": params[2], \"depth\": 2}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef bjmm_depth_3_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):\n \"\"\"\n Complexity estimate of BJMM algorithm in depth 3\n\n [MMT11] May, A., Meurer, A., Thomae, E.: Decoding random linear codes in 2^(0.054n). In: International Conference\n on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)\n\n [BJMM12] Becker, A., Joux, A., May, A., Meurer, A.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0\n improves information set decoding. In: Annual international conference on the theory and applications of\n cryptographic techniques. pp. 520–536. Springer (2012)\n\n expected weight distribution::\n\n +--------------------------+-------------------+-------------------+\n | <-----+ n - k - l +----->|<--+ (k + l)/2 +-->|<--+ (k + l)/2 +-->|\n | w - 2p | p | p |\n +--------------------------+-------------------+-------------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import bjmm_depth_3_complexity\n >>> bjmm_depth_3_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k, mem)\n\n params = [-1 for _ in range(4)]\n i_val = [25, 400, 20, 10]\n i_val_inc = [10, 10, 10, 10]\n while True:\n stop = True\n for p in range(max(params[0] - i_val_inc[0] // 2 + (params[0] - i_val_inc[0] // 2) % 2, 0),\n min(w // 2, i_val[0]), 2):\n for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), min(n - k, i_val[1]))):\n k1 = (k + l) // 2\n for p2 in range(max(params[2] - i_val_inc[2] // 2, p // 2 + ((p // 2) % 2)), i_val[2], 2):\n for p1 in range(max(params[3] - i_val_inc[3] // 2, (p2 + 1) // 2), i_val[3]):\n L1 = binom(k1, p1)\n\n if log2(L1) > time:\n continue\n\n reps1 = (binom(p2, p2 / 2) * binom(k1 - p2, p1 - p2 / 2)) ** 2\n l1 = int((log2(reps1))) if reps1 != 1 else 0\n\n L12 = max(1, L1 ** 2 // 2 ** l1)\n reps2 = (binom(p, p / 2) * binom(k1 - p, p2 - p / 2)) ** 2\n l2 = int(ceil(log2(reps2))) if reps2 != 1 else 0\n\n L1234 = max(1, L12 ** 2 // 2 ** (l2 - l1))\n tmp_mem = log2((2 * L1 + L12 + L1234) + _mem_matrix(n, k, r))\n if tmp_mem > 
mem:\n continue\n\n Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(\n binom((k + l) // 2, p)) - solutions, 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n T_tree = 4 * _list_merge_complexity(L1, l1, hmap) + 2 * _list_merge_complexity(L12,\n l2 - l1,\n hmap) + _list_merge_complexity(\n L1234,\n l - l2,\n hmap)\n T_rep = int(ceil(2 ** (3 * max(0, l1 - log2(reps1)) + max(0, l2 - log2(reps2)))))\n\n tmp = Tp + log2(Tg + T_rep * T_tree)\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n if tmp < time:\n time = tmp\n memory = tmp_mem\n params = [p, l, p2, p1]\n\n for i in range(len(i_val)):\n if params[i] >= i_val[i] - i_val_inc[i] / 2:\n stop = False\n i_val[i] += i_val_inc[i]\n\n if stop:\n break\n\n par = {\"l\": params[1], \"p\": params[0], \"p1\": params[3], \"p2\": params[2], \"depth\": 3}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef bjmm_depth_2_partially_disjoint_weight_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):\n \"\"\"\n Complexity estimate of BJMM algorithm in depth 2 using partially disjoint weight, applying explicit MitM-NN search on second level\n\n [MMT11] May, A., Meurer, A., Thomae, E.: Decoding random linear codes in 2^(0.054n). In: International Conference\n on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)\n\n [BJMM12] Becker, A., Joux, A., May, A., Meurer, A.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0\n improves information set decoding. In: Annual international conference on the theory and applications of\n cryptographic techniques. pp. 520–536. Springer (2012)\n\n [EssBel21] Esser, A. and Bellini, E.: Syndrome Decoding Estimator. In: IACR Cryptol. ePrint Arch. 2021 (2021), 1243\n\n expected weight distribution::\n\n +--------------------------+--------------------+--------------------+--------+--------+\n | <-+ n - k - l1 - 2 l2 +->|<-+ (k + l1) / 2 +->|<-+ (k + l1) / 2 +->| l2 | l2 |\n | w - 2 p - 2 w2 | p | p | w2 | w2 |\n +--------------------------+--------------------+--------------------+--------+--------+\n\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import bjmm_depth_2_partially_disjoint_weight_complexity\n >>> bjmm_depth_2_partially_disjoint_weight_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k, mem)\n\n i_val = [30, 25, 5]\n i_val_inc = [10, 10, 10, 10, 10]\n params = [-1 for _ in range(5)]\n while True:\n stop = True\n for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):\n for p1 in range(max(params[1] - i_val_inc[1] // 2, (p + 1) // 2), min(w, i_val[1])):\n for w2 in range(max(params[2] - i_val_inc[2] // 2, 0), min(w - p1, i_val[2])):\n\n #############################################################################################\n ######choose start value for l1 close to the logarithm of the number of representations######\n 
#############################################################################################\n try:\n f = lambda x: log2((binom(p, p // 2) * binom_sp((k + x) / 2 - p, p1 - p // 2))) * 2 - x\n l1_val = int(fsolve(f, 0)[0])\n except:\n continue\n if f(l1_val) < 0 or f(l1_val) > 1:\n continue\n #############################################################################################\n\n for l1 in range(max(0, l1_val - i_val_inc[3] // 2), l1_val + i_val_inc[3] // 2):\n k1 = (k + l1) // 2\n reps = (binom(p, p // 2) * binom(k1 - p, p1 - p // 2)) ** 2\n\n L1 = binom(k1, p1)\n if log2(L1) > time:\n continue\n\n L12 = L1 ** 2 // 2 ** l1\n L12 = max(L12, 1)\n tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n\n #################################################################################\n #######choose start value for l2 such that resultlist size is close to L12#######\n #################################################################################\n try:\n f = lambda x: log2(int(L12)) + int(2) * log2(binom_sp(x, int(w2))) - int(2) * x\n l2_val = int(fsolve(f, 0)[0])\n except:\n continue\n if f(l2_val) < 0 or f(l2_val) > 1:\n continue\n ################################################################################\n l2_min = w2\n l2_max = (n - k - l1 - (w - 2 * p - 2 * w2)) // 2\n l2_range = [l2_val - i_val_inc[4] // 2, l2_val + i_val_inc[4] // 2]\n for l2 in range(max(l2_min, l2_range[0]), min(l2_max, l2_range[1])):\n Tp = max(\n log2(binom(n, w)) - log2(binom(n - k - l1 - 2 * l2, w - 2 * p - 2 * w2)) - 2 * log2(\n binom(k1, p)) - 2 * log2(binom(l2, w2)) - solutions, 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n\n T_tree = 2 * _list_merge_complexity(L1, l1, hmap) + _mitm_nn_complexity(L12, 2 * l2, 2 * w2,\n hmap)\n T_rep = int(ceil(2 ** max(l1 - log2(reps), 0)))\n\n tmp = Tp + log2(Tg + T_rep * T_tree)\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n time = min(tmp, time)\n\n if tmp == time:\n memory = tmp_mem\n params = [p, p1, w2, l2, l1]\n\n for i in range(len(i_val)):\n if params[i] >= i_val[i] - i_val_inc[i] / 2:\n i_val[i] += i_val_inc[i]\n stop = False\n if stop:\n break\n break\n\n par = {\"l1\": params[4], \"p\": params[0], \"p1\": params[1], \"depth\": 2, \"l2\": params[3], \"w2\": params[2]}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef bjmm_depth_2_disjoint_weight_complexity(n, k, w, mem=inf, hmap=1, p_range=[0, 25], memory_access=0):\n \"\"\"\n Complexity estimate of May-Ozerov algorithm in depth 2 using Indyk-Motwani for NN search\n\n\n [MMT11] May, A., Meurer, A., Thomae, E.: Decoding random linear codes in 2^(0.054n). In: International Conference\n on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)\n\n [BJMM12] Becker, A., Joux, A., May, A., Meurer, A.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0\n improves information set decoding. In: Annual international conference on the theory and applications of\n cryptographic techniques. pp. 520–536. Springer (2012)\n\n [EssBel21] Esser, A. and Bellini, E.: Syndrome Decoding Estimator. In: IACR Cryptol. ePrint Arch. 
2021 (2021), 1243\n \n expected weight distribution::\n\n +---------------------------+-------------+------------+----------+----------+----------+----------+\n |<-+ n - k - 2 l1 - 2 l2 +->|<-+ k / 2 +->|<-+ k / 2 ->|<-+ l1 +->|<-+ l1 +->|<-+ l2 +->|<-+ l2 +->|\n | w - 2 p - 2 w1 - 2 w2 | p | p | w1 | w1 | w2 | w2 |\n +---------------------------+-------------+------------+----------+----------+----------+----------+\n\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``p_range`` -- interval in which the parameter p is searched (default: [0, 25], helps speeding up computation)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import bjmm_depth_2_disjoint_weight_complexity\n >>> bjmm_depth_2_disjoint_weight_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k)\n i_val = [p_range[1], 20, 10, 10, 5]\n i_val_inc = [10, 10, 10, 10, 10, 10, 10]\n params = [-1 for _ in range(7)]\n while True:\n stop = True\n for p in range(max(p_range[0], params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):\n for p1 in range(max(params[1] - i_val_inc[1] // 2, (p + 1) // 2), min(w, i_val[1])):\n s = max(params[2] - i_val_inc[2] // 2, 0)\n for w1 in range(s - (s % 2), min(w // 2 - p, i_val[2]), 2):\n for w11 in range(max(params[3] - i_val_inc[3] // 2, (w1 + 1) // 2), min(w, i_val[3])):\n for w2 in range(max(params[4] - i_val_inc[4] // 2, 0), min(w // 2 - p - w1, i_val[4])):\n ##################################################################################\n ######choose start value for l1 such that representations cancel out exactly######\n ##################################################################################\n try:\n f = lambda x: 2 * log2((binom(p, p // 2) * binom(k // 2 - p, p1 - p // 2)) * (\n binom_sp(x, w1 // 2) * binom_sp(x - w1, w11 - w1 // 2)) + 1) - 2 * x\n l1_val = int(\n fsolve(f, 2 * log2((binom(p, p // 2) * binom(k // 2 - p, p1 - p // 2))))[0])\n except:\n continue\n if f(l1_val) < 0 or f(l1_val) > 10:\n continue\n #################################################################################\n\n for l1 in range(max(l1_val - i_val_inc[5], w1, w11), l1_val + i_val_inc[5]):\n k1 = k // 2\n reps = (binom(p, p // 2) * binom(k1 - p, p1 - p // 2)) ** 2 * (\n binom(w1, w1 // 2) * binom(l1 - w1, w11 - w1 // 2)) ** 2\n reps = max(reps, 1)\n L1 = binom(k1, p1)\n if log2(L1) > time:\n continue\n\n L12 = L1 ** 2 * binom(l1, w11) ** 2 // 2 ** (2 * l1)\n L12 = max(L12, 1)\n tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n\n #################################################################################\n #######choose start value for l2 such that resultlist size is equal to L12#######\n #################################################################################\n try:\n f = lambda x: log2(L12) + 2 * log2(binom_sp(x, w2) + 1) - 2 * x\n l2_val = int(fsolve(f, 50)[0])\n except:\n continue\n if f(l2_val) < 0 or f(l2_val) > 10:\n continue\n 
################################################################################\n l2_max = (n - k - 2 * l1 - (w - 2 * p - 2 * w1 - 2 * w2)) // 2\n l2_min = w2\n l2_range = [l2_val - i_val_inc[6] // 2, l2_val + i_val_inc[6] // 2]\n for l2 in range(max(l2_min, l2_range[0]), min(l2_max, l2_range[1])):\n Tp = max(\n log2(binom(n, w)) - log2(\n binom(n - k - 2 * l1 - 2 * l2, w - 2 * p - 2 * w1 - 2 * w2)) - 2 * log2(\n binom(k1, p)) - 2 * log2(binom(l1, w1)) - 2 * log2(\n binom(l2, w2)) - solutions, 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n\n T_tree = 2 * _mitm_nn_complexity(L1, 2 * l1, 2 * w11, hmap) + _mitm_nn_complexity(\n L12, 2 * l2, 2 * w2, hmap)\n T_rep = int(ceil(2 ** max(2 * l1 - log2(reps), 0)))\n\n tmp = Tp + log2(Tg + T_rep * T_tree)\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n time = min(tmp, time)\n\n if tmp == time:\n memory = tmp_mem\n params = [p, p1, w1, w11, w2, l2, l1 + l2]\n\n for i in range(len(i_val)):\n if params[i] >= i_val[i] - i_val_inc[i] / 2:\n i_val[i] += i_val_inc[i]\n stop = False\n if stop:\n break\n break\n par = {\"l\": params[6], \"p\": params[0], \"p1\": params[1], \"w1\": params[2], \"w11\": params[3], \"l2\": params[5],\n \"w2\": params[4], \"depth\": 2}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef both_may_depth_2_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):\n \"\"\"\n Complexity estimate of Both-May algorithm in depth 2 using Indyk-Motwani and MitM for NN search\n\n [BotMay18] Both, L., May, A.: Decoding linear codes with high error rate and its impact for LPN security. In:\n International Conference on Post-Quantum Cryptography. pp. 25--46. Springer (2018)\n\n expected weight distribution::\n\n +-------------------+---------+-------------------+-------------------+\n | <--+ n - k - l+-->|<-+ l +->|<----+ k / 2 +---->|<----+ k / 2 +---->|\n | w - w2 - 2p | w2 | p | p |\n +-------------------+---------+-------------------+-------------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import both_may_depth_2_complexity\n >>> both_may_depth_2_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k, mem)\n\n i_val = [20, 160, 5, 4, 15]\n i_val_inc = [10, 10, 10, 6, 6]\n params = [-1 for _ in range(5)]\n while True:\n stop = True\n for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):\n for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):\n for w1 in range(max(params[2] - i_val_inc[2] // 2, 0), min(w, l + 1, i_val[2])):\n for w2 in range(max(params[3] - i_val_inc[3] // 2, 0), min(w - 2 * p, l + 1, i_val[3], 2 * w1), 2):\n for p1 in range(max(params[4] - i_val_inc[4] // 2, (p + 1) // 2), min(w, i_val[4])):\n k1 = (k) // 2\n reps = (binom(p, p / 2) * binom(k1 - p, p1 - p / 2)) ** 2 * binom(w2, w2 / 2) * binom(\n l - w2,\n w1 - w2 / 2)\n reps = 1 if reps == 0 else reps\n L1 = binom(k1, p1)\n\n if log2(L1) > time:\n 
continue\n\n L12 = max(1, L1 ** 2 * binom(l, w1) // 2 ** l)\n\n tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - w2 - 2 * p)) - 2 * log2(\n binom(k1, p)) - log2(binom(l, w2)) - solutions, 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n\n first_level_nn = _indyk_motwani_complexity(L1, l, w1, hmap)\n second_level_nn = _indyk_motwani_complexity(L12, n - k - l, w - 2 * p - w2, hmap)\n T_tree = 2 * first_level_nn + second_level_nn\n T_rep = int(ceil(2 ** max(0, l - log2(reps))))\n\n tmp = Tp + log2(Tg + T_rep * T_tree)\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n time = min(tmp, time)\n\n if tmp == time:\n memory = tmp_mem\n params = [p, l, w1, w2, p1, log2(L1), log2(L12)]\n\n for i in range(len(i_val)):\n if params[i] >= i_val[i] - i_val_inc[i] / 2:\n i_val[i] += i_val_inc[i]\n stop = False\n if stop:\n break\n\n par = {\"l\": params[1], \"p\": params[0], \"p1\": params[4], \"w1\": params[2], \"w2\": params[3], \"depth\": 2}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef may_ozerov_complexity(n, k, w, mem=inf, hmap=1, only_depth_two=0, memory_access=0):\n \"\"\"\n Complexity estimate of May-Ozerov algorithm using Indyk-Motwani for NN search\n\n [MayOze15] May, A. and Ozerov, I.: On computing nearest neighbors with applications to decoding of binary linear codes.\n In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015)\n\n expected weight distribution::\n\n +-------------------------+---------------------+---------------------+\n | <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|\n | w - 2p | p | p |\n +-------------------------+---------------------+---------------------+\n\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import may_ozerov_complexity\n >>> may_ozerov_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n d2 = may_ozerov_depth_2_complexity(n, k, w, mem, hmap, memory_access)\n d3 = may_ozerov_depth_3_complexity(n, k, w, mem, hmap, memory_access)\n return d2 if d2[\"time\"] < d3[\"time\"] or only_depth_two else d3\n\n\ndef may_ozerov_depth_2_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):\n \"\"\"\n Complexity estimate of May-Ozerov algorithm in depth 2 using Indyk-Motwani for NN search\n\n [MayOze15] May, A. and Ozerov, I.: On computing nearest neighbors with applications to decoding of binary linear codes.\n In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. 
Springer (2015)\n\n expected weight distribution::\n\n +-------------------------+---------------------+---------------------+\n | <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|\n | w - 2p | p | p |\n +-------------------------+---------------------+---------------------+\n\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import may_ozerov_depth_2_complexity\n >>> may_ozerov_depth_2_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k, mem)\n\n i_val = [30, 300, 25]\n i_val_inc = [10, 10, 10]\n params = [-1 for _ in range(3)]\n while True:\n stop = True\n for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):\n for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):\n for p1 in range(max(params[2] - i_val_inc[2] // 2, (p + 1) // 2), min(w, i_val[2])):\n k1 = (k + l) // 2\n reps = (binom(p, p // 2) * binom(k1 - p, p1 - p // 2)) ** 2\n\n L1 = binom(k1, p1)\n if log2(L1) > time:\n continue\n\n L12 = L1 ** 2 // 2 ** l\n L12 = max(L12, 1)\n tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n\n Tp = max(\n log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(binom(k1, p)) - solutions, 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n\n T_tree = 2 * _list_merge_complexity(L1, l, hmap) + _indyk_motwani_complexity(L12,\n n - k - l,\n w - 2 * p,\n hmap)\n T_rep = int(ceil(2 ** max(l - log2(reps), 0)))\n\n tmp = Tp + log2(Tg + T_rep * T_tree)\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n time = min(tmp, time)\n\n if tmp == time:\n memory = tmp_mem\n params = [p, l, p1]\n\n for i in range(len(i_val)):\n if params[i] >= i_val[i] - i_val_inc[i] / 2:\n i_val[i] += i_val_inc[i]\n stop = False\n if stop:\n break\n break\n\n par = {\"l\": params[1], \"p\": params[0], \"p1\": params[2], \"depth\": 2}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n return res\n\n\ndef may_ozerov_depth_3_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):\n \"\"\"\n Complexity estimate of May-Ozerov algorithm in depth 3 using Indyk-Motwani for NN search\n\n [MayOze15] May, A. and Ozerov, I.: On computing nearest neighbors with applications to decoding of binary linear codes.\n In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. 
Springer (2015)\n\n expected weight distribution::\n\n +-------------------------+---------------------+---------------------+\n | <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|\n | w - 2p | p | p |\n +-------------------------+---------------------+---------------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``mem`` -- upper bound on the available memory (as log2), default unlimited\n - ``hmap`` -- indicates if hashmap is being used (default: true)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import may_ozerov_depth_3_complexity\n >>> may_ozerov_depth_3_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n solutions = max(0, log2(binom(n, w)) - (n - k))\n time = inf\n memory = 0\n r = _optimize_m4ri(n, k, mem)\n\n i_val = [20, 200, 20, 10]\n i_val_inc = [10, 10, 10, 10]\n params = [-1 for _ in range(4)]\n while True:\n stop = True\n for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):\n for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):\n k1 = (k + l) // 2\n for p2 in range(max(params[2] - i_val_inc[2] // 2, p // 2 + ((p // 2) % 2)), p + i_val[2], 2):\n for p1 in range(max(params[3] - i_val_inc[3] // 2, (p2 + 1) // 2),\n min(p2 + i_val[3], k1 - p2 // 2)):\n L1 = binom(k1, p1)\n if log2(L1) > time:\n continue\n\n reps1 = (binom(p2, p2 // 2) * binom(k1 - p2, p1 - p2 // 2)) ** 2\n l1 = int(ceil(log2(reps1)))\n\n if l1 > l:\n continue\n L12 = max(1, L1 ** 2 // 2 ** l1)\n reps2 = (binom(p, p // 2) * binom(k1 - p, p2 - p // 2)) ** 2\n\n L1234 = max(1, L12 ** 2 // 2 ** (l - l1))\n tmp_mem = log2((2 * L1 + L12 + L1234) + _mem_matrix(n, k, r))\n if tmp_mem > mem:\n continue\n\n Tp = max(\n log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(binom(k1, p)) - solutions,\n 0)\n Tg = _gaussian_elimination_complexity(n, k, r)\n T_tree = 4 * _list_merge_complexity(L1, l1, hmap) + 2 * _list_merge_complexity(L12,\n l - l1,\n hmap) + _indyk_motwani_complexity(\n L1234,\n n - k - l,\n w - 2 * p,\n hmap)\n T_rep = int(ceil(2 ** (max(l - log2(reps2), 0) + 3 * max(l1 - log2(reps1), 0))))\n tmp = Tp + log2(Tg + T_rep * T_tree)\n tmp += __memory_access_cost(tmp_mem, memory_access)\n\n if tmp < time:\n time = tmp\n memory = tmp_mem\n params = [p, l, p2, p1]\n for i in range(len(i_val)):\n if params[i] >= i_val[i] - i_val_inc[i] / 2:\n i_val[i] += i_val_inc[i]\n stop = False\n if stop:\n break\n break\n par = {\"l\": params[1], \"p\": params[0], \"p1\": params[3], \"p2\": params[2], \"depth\": 3}\n res = {\"time\": time, \"memory\": memory, \"parameters\": par}\n\n return res\n\n\ndef quantum_prange_complexity(n, k, w, maxdepth=96, matrix_mult_constant=2.5):\n \"\"\"\n Optimistic complexity estimate of quantum version of Prange's algorithm\n\n [Pra62] Prange, E.: The use of information sets in decoding cyclic codes. IRE Transactions\n on Information Theory 8(5), 5–9 (1962)\n\n [Ber10] Bernstein, D.J.: Grover vs. McEliece. In: International Workshop on Post-QuantumCryptography.\n pp. 73–80. 
Springer (2010)\n\n expected weight distribution::\n\n +--------------------------------+-------------------------------+\n | <----------+ n - k +---------> | <----------+ k +------------> |\n | w | 0 |\n +--------------------------------+-------------------------------+\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``maxdepth`` -- maximum allowed depth of the quantum circuit (default: 96)\n - ``matrix_mult_constant`` -- used matrix multiplication constant (default: 2.5)\n\n\n EXAMPLES::\n\n >>> from .estimator import quantum_prange_complexity\n >>> quantum_prange_complexity(n=100,k=50,w=10) # doctest: +SKIP\n\n \"\"\"\n\n Tg = matrix_mult_constant * log2(n - k)\n if Tg > maxdepth:\n return 0\n\n full_circuit = Tg + (log2(binom(n, w)) - log2(binom(n - k, w))) / 2\n if full_circuit < maxdepth:\n return full_circuit\n\n time = log2(binom(n, w)) - log2(binom(n - k, w)) + 2 * Tg - maxdepth\n return time\n\n\ndef sd_estimate_display(n, k, w, memory_limit=inf, bit_complexities=1, hmap=1, skip=[\"BJMM-dw\"], precision=1,\n truncate=0,\n all_parameters=0, theoretical_estimates=0, use_mo=1, workfactor_accuracy=1, limit_depth=0,\n quantum_estimates=1,\n maxdepth=96, matrix_mult_constant=2.5, memory_access=0):\n \"\"\"\n Output estimates of complexity to solve the syndrome decoding problem\n\n INPUT:\n\n - ``n`` -- length of the code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``memory_limit`` -- upper bound on the available memory (in log2) (default: unlimited)\n - ``bit_complexities`` -- state security level in number of bitoperations, otherwise field operations (default: true)\n - ``hmap`` -- indicates if hashmap is used for sorting lists (default: true)\n - ``skip`` -- list of algorithms not to consider (default: [\"BJMM-dw\"] (this variant will take a long time to optimize))\n - ``precision`` -- amount of decimal places displayed for complexity estimates (default: 1)\n - ``truncate`` -- decimal places exceeding ``precision`` are truncated, otherwise rounded (default: false)\n - ``all_parameters`` -- print values of all hyperparameters (default: false)\n - ``theoretical_estimates`` -- compute theoretical workfactors for all algorithms (default: false)\n - ``use_mo`` -- use may-ozerov nearest neighbor search in theoretical workfactor computation (default: true)\n - ``workfactor_accuracy`` -- the higher the more accurate the workfactor computation, can slow down computations significantly, recommended range 0-2 (needs to be larger than 0) (default: 1)\n - ``limit_depth`` -- restricts BJMM and May-Ozerov algorithms to depth two only (default: false)\n - ``quantum_estimates`` -- compute quantum estimates of all algorithms (default: true)\n - ``maxdepth`` -- maximum allowed depth of the quantum circuit (default: 96)\n - ``matrix_mult_constant`` -- used matrix multiplication constant (default: 2.5)\n - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)\n\n EXAMPLES::\n\n >>> from .estimator import *\n >>> sd_estimate_display(n=600,k=400,w=22)\n =========================================================================\n Complexity estimation to solve the (600,400,22) syndrome decoding problem\n =========================================================================\n The following table states bit 
complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.\n The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).\n +----------------+---------------+---------+\n | | estimate | quantum |\n +----------------+------+--------+---------+\n | algorithm | time | memory | time |\n +----------------+------+--------+---------+\n | Prange | 60.1 | 17.3 | 37.1 |\n | Stern | 47.0 | 24.5 | -- |\n | Dumer | 47.6 | 24.6 | -- |\n | Ball Collision | 47.7 | 24.5 | -- |\n | BJMM (MMT) | 47.6 | 22.7 | -- |\n | BJMM-pdw | 47.7 | 21.7 | -- |\n | May-Ozerov | 46.5 | 22.6 | -- |\n | Both-May | 47.1 | 22.6 | -- |\n +----------------+------+--------+---------+\n\n\n >>> from .estimator import *\n >>> sd_estimate_display(n=1000,k=500,w=100,all_parameters=1,theoretical_estimates=1,precision=2) # long time\n ===========================================================================\n Complexity estimation to solve the (1000,500,100) syndrome decoding problem\n ===========================================================================\n The following table states bit complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.\n The approximation is based on the theoretical workfactor of the respective algorithms, disregarding all polynomial factors and using further approximations that introduce additional polynomial inaccurcies.\n The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).\n +----------------+-----------------+-----------------+---------+--------------------------------------------------------------------+\n | | estimate | approximation | quantum | parameters |\n +----------------+--------+--------+--------+--------+---------+--------------------------------------------------------------------+\n | algorithm | time | memory | time | memory | time | classical |\n +----------------+--------+--------+--------+--------+---------+--------------------------------------------------------------------+\n | Prange | 134.46 | 19.26 | 108.03 | 0.00 | 76.39 | r : 7 |\n | Stern | 117.04 | 38.21 | 104.02 | 31.39 | -- | l : 27 | p : 4 |\n | Dumer | 116.82 | 38.53 | 103.76 | 33.68 | -- | l : 28 | p : 4 |\n | Ball Collision | 117.04 | 38.21 | 103.76 | 32.67 | -- | l : 27 | p : 4 | pl : 0 |\n | BJMM (MMT) | 112.39 | 73.15 | 90.17 | 67.76 | -- | l : 120 | p : 16 | p1 : 10 | depth : 2 |\n | BJMM-pdw | 113.92 | 52.74 | -- | -- | -- | l1 : 35 | p : 10 | p1 : 6 | depth : 2 | l2 : 21 | w2 : 0 |\n | May-Ozerov | 111.56 | 70.44 | 89.51 | 51.39 | -- | l : 69 | p : 14 | p1 : 10 | depth : 2 |\n | Both-May | 113.68 | 68.58 | 87.60 | 64.13 | -- | l : 75 | p : 14 | p1 : 10 | w1 : 2 | w2 : 2 | depth : 2 |\n +----------------+--------+--------+--------+--------+---------+--------------------------------------------------------------------+\n\n\n TESTS::\n\n >>> from .estimator import *\n >>> sd_estimate_display(24646,12323,142,all_parameters=True) # long time\n ==============================================================================\n Complexity estimation to solve the (24646,12323,142) syndrome decoding problem\n ==============================================================================\n The following table states bit complexity estimates of the 
corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.\n The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).\n +----------------+----------------+---------+--------------------------------------------------------------------+\n | | estimate | quantum | parameters |\n +----------------+-------+--------+---------+--------------------------------------------------------------------+\n | algorithm | time | memory | time | classical |\n +----------------+-------+--------+---------+--------------------------------------------------------------------+\n | Prange | 182.1 | 28.4 | 114.5 | r : 11 |\n | Stern | 160.6 | 39.8 | -- | l : 33 | p : 2 |\n | Dumer | 161.1 | 39.8 | -- | l : 28 | p : 2 |\n | Ball Collision | 161.1 | 39.8 | -- | l : 28 | p : 2 | pl : 0 |\n | BJMM (MMT) | 160.9 | 54.2 | -- | l : 74 | p : 4 | p1 : 3 | depth : 2 |\n | BJMM-pdw | 160.9 | 55.0 | -- | l1 : 30 | p : 4 | p1 : 3 | depth : 2 | l2 : 22 | w2 : 0 |\n | May-Ozerov | 160.4 | 55.0 | -- | l : 30 | p : 4 | p1 : 3 | depth : 2 |\n | Both-May | 161.1 | 37.8 | -- | l : 4 | p : 2 | p1 : 1 | w1 : 1 | w2 : 0 | depth : 2 |\n +----------------+-------+--------+---------+--------------------------------------------------------------------+\n\n\n >>> from .estimator import *\n >>> sd_estimate_display(300,200,20,all_parameters=True, skip=[])\n =========================================================================\n Complexity estimation to solve the (300,200,20) syndrome decoding problem\n =========================================================================\n The following table states bit complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.\n The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).\n +----------------+---------------+---------+-------------------------------------------------------------------------------------------+\n | | estimate | quantum | parameters |\n +----------------+------+--------+---------+-------------------------------------------------------------------------------------------+\n | algorithm | time | memory | time | classical |\n +----------------+------+--------+---------+-------------------------------------------------------------------------------------------+\n | Prange | 52.5 | 15.3 | 33.5 | r : 5 |\n | Stern | 40.7 | 21.5 | -- | l : 13 | p : 2 |\n | Dumer | 41.1 | 26.9 | -- | l : 18 | p : 3 |\n | Ball Collision | 41.3 | 21.5 | -- | l : 12 | p : 2 | pl : 0 |\n | BJMM (MMT) | 41.1 | 27.5 | -- | l : 25 | p : 4 | p1 : 2 | depth : 2 |\n | BJMM-pdw | 41.3 | 18.9 | -- | l1 : 3 | p : 2 | p1 : 1 | depth : 2 | l2 : 4 | w2 : 0 |\n | BJMM-dw | 41.3 | 19.7 | -- | l : 6 | p : 2 | p1 : 1 | w1 : 0 | w11 : 1 | l2 : 5 | w2 : 0 | depth : 2 |\n | May-Ozerov | 40.1 | 19.7 | -- | l : 2 | p : 2 | p1 : 1 | depth : 2 |\n | Both-May | 40.4 | 19.7 | -- | l : 2 | p : 2 | p1 : 1 | w1 : 2 | w2 : 0 | depth : 2 |\n +----------------+------+--------+---------+-------------------------------------------------------------------------------------------+\n\n\n\n \"\"\"\n\n complexities = _sd_estimate(n, k, w, theoretical_estimates, memory_limit, bit_complexities, hmap, skip, use_mo,\n workfactor_accuracy, limit_depth, quantum_estimates, maxdepth, matrix_mult_constant,\n 
memory_access)\n\n headline = \"Complexity estimation to solve the ({},{},{}) syndrome decoding problem\".format(n, k, w)\n print(\"=\" * len(headline))\n print(headline)\n print(\"=\" * len(headline))\n if bit_complexities:\n print(\n \"The following table states bit complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.\")\n else:\n print(\n \"The following table states complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.\")\n print(\"The time complexity estimate is measured in the number of additions in (F_2)^n.\")\n print(\"The memory complexity estimate is given in the number of vector space elements that need to be stored.\")\n\n if theoretical_estimates:\n print(\n \"The approximation is based on the theoretical workfactor of the respective algorithms, disregarding all polynomial factors and using further approximations that introduce additional polynomial inaccurcies.\")\n if quantum_estimates:\n print(\n \"The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).\")\n tables = []\n table_fields = ['algorithm']\n\n tbl_names = PrettyTable(table_fields)\n tbl_names.padding_width = 1\n tbl_names.title = ' '\n\n for i in complexities.keys():\n tbl_names.add_row([i])\n tbl_names.align[\"algorithm\"] = \"l\"\n tables.append(tbl_names)\n\n table_fields = ['time', 'memory']\n tbl_estimates = PrettyTable(table_fields)\n tbl_estimates.padding_width = 1\n tbl_estimates.title = 'estimate'\n tbl_estimates.align[\"time\"] = \"r\"\n tbl_estimates.align[\"memory\"] = \"r\"\n for i in complexities.keys():\n if complexities[i][\"time\"] != inf:\n T, M = __round_or_truncate_to_given_precision(complexities[i][\"time\"], complexities[i][\"memory\"], truncate,\n precision)\n else:\n T, M = \"--\", \"--\"\n tbl_estimates.add_row([T, M])\n\n tables.append(tbl_estimates)\n\n if theoretical_estimates:\n table_fields = ['time', 'memory']\n tbl_approx = PrettyTable(table_fields)\n tbl_approx.padding_width = 1\n tbl_approx.title = 'approximation'\n tbl_approx.align[\"time\"] = \"r\"\n tbl_approx.align[\"memory\"] = \"r\"\n\n for i in complexities.keys():\n if complexities[i][\"Workfactor time\"] != 0:\n T, M = __round_or_truncate_to_given_precision(complexities[i][\"Workfactor time\"] * n,\n complexities[i][\"Workfactor memory\"] * n, truncate,\n precision)\n else:\n T, M = \"--\", \"--\"\n tbl_approx.add_row([T, M])\n\n tables.append(tbl_approx)\n\n if quantum_estimates:\n table_fields = [' time']\n tbl_quantum = PrettyTable(table_fields)\n tbl_quantum.padding_width = 1\n tbl_quantum.title = \"quantum\"\n tbl_quantum.align[\"time\"] = \"r\"\n for i in complexities.keys():\n if \"quantum time\" in complexities[i].keys() and complexities[i][\"quantum time\"] != 0:\n T, M = __round_or_truncate_to_given_precision(complexities[i][\"quantum time\"], 0, truncate, precision)\n else:\n T = \"--\"\n tbl_quantum.add_row([T])\n tables.append(tbl_quantum)\n\n if all_parameters:\n table_fields = ['classical']\n tbl_params = PrettyTable(table_fields)\n tbl_params.padding_width = 1\n tbl_params.title = \"parameters\"\n tbl_params.align['classical'] = \"l\"\n\n for i in complexities.keys():\n row = \"\"\n for j in complexities[i][\"parameters\"].keys():\n row += \"{:<{align}}\".format(j, align=max(2, len(j))) + \" : \" + '{:3d}'.format(\n 
complexities[i][\"parameters\"][j]) + \" | \"\n tbl_params.add_row([row[:-3]])\n\n tables.append(tbl_params)\n\n tbl_join = __concat_pretty_tables(str(tables[0]), str(tables[1]))\n for i in range(2, len(tables)):\n tbl_join = __concat_pretty_tables(tbl_join, str(tables[i]))\n\n print(tbl_join)\n\n\ndef _add_theoretical_estimates(complexities, n, k, w, memory_limit, skip, use_mo, workfactor_accuracy):\n rate = k / n\n omega = w / n\n\n grid_std_accuracy = {\"prange\": [20, 150], \"stern\": [20, 150], \"dumer\": [20, 150], \"ball_collision\": [15, 150],\n \"bjmm\": [10, 250], \"may-ozerov\": [5, 1000], \"both-may\": [5, 1000]}\n\n if workfactor_accuracy != 1:\n for i in grid_std_accuracy.keys():\n for j in range(2):\n grid_std_accuracy[i][j] = int(ceil(grid_std_accuracy[i][j] * workfactor_accuracy))\n\n for i in complexities.keys():\n complexities[i][\"Workfactor time\"] = 0\n complexities[i][\"Workfactor memory\"] = 0\n\n nr_algorithms = 7 - len(skip)\n nr_algorithms += 1 if \"BJMM-dw\" in skip else 0\n nr_algorithms += 1 if \"BJMM-p-dw\" in skip or \"BJMM-pdw\" in skip else 0\n bar = Bar('Computing theoretical workfactors\\t', max=nr_algorithms)\n\n if \"prange\" not in skip:\n T, M = prange_workfactor(rate, omega, grid_std_accuracy[\"prange\"][0], grid_std_accuracy[\"prange\"][1],\n memory_limit)\n complexities[\"Prange\"][\"Workfactor time\"] = T\n complexities[\"Prange\"][\"Workfactor memory\"] = M\n bar.next()\n if \"stern\" not in skip:\n T, M = stern_workfactor(rate, omega, grid_std_accuracy[\"stern\"][0], grid_std_accuracy[\"stern\"][1], memory_limit)\n complexities[\"Stern\"][\"Workfactor time\"] = T\n complexities[\"Stern\"][\"Workfactor memory\"] = M\n bar.next()\n if \"dumer\" not in skip:\n T, M = dumer_workfactor(rate, omega, grid_std_accuracy[\"dumer\"][0], grid_std_accuracy[\"dumer\"][1], memory_limit)\n complexities[\"Dumer\"][\"Workfactor time\"] = T\n complexities[\"Dumer\"][\"Workfactor memory\"] = M\n bar.next()\n if \"ball_collision\" not in skip:\n T, M = ball_collision_workfactor(rate, omega, grid_std_accuracy[\"ball_collision\"][0],\n grid_std_accuracy[\"ball_collision\"][1], memory_limit)\n complexities[\"Ball Collision\"][\"Workfactor time\"] = T\n complexities[\"Ball Collision\"][\"Workfactor memory\"] = M\n bar.next()\n if \"BJMM\" not in skip and \"MMT\" not in skip:\n T, M = bjmm_workfactor(rate, omega, grid_std_accuracy[\"bjmm\"][0], grid_std_accuracy[\"bjmm\"][1], memory_limit)\n complexities[\"BJMM (MMT)\"][\"Workfactor time\"] = T\n complexities[\"BJMM (MMT)\"][\"Workfactor memory\"] = M\n bar.next()\n if \"MO\" not in skip and \"May-Ozerov\" not in skip:\n T, M = may_ozerov_workfactor(rate, omega, grid_std_accuracy[\"may-ozerov\"][0],\n grid_std_accuracy[\"may-ozerov\"][1], memory_limit, use_mo)\n complexities[\"May-Ozerov\"][\"Workfactor time\"] = T\n complexities[\"May-Ozerov\"][\"Workfactor memory\"] = M\n bar.next()\n if \"BM\" not in skip and \"Both-May\" not in skip:\n T, M = both_may_workfactor(rate, omega, grid_std_accuracy[\"both-may\"][0], grid_std_accuracy[\"both-may\"][1],\n memory_limit, use_mo)\n complexities[\"Both-May\"][\"Workfactor time\"] = T\n complexities[\"Both-May\"][\"Workfactor memory\"] = M\n bar.next()\n\n bar.finish()\n\n\ndef _sd_estimate(n, k, w, theoretical_estimates, memory_limit, bit_complexities, hmap, skip, use_mo,\n workfactor_accuracy, limit_depth, quantum_estimates, maxdepth, matrix_mult_constant, memory_access):\n \"\"\"\n Estimate complexity to solve syndrome decoding problem\n\n INPUT:\n\n - ``n`` -- length of the 
code\n - ``k`` -- dimension of the code\n - ``w`` -- Hamming weight of error vector\n - ``memory_limit`` -- upper bound on the available memory (as log2(bits))\n - ``hmap`` -- indicates if hashmap should be used for sorting lists\n - ``skip`` -- list of algorithms not to consider\n - ``use_mo`` -- use may-ozerov nearest neighbor search in theoretical workfactor computation\n - ``workfactor_accuracy`` -- the higher the more accurate the workfactor computation, can slow down computations significantly, recommended range 0-2 (needs to be larger than 0)\n\n \"\"\"\n\n complexities = {}\n if bit_complexities:\n memory_limit -= log2(n)\n\n nr_algorithms = 9 - len(skip)\n bar = Bar('Computing estimates\\t\\t\\t', max=nr_algorithms)\n\n if \"prange\" not in skip:\n complexities[\"Prange\"] = prange_complexity(n, k, w, mem=memory_limit, memory_access=memory_access)\n if quantum_estimates:\n complexities[\"Prange\"][\"quantum time\"] = quantum_prange_complexity(n, k, w, maxdepth=maxdepth,\n matrix_mult_constant=matrix_mult_constant)\n bar.next()\n\n if \"stern\" not in skip:\n complexities[\"Stern\"] = stern_complexity(n, k, w, mem=memory_limit, hmap=hmap, memory_access=memory_access)\n bar.next()\n if \"dumer\" not in skip:\n complexities[\"Dumer\"] = dumer_complexity(n, k, w, mem=memory_limit, hmap=hmap, memory_access=memory_access)\n bar.next()\n if \"ball_collision\" not in skip:\n complexities[\"Ball Collision\"] = ball_collision_decoding_complexity(n, k, w, mem=memory_limit, hmap=hmap,\n memory_access=memory_access)\n bar.next()\n if \"BJMM\" not in skip and \"MMT\" not in skip:\n complexities[\"BJMM (MMT)\"] = bjmm_complexity(n, k, w, mem=memory_limit, hmap=hmap, only_depth_two=limit_depth,\n memory_access=memory_access)\n bar.next()\n if \"BJMM-pdw\" not in skip and \"BJMM-p-dw\" not in skip:\n complexities[\"BJMM-pdw\"] = bjmm_depth_2_partially_disjoint_weight_complexity(n, k, w, mem=memory_limit,\n hmap=hmap,\n memory_access=memory_access)\n bar.next()\n if \"BJMM-dw\" not in skip:\n complexities[\"BJMM-dw\"] = bjmm_depth_2_disjoint_weight_complexity(n, k, w, mem=memory_limit, hmap=hmap,\n memory_access=memory_access)\n bar.next()\n if \"MO\" not in skip and \"May-Ozerov\" not in skip:\n complexities[\"May-Ozerov\"] = may_ozerov_complexity(n, k, w, mem=memory_limit, hmap=hmap,\n only_depth_two=limit_depth, memory_access=memory_access)\n bar.next()\n if \"BM\" not in skip and \"Both-May\" not in skip:\n complexities[\"Both-May\"] = both_may_depth_2_complexity(n, k, w, mem=memory_limit, hmap=hmap,\n memory_access=memory_access)\n bar.next()\n\n bar.finish()\n if theoretical_estimates:\n _add_theoretical_estimates(complexities, n, k, w, memory_limit, skip, use_mo, workfactor_accuracy)\n\n if bit_complexities:\n field_op = log2(n)\n for i in complexities.keys():\n complexities[i][\"time\"] += field_op\n complexities[i][\"memory\"] += field_op\n\n return complexities\n"
] | [
[
"scipy.special.binom",
"scipy.optimize.fsolve"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
Siddhant085/tensorflow | [
"6f6161a0110d99b2655efc9d933b753dadadbc38"
] | [
"tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for GBDT estimator.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport tempfile\nfrom tensorflow.contrib.boosted_trees.estimator_batch import estimator\nfrom tensorflow.contrib.boosted_trees.proto import learner_pb2\nfrom tensorflow.contrib.layers.python.layers import feature_column as contrib_feature_column\nfrom tensorflow.contrib.learn.python.learn.estimators import run_config\nfrom tensorflow.python.estimator.canned import head as head_lib\nfrom tensorflow.python.feature_column import feature_column_lib as core_feature_column\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import googletest\n\n\ndef _train_input_fn():\n features = {\"x\": constant_op.constant([[2.], [1.], [1.]])}\n label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)\n return features, label\n\n\ndef _ranking_train_input_fn():\n features = {\n \"a.f1\": constant_op.constant([[3.], [0.3], [1.]]),\n \"a.f2\": constant_op.constant([[0.1], [3.], [1.]]),\n \"b.f1\": constant_op.constant([[13.], [0.4], [5.]]),\n \"b.f2\": constant_op.constant([[1.], [3.], [0.01]]),\n }\n label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)\n return features, label\n\n\ndef _eval_input_fn():\n features = {\"x\": constant_op.constant([[1.], [2.], [2.]])}\n label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)\n return features, label\n\n\ndef _infer_ranking_train_input_fn():\n features = {\n \"f1\": constant_op.constant([[3.], [2], [1.]]),\n \"f2\": constant_op.constant([[0.1], [3.], [1.]])\n }\n return features, None\n\n\nclass BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._export_dir_base = tempfile.mkdtemp() + \"export/\"\n gfile.MkDir(self._export_dir_base)\n\n def testFitAndEvaluateDontThrowException(self):\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth = 1\n model_dir = tempfile.mkdtemp()\n config = run_config.RunConfig()\n\n classifier = estimator.GradientBoostedDecisionTreeClassifier(\n learner_config=learner_config,\n num_trees=1,\n examples_per_layer=3,\n model_dir=model_dir,\n config=config,\n feature_columns=[contrib_feature_column.real_valued_column(\"x\")])\n\n classifier.fit(input_fn=_train_input_fn, steps=15)\n classifier.evaluate(input_fn=_eval_input_fn, steps=1)\n classifier.export(self._export_dir_base)\n\n def testThatLeafIndexIsInPredictions(self):\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth 
= 1\n model_dir = tempfile.mkdtemp()\n config = run_config.RunConfig()\n\n classifier = estimator.GradientBoostedDecisionTreeClassifier(\n learner_config=learner_config,\n num_trees=1,\n examples_per_layer=3,\n model_dir=model_dir,\n config=config,\n feature_columns=[contrib_feature_column.real_valued_column(\"x\")],\n output_leaf_index=True)\n\n classifier.fit(input_fn=_train_input_fn, steps=15)\n result_iter = classifier.predict(input_fn=_eval_input_fn)\n for prediction_dict in result_iter:\n self.assertTrue(\"leaf_index\" in prediction_dict)\n self.assertTrue(\"logits\" in prediction_dict)\n\n def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth = 1\n model_dir = tempfile.mkdtemp()\n config = run_config.RunConfig()\n\n # Use core head\n head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n\n model = estimator.GradientBoostedDecisionTreeEstimator(\n head=head_fn,\n learner_config=learner_config,\n num_trees=1,\n examples_per_layer=3,\n model_dir=model_dir,\n config=config,\n feature_columns=[core_feature_column.numeric_column(\"x\")],\n use_core_libs=True)\n\n model.fit(input_fn=_train_input_fn, steps=15)\n model.evaluate(input_fn=_eval_input_fn, steps=1)\n model.export(self._export_dir_base)\n\n def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth = 1\n model_dir = tempfile.mkdtemp()\n config = run_config.RunConfig()\n\n classifier = estimator.GradientBoostedDecisionTreeClassifier(\n learner_config=learner_config,\n num_trees=1,\n examples_per_layer=3,\n model_dir=model_dir,\n config=config,\n feature_columns=[core_feature_column.numeric_column(\"x\")],\n use_core_libs=True)\n\n classifier.fit(input_fn=_train_input_fn, steps=15)\n classifier.evaluate(input_fn=_eval_input_fn, steps=1)\n classifier.export(self._export_dir_base)\n\n def testFitAndEvaluateDontThrowExceptionWithCoreForRegressor(self):\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth = 1\n model_dir = tempfile.mkdtemp()\n config = run_config.RunConfig()\n\n regressor = estimator.GradientBoostedDecisionTreeRegressor(\n learner_config=learner_config,\n num_trees=1,\n examples_per_layer=3,\n model_dir=model_dir,\n config=config,\n feature_columns=[core_feature_column.numeric_column(\"x\")],\n use_core_libs=True)\n\n regressor.fit(input_fn=_train_input_fn, steps=15)\n regressor.evaluate(input_fn=_eval_input_fn, steps=1)\n regressor.export(self._export_dir_base)\n\n def testRankingDontThrowExceptionForForEstimator(self):\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth = 1\n model_dir = tempfile.mkdtemp()\n config = run_config.RunConfig()\n\n head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)\n\n model = estimator.GradientBoostedDecisionTreeRanker(\n head=head_fn,\n learner_config=learner_config,\n num_trees=1,\n examples_per_layer=3,\n model_dir=model_dir,\n config=config,\n use_core_libs=True,\n feature_columns=[\n core_feature_column.numeric_column(\"f1\"),\n core_feature_column.numeric_column(\"f2\")\n ],\n ranking_model_pair_keys=(\"a\", 
\"b\"))\n\n model.fit(input_fn=_ranking_train_input_fn, steps=1000)\n model.evaluate(input_fn=_ranking_train_input_fn, steps=1)\n model.predict(input_fn=_infer_ranking_train_input_fn)\n\n\nclass CoreGradientBoostedDecisionTreeEstimator(test_util.TensorFlowTestCase):\n\n def testTrainEvaluateInferDoesNotThrowError(self):\n head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)\n\n learner_config = learner_pb2.LearnerConfig()\n learner_config.num_classes = 2\n learner_config.constraints.max_tree_depth = 1\n model_dir = tempfile.mkdtemp()\n config = run_config.RunConfig()\n\n est = estimator.CoreGradientBoostedDecisionTreeEstimator(\n head=head_fn,\n learner_config=learner_config,\n num_trees=1,\n examples_per_layer=3,\n model_dir=model_dir,\n config=config,\n feature_columns=[core_feature_column.numeric_column(\"x\")])\n\n # Train for a few steps.\n est.train(input_fn=_train_input_fn, steps=1000)\n est.evaluate(input_fn=_eval_input_fn, steps=1)\n est.predict(input_fn=_eval_input_fn)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n"
] | [
[
"tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss",
"tensorflow.python.platform.gfile.MkDir",
"tensorflow.python.feature_column.feature_column_lib.numeric_column",
"tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig",
"tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig",
"tensorflow.python.platform.googletest.main",
"tensorflow.contrib.layers.python.layers.feature_column.real_valued_column",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
aliabdelkader/FusionTransformer | [
"4175e13a3633a6c6b8b2aa44beb94a89acf7307f"
] | [
"FusionTransformer/common/utils/torch_util.py"
] | [
"import random\nimport numpy as np\nimport torch\n\n\ndef set_random_seed(seed):\n if seed < 0:\n return\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\ndef worker_init_fn(worker_id):\n \"\"\"The function is designed for pytorch multi-process dataloader.\n Note that we use the pytorch random generator to generate a base_seed.\n Please try to be consistent.\n\n References:\n https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed\n\n \"\"\"\n base_seed = torch.IntTensor(1).random_().item()\n # print(worker_id, base_seed)\n np.random.seed(base_seed + worker_id)\n\ndef dist_worker_init_fn(worker_id):\n\n worker_seed = torch.initial_seed() % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)"
] | [
[
"torch.initial_seed",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"torch.IntTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
beiyan1911/conditional_aia_generation | [
"0ace640d6e8dae41b63f26809a494b88cc3718e2",
"0ace640d6e8dae41b63f26809a494b88cc3718e2"
] | [
"models/base_model.py",
"models/cgan_model.py"
] | [
"import os\nfrom abc import ABC, abstractmethod\nfrom collections import OrderedDict\nimport torch.optim as optim\nimport torch\nfrom models import net_utils\n\n\nclass BaseModel(ABC):\n\n def __init__(self, config):\n self.config = config\n self.isTrain = config.isTrain\n self.device = config.device\n torch.backends.cudnn.benchmark = True\n self.loss_names = []\n self.model_names = []\n self.visual_names = []\n self.optimizers = []\n self.loss_stack = OrderedDict()\n self.metric = 0 # used for learning rate policy 'plateau'\n\n # 设置输入数据\n @abstractmethod\n def set_input(self, input):\n pass\n\n @abstractmethod\n def forward(self):\n pass\n\n @abstractmethod\n def optimize_parameters(self):\n pass\n\n def setup(self, opt):\n \"\"\"\n create schedulers\n \"\"\"\n if self.isTrain:\n self.schedulers = [optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.epoch_num, eta_min=1e-6) for\n optimizer in self.optimizers]\n\n if opt.resume:\n for per_scheduler in self.schedulers:\n per_scheduler.step(opt.resume_count)\n print('re-adjust learning rate')\n\n @abstractmethod\n def test(self):\n pass\n\n def get_lr(self):\n lr = self.optimizers[0].param_groups[0]['lr']\n return lr\n\n def update_learning_rate(self, epoch):\n \"\"\"Update learning rates for all the models; called at the end of every epoch\"\"\"\n for scheduler in self.schedulers:\n scheduler.step()\n # lr = self.optimizers[0].param_groups[0]['lr']\n # print('learning rate = %.7f' % lr)\n\n # 返回输出结果\n def get_current_np_outputs(self):\n pass\n\n # 返回 loss names\n def get_loss_names(self):\n return self.loss_names\n\n # 返回最近的loss值\n def get_current_losses(self):\n loss_dict = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n loss_dict[name] = float(getattr(self, 'loss_' + name))\n return loss_dict\n\n # **************************** save、load、print models *****************************#\n\n def save_networks(self, epoch):\n \"\"\"Save all the models to the disk.\n\n Parameters:\n epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = 'epoch_%d_net_%s.pth' % (epoch, name)\n save_path = os.path.join(self.config.checkpoints_dir, save_filename)\n net = getattr(self, 'net' + name)\n\n torch.save(net.state_dict(), save_path)\n print('save epoch %d models to file !' 
% epoch)\n\n def load_networks(self, epoch):\n \"\"\"Load all the models from the disk.\n\n Parameters:\n epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n load_filename = 'epoch_%d_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.config.checkpoints_dir, load_filename)\n if not os.path.exists(load_path):\n continue\n net = getattr(self, 'net' + name)\n print('loading the models from %s' % load_path)\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n net.load_state_dict(state_dict)\n\n def print_networks(self):\n \"\"\"Print the total number of parameters in the network and (if verbose) network architecture\n\n Parameters:\n verbose (bool) -- if verbose: print the network architecture\n \"\"\"\n print('---------- Networks initialized -------------')\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n print('-----------------------------------------------')\n\n def train(self):\n \"\"\"Make models eval mode during test time\"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.train()\n\n def eval(self):\n \"\"\"Make models eval mode during test time\"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()\n\n def set_requires_grad(self, nets, requires_grad=False):\n \"\"\"Set requies_grad=Fasle for all the models to avoid unnecessary computations\n Parameters:\n nets (network list) -- a list of models\n requires_grad (bool) -- whether the models require gradients or not\n \"\"\"\n if not isinstance(nets, list):\n nets = [nets]\n for net in nets:\n if net is not None:\n for param in net.parameters():\n param.requires_grad = requires_grad\n",
"import numpy as np\nimport torch\nfrom models.unet3d import UNet3D\nfrom models.networks import NLayerDiscriminator\nfrom models.net_utils import init_weights\nfrom utils.him import tensor2np\nfrom .base_model import BaseModel\nfrom .net_attention import MultiAttResnet\n\n\nclass CGANModel(BaseModel):\n\n def __init__(self, config):\n BaseModel.__init__(self, config)\n self.loss_names = ['G_sup'] # sub 表示像素损失\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n\n if self.isTrain:\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n self.netG = UNet3D(input_dim=5, out_channels=1, n_feat=24).to(self.device)\n # self.netG = MultiAttResnet(in_dims=5).to(self.device)\n\n init_weights(self.netG)\n\n if self.isTrain:\n self.netD = NLayerDiscriminator(input_nc=6, ndf=16, n_layers=3).to(self.device)\n init_weights(self.netD)\n\n # self.criterionGAN = torch.nn.MSELoss()\n self.criterionGAN = torch.nn.L1Loss()\n self.True_ = torch.tensor(1.0).to(self.device)\n self.False_ = torch.tensor(0.0).to(self.device)\n\n self.supLoss = torch.nn.MSELoss()\n\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=config.lr, betas=(config.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=config.lr, betas=(config.beta1, 0.999))\n\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n def set_input(self, input):\n\n self.real_A = input['inputs'].to(self.device)\n self.real_B = input['outputs'].to(self.device)\n\n def forward(self):\n self.fake_B = self.netG(self.real_A)\n\n def test(self):\n with torch.no_grad():\n self.forward()\n self.loss_G_sup = self.supLoss(self.fake_B, self.real_B) * self.config.lambda_L1\n\n def optimize_parameters(self):\n self.forward() # compute fake images: G(A)\n # ************************ update D ***********************\n # self.set_requires_grad(self.netD, True) # enable backprop for D\n self.optimizer_D.zero_grad() # set D's gradients to zero\n\n # Fake\n fake_AB = torch.cat([self.real_A, self.fake_B.detach()], 1)\n pred_fake = self.netD(fake_AB)\n self.loss_D_fake = self.criterionGAN(pred_fake, self.False_.expand_as(pred_fake))\n\n self.loss_D_fake.backward()\n\n # Real\n real_AB = torch.cat([self.real_A, self.real_B], 1)\n pred_real = self.netD(real_AB)\n self.loss_D_real = self.criterionGAN(pred_real, self.True_.expand_as(pred_real))\n\n self.loss_D_real.backward()\n\n # combine\n self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5\n\n self.optimizer_D.step() # update D's weights\n # ************************ update G ***********************\n # self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G\n self.optimizer_G.zero_grad() # set G's gradients to zero\n\n fake_AB = torch.cat([self.real_A, self.fake_B.detach()], 1)\n pred_fake = self.netD(fake_AB)\n self.loss_G_GAN = self.criterionGAN(pred_fake, self.True_.expand_as(pred_fake))\n\n self.loss_G_GAN.backward()\n\n # Second, G(A) = B\n self.loss_G_sup = self.supLoss(self.fake_B, self.real_B) * self.config.lambda_L1\n self.loss_G_sup.backward()\n\n # combine\n self.loss_G = self.loss_G_GAN + self.loss_G_sup\n\n self.optimizer_G.step()\n\n def get_current_np_outputs(self):\n \"\"\"\n return 4 dims data. [N,C,W,H]\n \"\"\"\n label = tensor2np(self.real_B)\n predict = tensor2np(self.fake_B)\n inputs = tensor2np(self.real_A)\n res = np.concatenate([inputs, label, predict], axis=1)\n\n return res\n"
] | [
[
"torch.optim.lr_scheduler.CosineAnnealingLR"
],
[
"torch.cat",
"torch.tensor",
"numpy.concatenate",
"torch.no_grad",
"torch.nn.L1Loss",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andrejromanov/dd-trace-py | [
"661011e891d4699006614b1a238096d7140cc55c"
] | [
"tests/tracer/test_span.py"
] | [
"# -*- coding: utf-8 -*-\nimport time\nfrom unittest.case import SkipTest\n\nimport mock\nimport pytest\n\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.constants import ENV_KEY\nfrom ddtrace.constants import SERVICE_VERSION_KEY\nfrom ddtrace.constants import SPAN_MEASURED_KEY\nfrom ddtrace.constants import VERSION_KEY\nfrom ddtrace.ext import SpanTypes\nfrom ddtrace.ext import errors\nfrom ddtrace.span import Span\nfrom tests import TracerTestCase\nfrom tests import assert_is_measured\nfrom tests import assert_is_not_measured\n\n\nclass SpanTestCase(TracerTestCase):\n def test_ids(self):\n s = Span(tracer=None, name=\"span.test\")\n assert s.trace_id\n assert s.span_id\n assert not s.parent_id\n\n s2 = Span(tracer=None, name=\"t\", trace_id=1, span_id=2, parent_id=1)\n assert s2.trace_id == 1\n assert s2.span_id == 2\n assert s2.parent_id == 1\n\n def test_tags(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(\"a\", \"a\")\n s.set_tag(\"b\", 1)\n s.set_tag(\"c\", \"1\")\n d = s.to_dict()\n assert d[\"meta\"] == dict(a=\"a\", c=\"1\")\n assert d[\"metrics\"] == dict(b=1)\n\n def test_numeric_tags(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(\"negative\", -1)\n s.set_tag(\"zero\", 0)\n s.set_tag(\"positive\", 1)\n s.set_tag(\"large_int\", 2 ** 53)\n s.set_tag(\"really_large_int\", (2 ** 53) + 1)\n s.set_tag(\"large_negative_int\", -(2 ** 53))\n s.set_tag(\"really_large_negative_int\", -((2 ** 53) + 1))\n s.set_tag(\"float\", 12.3456789)\n s.set_tag(\"negative_float\", -12.3456789)\n s.set_tag(\"large_float\", 2.0 ** 53)\n s.set_tag(\"really_large_float\", (2.0 ** 53) + 1)\n\n d = s.to_dict()\n assert d[\"meta\"] == dict(\n really_large_int=str(((2 ** 53) + 1)),\n really_large_negative_int=str(-((2 ** 53) + 1)),\n )\n assert d[\"metrics\"] == {\n \"negative\": -1,\n \"zero\": 0,\n \"positive\": 1,\n \"large_int\": 2 ** 53,\n \"large_negative_int\": -(2 ** 53),\n \"float\": 12.3456789,\n \"negative_float\": -12.3456789,\n \"large_float\": 2.0 ** 53,\n \"really_large_float\": (2.0 ** 53) + 1,\n }\n\n def test_set_tag_bool(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(\"true\", True)\n s.set_tag(\"false\", False)\n\n d = s.to_dict()\n assert d[\"meta\"] == dict(true=\"True\", false=\"False\")\n assert \"metrics\" not in d\n\n def test_set_tag_metric(self):\n s = Span(tracer=None, name=\"test.span\")\n\n s.set_tag(\"test\", \"value\")\n assert s.meta == dict(test=\"value\")\n assert s.metrics == dict()\n\n s.set_tag(\"test\", 1)\n assert s.meta == dict()\n assert s.metrics == dict(test=1)\n\n def test_set_valid_metrics(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_metric(\"a\", 0)\n s.set_metric(\"b\", -12)\n s.set_metric(\"c\", 12.134)\n s.set_metric(\"d\", 1231543543265475686787869123)\n s.set_metric(\"e\", \"12.34\")\n d = s.to_dict()\n expected = {\n \"a\": 0,\n \"b\": -12,\n \"c\": 12.134,\n \"d\": 1231543543265475686787869123,\n \"e\": 12.34,\n }\n assert d[\"metrics\"] == expected\n\n def test_set_invalid_metric(self):\n s = Span(tracer=None, name=\"test.span\")\n\n invalid_metrics = [None, {}, [], s, \"quarante-douze\", float(\"nan\"), float(\"inf\"), 1j]\n\n for i, m in enumerate(invalid_metrics):\n k = str(i)\n s.set_metric(k, m)\n assert s.get_metric(k) is None\n\n def test_set_numpy_metric(self):\n try:\n import numpy as np\n except ImportError:\n raise SkipTest(\"numpy not installed\")\n s = Span(tracer=None, name=\"test.span\")\n s.set_metric(\"a\", np.int64(1))\n assert s.get_metric(\"a\") == 1\n 
assert type(s.get_metric(\"a\")) == float\n\n def test_tags_not_string(self):\n # ensure we can cast as strings\n class Foo(object):\n def __repr__(self):\n 1 / 0\n\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(\"a\", Foo())\n\n def test_finish(self):\n # ensure span.finish() marks the end time of the span\n s = Span(None, \"test.span\")\n sleep = 0.05\n time.sleep(sleep)\n s.finish()\n assert s.duration >= sleep, \"%s < %s\" % (s.duration, sleep)\n\n def test_finish_no_tracer(self):\n # ensure finish works with no tracer without raising exceptions\n s = Span(tracer=None, name=\"test.span\")\n s.finish()\n\n def test_finish_called_multiple_times(self):\n # we should only record a span the first time finish is called on it\n s = Span(self.tracer, \"bar\")\n s.finish()\n s.finish()\n\n def test_finish_set_span_duration(self):\n # If set the duration on a span, the span should be recorded with this\n # duration\n s = Span(tracer=None, name=\"test.span\")\n s.duration = 1337.0\n s.finish()\n assert s.duration == 1337.0\n\n def test_setter_casts_duration_ns_as_int(self):\n s = Span(tracer=None, name=\"test.span\")\n s.duration = 3.2\n s.finish()\n assert s.duration == 3.2\n assert s.duration_ns == 3200000000\n assert isinstance(s.duration_ns, int)\n\n def test_get_span_returns_none_by_default(self):\n s = Span(tracer=None, name=\"test.span\")\n assert s.duration is None\n\n def test_traceback_with_error(self):\n s = Span(None, \"test.span\")\n try:\n 1 / 0\n except ZeroDivisionError:\n s.set_traceback()\n else:\n assert 0, \"should have failed\"\n\n assert s.error\n assert \"by zero\" in s.get_tag(errors.ERROR_MSG)\n assert \"ZeroDivisionError\" in s.get_tag(errors.ERROR_TYPE)\n\n def test_traceback_without_error(self):\n s = Span(None, \"test.span\")\n s.set_traceback()\n assert not s.error\n assert not s.get_tag(errors.ERROR_MSG)\n assert not s.get_tag(errors.ERROR_TYPE)\n assert \"in test_traceback_without_error\" in s.get_tag(errors.ERROR_STACK)\n\n def test_ctx_mgr(self):\n s = Span(self.tracer, \"bar\")\n assert not s.duration\n assert not s.error\n\n e = Exception(\"boo\")\n try:\n with s:\n time.sleep(0.01)\n raise e\n except Exception as out:\n assert out == e\n assert s.duration > 0, s.duration\n assert s.error\n assert s.get_tag(errors.ERROR_MSG) == \"boo\"\n assert \"Exception\" in s.get_tag(errors.ERROR_TYPE)\n assert s.get_tag(errors.ERROR_STACK)\n\n else:\n assert 0, \"should have failed\"\n\n def test_span_type(self):\n s = Span(tracer=None, name=\"test.span\", service=\"s\", resource=\"r\", span_type=SpanTypes.WEB)\n s.set_tag(\"a\", \"1\")\n s.set_meta(\"b\", \"2\")\n s.finish()\n\n d = s.to_dict()\n assert d\n assert d[\"span_id\"] == s.span_id\n assert d[\"trace_id\"] == s.trace_id\n assert d[\"parent_id\"] == s.parent_id\n assert d[\"meta\"] == {\"a\": \"1\", \"b\": \"2\"}\n assert d[\"type\"] == \"web\"\n assert d[\"error\"] == 0\n assert type(d[\"error\"]) == int\n\n def test_span_to_dict(self):\n s = Span(tracer=None, name=\"test.span\", service=\"s\", resource=\"r\")\n s.span_type = \"foo\"\n s.set_tag(\"a\", \"1\")\n s.set_meta(\"b\", \"2\")\n s.finish()\n\n d = s.to_dict()\n assert d\n assert d[\"span_id\"] == s.span_id\n assert d[\"trace_id\"] == s.trace_id\n assert d[\"parent_id\"] == s.parent_id\n assert d[\"meta\"] == {\"a\": \"1\", \"b\": \"2\"}\n assert d[\"type\"] == \"foo\"\n assert d[\"error\"] == 0\n assert type(d[\"error\"]) == int\n\n def test_span_to_dict_sub(self):\n parent = Span(tracer=None, name=\"test.span\", service=\"s\", 
resource=\"r\")\n s = Span(tracer=None, name=\"test.span\", service=\"s\", resource=\"r\")\n s._parent = parent\n s.span_type = \"foo\"\n s.set_tag(\"a\", \"1\")\n s.set_meta(\"b\", \"2\")\n s.finish()\n\n d = s.to_dict()\n assert d\n assert d[\"span_id\"] == s.span_id\n assert d[\"trace_id\"] == s.trace_id\n assert d[\"parent_id\"] == s.parent_id\n assert d[\"meta\"] == {\"a\": \"1\", \"b\": \"2\"}\n assert d[\"type\"] == \"foo\"\n assert d[\"error\"] == 0\n assert type(d[\"error\"]) == int\n\n def test_span_boolean_err(self):\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\")\n s.error = True\n s.finish()\n\n d = s.to_dict()\n assert d\n assert d[\"error\"] == 1\n assert type(d[\"error\"]) == int\n\n @mock.patch(\"ddtrace.span.log\")\n def test_numeric_tags_none(self, span_log):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, None)\n d = s.to_dict()\n assert d\n assert \"metrics\" not in d\n\n # Ensure we log a debug message\n span_log.debug.assert_called_once_with(\n \"ignoring not number metric %s:%s\",\n ANALYTICS_SAMPLE_RATE_KEY,\n None,\n )\n\n def test_numeric_tags_true(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, True)\n d = s.to_dict()\n assert d\n expected = {ANALYTICS_SAMPLE_RATE_KEY: 1.0}\n assert d[\"metrics\"] == expected\n\n def test_numeric_tags_value(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 0.5)\n d = s.to_dict()\n assert d\n expected = {ANALYTICS_SAMPLE_RATE_KEY: 0.5}\n assert d[\"metrics\"] == expected\n\n def test_numeric_tags_bad_value(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, \"Hello\")\n d = s.to_dict()\n assert d\n assert \"metrics\" not in d\n\n def test_set_tag_none(self):\n s = Span(tracer=None, name=\"root.span\", service=\"s\", resource=\"r\")\n assert s.meta == dict()\n\n s.set_tag(\"custom.key\", \"100\")\n\n assert s.meta == {\"custom.key\": \"100\"}\n\n s.set_tag(\"custom.key\", None)\n\n assert s.meta == {\"custom.key\": \"None\"}\n\n def test_duration_zero(self):\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\", start=123)\n s.finish(finish_time=123)\n assert s.duration_ns == 0\n assert s.duration == 0\n\n def test_start_int(self):\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\", start=123)\n assert s.start == 123\n assert s.start_ns == 123000000000\n\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\", start=123.123)\n assert s.start == 123.123\n assert s.start_ns == 123123000000\n\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\", start=123.123)\n s.start = 234567890.0\n assert s.start == 234567890\n assert s.start_ns == 234567890000000000\n\n def test_duration_int(self):\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\")\n s.finish()\n assert isinstance(s.duration_ns, int)\n assert isinstance(s.duration, float)\n\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\", start=123)\n s.finish(finish_time=123.2)\n assert s.duration_ns == 200000000\n assert s.duration == 0.2\n\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\", start=123.1)\n s.finish(finish_time=123.2)\n assert s.duration_ns == 100000000\n assert s.duration == 0.1\n\n s = Span(tracer=None, name=\"foo.bar\", service=\"s\", resource=\"r\", start=122)\n s.finish(finish_time=123)\n assert s.duration_ns == 1000000000\n assert s.duration == 1\n\n def 
test_set_tag_version(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(VERSION_KEY, \"1.2.3\")\n assert s.get_tag(VERSION_KEY) == \"1.2.3\"\n assert s.get_tag(SERVICE_VERSION_KEY) is None\n\n s.set_tag(SERVICE_VERSION_KEY, \"service.version\")\n assert s.get_tag(VERSION_KEY) == \"service.version\"\n assert s.get_tag(SERVICE_VERSION_KEY) == \"service.version\"\n\n def test_set_tag_env(self):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(ENV_KEY, \"prod\")\n assert s.get_tag(ENV_KEY) == \"prod\"\n\n\[email protected](\n \"value,assertion\",\n [\n (None, assert_is_measured),\n (1, assert_is_measured),\n (1.0, assert_is_measured),\n (-1, assert_is_measured),\n (True, assert_is_measured),\n (\"true\", assert_is_measured),\n # DEV: Ends up being measured because we do `bool(\"false\")` which is `True`\n (\"false\", assert_is_measured),\n (0, assert_is_not_measured),\n (0.0, assert_is_not_measured),\n (False, assert_is_not_measured),\n ],\n)\ndef test_set_tag_measured(value, assertion):\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(SPAN_MEASURED_KEY, value)\n assertion(s)\n\n\ndef test_set_tag_measured_not_set():\n # Span is not measured by default\n s = Span(tracer=None, name=\"test.span\")\n assert_is_not_measured(s)\n\n\ndef test_set_tag_measured_no_value():\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(SPAN_MEASURED_KEY)\n assert_is_measured(s)\n\n\ndef test_set_tag_measured_change_value():\n s = Span(tracer=None, name=\"test.span\")\n s.set_tag(SPAN_MEASURED_KEY, True)\n assert_is_measured(s)\n\n s.set_tag(SPAN_MEASURED_KEY, False)\n assert_is_not_measured(s)\n\n s.set_tag(SPAN_MEASURED_KEY)\n assert_is_measured(s)\n\n\[email protected](\"ddtrace.span.log\")\ndef test_span_key(span_log):\n # Span tag keys must be strings\n s = Span(tracer=None, name=\"test.span\")\n\n s.set_tag(123, True)\n span_log.warning.assert_called_once_with(\"Ignoring tag pair %s:%s. Key must be a string.\", 123, True)\n assert s.get_tag(123) is None\n assert s.get_tag(\"123\") is None\n\n span_log.reset_mock()\n\n s.set_tag(None, \"val\")\n span_log.warning.assert_called_once_with(\"Ignoring tag pair %s:%s. 
Key must be a string.\", None, \"val\")\n assert s.get_tag(123.32) is None\n\n\ndef test_span_finished():\n span = Span(None, None)\n assert span.finished is False\n assert span.duration_ns is None\n\n span.finished = True\n assert span.finished is True\n assert span.duration_ns is not None\n duration = span.duration_ns\n\n span.finished = True\n assert span.finished is True\n assert span.duration_ns == duration\n\n span.finished = False\n assert span.finished is False\n\n span.finished = True\n assert span.finished is True\n assert span.duration_ns != duration\n\n\ndef test_span_unicode_set_tag():\n span = Span(None, None)\n span.set_tag(\"key\", u\"😌\")\n span.set_tag(\"😐\", u\"😌\")\n span._set_str_tag(\"key\", u\"😌\")\n span._set_str_tag(u\"😐\", u\"😌\")\n\n\ndef test_span_ignored_exceptions():\n s = Span(None, None)\n s._ignore_exception(ValueError)\n\n with pytest.raises(ValueError):\n with s:\n raise ValueError()\n\n assert s.error == 0\n assert s.get_tag(errors.ERROR_MSG) is None\n assert s.get_tag(errors.ERROR_TYPE) is None\n assert s.get_tag(errors.ERROR_STACK) is None\n\n s = Span(None, None)\n s._ignore_exception(ValueError)\n\n with pytest.raises(ValueError):\n with s:\n raise ValueError()\n\n with pytest.raises(RuntimeError):\n with s:\n raise RuntimeError()\n\n assert s.error == 1\n assert s.get_tag(errors.ERROR_MSG) is not None\n assert \"RuntimeError\" in s.get_tag(errors.ERROR_TYPE)\n assert s.get_tag(errors.ERROR_STACK) is not None\n\n\ndef test_span_ignored_exception_multi():\n s = Span(None, None)\n s._ignore_exception(ValueError)\n s._ignore_exception(RuntimeError)\n\n with pytest.raises(ValueError):\n with s:\n raise ValueError()\n\n with pytest.raises(RuntimeError):\n with s:\n raise RuntimeError()\n\n assert s.error == 0\n assert s.get_tag(errors.ERROR_MSG) is None\n assert s.get_tag(errors.ERROR_TYPE) is None\n assert s.get_tag(errors.ERROR_STACK) is None\n\n\ndef test_span_ignored_exception_subclass():\n s = Span(None, None)\n s._ignore_exception(Exception)\n\n with pytest.raises(ValueError):\n with s:\n raise ValueError()\n\n with pytest.raises(RuntimeError):\n with s:\n raise RuntimeError()\n\n assert s.error == 0\n assert s.get_tag(errors.ERROR_MSG) is None\n assert s.get_tag(errors.ERROR_TYPE) is None\n assert s.get_tag(errors.ERROR_STACK) is None\n"
] | [
[
"numpy.int64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
marwahaha/dit | [
"feaa7dfa87b4f6067039be4ac05c7e645fdcec3c"
] | [
"dit/profiles/base_profile.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThe base information profile.\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import with_metaclass\n\nimport numpy as np\n\n\nprofile_docstring = \"\"\"\n{name}\n\nStatic Attributes\n-----------------\nxlabel : str\n The label for the x-axis when plotting.\nylabel : str\n The label for the y-axis when plotting.\n{static_attributes}\n\nAttributes\n----------\ndist : Distribution\nprofile : dict\nwidths : [float]\n{attributes}\n\nMethods\n-------\ndraw\n Plot the profile\n{methods}\n\nPrivate Methods\n---------------\n_compute\n Compute the profile\n\"\"\"\n\n\nclass BaseProfile(with_metaclass(ABCMeta, object)):\n \"\"\"\n BaseProfile\n\n Static Attributes\n -----------------\n xlabel : str\n The label for the x-axis when plotting.\n ylabel : str\n The label for the y-axis when plotting.\n\n Attributes\n ----------\n dist : Distribution\n profile : dict\n widths : [float]\n\n Methods\n -------\n draw\n Plot the profile.\n\n Abstract Methods\n ----------------\n _compute\n Compute the profile.\n \"\"\"\n\n xlabel = 'scale'\n ylabel = 'information [bits]'\n align = 'center'\n\n def __init__(self, dist):\n \"\"\"\n Initialize the profile.\n\n Parameters\n ----------\n dist : Distribution\n The distribution to compute the profile for.\n \"\"\"\n super(BaseProfile, self).__init__()\n self.dist = dist.copy(base='linear')\n self._compute()\n\n @abstractmethod\n def _compute(self):\n \"\"\"\n Abstract method to compute the profile.\n \"\"\"\n pass\n\n def draw(self, ax=None): # pragma: no cover\n \"\"\"\n Draw the profile using matplotlib.\n\n Parameters\n ----------\n ax : axis\n The axis to draw the profile on. If None, a new axis is created.\n\n Returns\n -------\n ax : axis\n The axis with profile.\n \"\"\"\n if ax is None:\n import matplotlib.pyplot as plt\n ax = plt.figure().gca()\n\n # pylint: disable=no-member\n left, height = zip(*sorted(self.profile.items()))\n ax.bar(left, height, width=self.widths, align=self.align)\n\n ax.set_xticks(sorted(self.profile.keys()))\n\n ax.set_xlabel(self.xlabel)\n ax.set_ylabel(self.ylabel)\n\n low, high = ax.get_ylim()\n if np.isclose(low, 0, atol=1e-5):\n low = -0.1\n if np.isclose(high, 0, atol=1e-5):\n high = 0.1\n ax.set_ylim((low, high))\n\n return ax\n"
] | [
[
"matplotlib.pyplot.figure",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lgasyou/spark-scheduler-configuration-optimizer | [
"05c0ea9411db642c7c7e675a6949ffcc6814947a"
] | [
"optimizer/environment/yarn/statebuilder.py"
] | [
"from typing import Tuple\n\nimport requests\nimport torch\nfrom requests.exceptions import ConnectionError\n\nfrom optimizer.hyperparameters import STATE_SHAPE\nfrom optimizer.environment.yarn.yarnmodel import *\nfrom optimizer.environment.spark.sparkapplicationtimedelaypredictor import SparkApplicationTimeDelayPredictor\nfrom optimizer.environment.spark.sparkapplicationbuilder import SparkApplicationBuilder\nfrom optimizer.environment.spark.completedsparkapplicationanalyzer import CompletedSparkApplicationAnalyzer\nfrom optimizer.environment.stateinvalidexception import StateInvalidException\nfrom optimizer.util import jsonutil\n\n\nclass StateBuilder(object):\n\n def __init__(self, rm_api_url: str, spark_history_server_api_url: str, scheduler_strategy):\n self.RM_API_URL = rm_api_url\n self.SPARK_HISTORY_SERVER_API_URL = spark_history_server_api_url\n self.scheduler_strategy = scheduler_strategy\n self.application_time_delay_predictor = SparkApplicationTimeDelayPredictor(spark_history_server_api_url)\n self._tmp_add_models()\n\n # TODO: Replace this with train set.\n def _tmp_add_models(self):\n builder = SparkApplicationBuilder(self.SPARK_HISTORY_SERVER_API_URL)\n analyzer = CompletedSparkApplicationAnalyzer()\n predictor = self.application_time_delay_predictor\n\n app = builder.build('application_1562834622700_0051')\n svm_model = analyzer.analyze(app)\n predictor.add_algorithm('linear', svm_model)\n\n # ALS\n app = builder.build('application_1562834622700_0039')\n als_model = analyzer.analyze(app)\n predictor.add_algorithm('als', als_model)\n\n # KMeans\n app = builder.build('application_1562834622700_0018')\n svm_model = analyzer.analyze(app)\n predictor.add_algorithm('kmeans', svm_model)\n\n # SVM\n app = builder.build('application_1562834622700_0014')\n svm_model = analyzer.analyze(app)\n predictor.add_algorithm('svm', svm_model)\n\n # Bayes\n app = builder.build('application_1562834622700_0043')\n svm_model = analyzer.analyze(app)\n predictor.add_algorithm('bayes', svm_model)\n\n # FPGrowth\n app = builder.build('application_1562834622700_0054')\n svm_model = analyzer.analyze(app)\n predictor.add_algorithm('fpgrowth', svm_model)\n\n # LDA\n app = builder.build('application_1562834622700_0058')\n svm_model = analyzer.analyze(app)\n predictor.add_algorithm('lda', svm_model)\n\n def build(self):\n try:\n waiting_apps, running_apps = self.parse_and_build_applications()\n resources = self.parse_and_build_resources()\n constraints = self.parse_and_build_constraints()\n return State(waiting_apps, running_apps, resources, constraints)\n except (ConnectionError, TypeError, requests.exceptions.HTTPError):\n raise StateInvalidException\n\n @staticmethod\n def build_tensor(raw: State):\n height, width = STATE_SHAPE\n tensor = torch.zeros(height, width)\n\n # Line 0-74: waiting apps and their resource requests\n for i, wa in enumerate(raw.waiting_apps[:75]):\n line = [wa.elapsed_time, wa.priority, wa.converted_location]\n for rr in wa.request_resources[:64]:\n line.extend([rr.priority, rr.memory, rr.cpu])\n line.extend([0.0] * (width - len(line)))\n tensor[i] = torch.Tensor(line)\n\n # Line 75-149: running apps and their resource requests\n for i, ra in enumerate(raw.running_apps[:75]):\n row = i + 75\n line = [ra.elapsed_time, ra.priority, ra.converted_location,\n ra.progress, ra.queue_usage_percentage, ra.predicted_time_delay]\n for rr in ra.request_resources[:65]:\n line.extend([rr.priority, rr.memory, rr.cpu])\n line.extend([0.0] * (width - len(line)))\n tensor[row] = 
torch.Tensor(line)\n\n # Line 150-198: resources of cluster\n row, idx = 150, 0\n for r in raw.resources[:4900]:\n tensor[row][idx] = r.mem\n idx += 1\n tensor[row][idx] = r.vcore_num\n idx += 1\n if idx == width:\n row += 1\n idx = 0\n\n # Line 199: queue constraints\n row, queue_constraints = 199, []\n for c in raw.constraints[:50]:\n queue_constraints.extend([c.converted_name, c.capacity, c.max_capacity, c.used_capacity])\n queue_constraints.extend([0.0] * (width - len(queue_constraints)))\n tensor[row] = torch.Tensor(queue_constraints)\n\n return tensor\n\n def parse_and_build_applications(self) -> Tuple[List[WaitingApplication], List[RunningApplication]]:\n waiting_apps = self.parse_and_build_waiting_apps()\n running_apps = self.parse_and_build_running_apps()\n return waiting_apps, running_apps\n\n def parse_and_build_waiting_apps(self) -> List[WaitingApplication]:\n url = self.RM_API_URL + 'ws/v1/cluster/apps?states=NEW,NEW_SAVING,SUBMITTED,ACCEPTED'\n app_json = jsonutil.get_json(url)\n return self.build_waiting_apps_from_json(app_json)\n\n def parse_and_build_running_apps(self) -> List[RunningApplication]:\n url = self.RM_API_URL + 'ws/v1/cluster/apps?states=RUNNING'\n app_json = jsonutil.get_json(url)\n return self.build_running_apps_from_json(app_json)\n\n def parse_and_build_resources(self) -> List[Resource]:\n url = self.RM_API_URL + 'ws/v1/cluster/nodes'\n conf = jsonutil.get_json(url)\n nodes = conf['nodes']['node']\n resources = []\n for n in nodes:\n memory = (int(n['usedMemoryMB']) + int(n['availMemoryMB'])) / 1024\n vcores = int(n['usedVirtualCores']) + int(n['availableVirtualCores'])\n resources.append(Resource(vcores, int(memory)))\n return resources\n\n def parse_and_build_constraints(self) -> List[QueueConstraint]:\n return self.scheduler_strategy.get_queue_constraints()\n\n def build_running_apps_from_json(self, j: dict) -> List[RunningApplication]:\n if j['apps'] is None:\n return []\n\n apps_json, apps = j['apps']['app'], []\n for j in apps_json:\n application_id = j['id']\n name = j['name']\n elapsed_time = j['elapsedTime']\n priority = j['priority']\n progress = j['progress']\n queue_usage_percentage = j['queueUsagePercentage']\n location = j['queue']\n predicted_time_delay = self.application_time_delay_predictor.predict(application_id, name)\n request_resources = self.build_request_resources_from_json(j)\n apps.append(RunningApplication(application_id, elapsed_time, priority, location, progress,\n queue_usage_percentage, predicted_time_delay, request_resources))\n\n return apps\n\n def build_waiting_apps_from_json(self, j: dict) -> List[WaitingApplication]:\n if j['apps'] is None:\n return []\n\n apps_json = j['apps']['app']\n apps = []\n for j in apps_json:\n elapsed_time = j['elapsedTime']\n priority = j['priority']\n location = j['queue']\n request_resources = self.build_request_resources_from_json(j)\n apps.append(WaitingApplication(elapsed_time, priority, location, request_resources))\n\n return apps\n\n @staticmethod\n def build_request_resources_from_json(j: dict) -> List[ApplicationRequestResource]:\n ret = []\n\n if 'resourceRequests' not in j:\n return ret\n\n resource_requests = j['resourceRequests']\n for req in resource_requests:\n priority = req['priority']\n capability = req['capability']\n memory = capability['memory']\n cpu = capability['vCores']\n ret.append(ApplicationRequestResource(priority, memory, cpu))\n\n return ret\n"
] | [
[
"torch.Tensor",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |