the-stack_106_24775
|
# conflicts with isort because of local non-relative import
# pylint: disable=wrong-import-order
import logging
import os
import sys
from datetime import datetime, timedelta
from dependencies import logger
from models.tortoise_models import (
AuthEvents,
DispenserState,
DoorState,
FleetState,
HealthStatus,
IngestorState,
LiftState,
RawLog,
TaskSummary,
)
from tortoise import Tortoise, run_async
from .app_config import app_config
logger = logging.getLogger("clean_script")
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logger.addHandler(handler)
if "RMF_REST_SERVER_DEBUG" in os.environ:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logger.info("started clean up")
reportingModels = [
AuthEvents,
DispenserState,
DoorState,
FleetState,
HealthStatus,
IngestorState,
LiftState,
RawLog,
TaskSummary,
]
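# Retention cutoff: the configured log storage time plus a 60-day safety buffer.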
number_of_days_to_keep_logs = app_config.log_storage_time + 60
logger.info(
    "You are about to delete all the logs older than %s days",
    str(number_of_days_to_keep_logs),
)
async def delete_logs():
    cutoff = datetime.now() - timedelta(days=number_of_days_to_keep_logs)
    for model in reportingModels:
        rows = await model.filter(created__lt=cutoff)
        logger.info(
            "%s has %s rows older than %s days",
            str(model.__module__),
            str(len(rows)),
            str(number_of_days_to_keep_logs),
        )
        await model.filter(created__lt=cutoff).delete()
async def run():
await Tortoise.init(
db_url=app_config.db_url,
modules={"models": ["models.tortoise_models"]},
)
await Tortoise.generate_schemas()
await delete_logs()
def main():
run_async(run())
if __name__ == "__main__":
main()
|
the-stack_106_24776
|
import hash_ssz
from beacon_chain.state import crystallized_state as cs
from ssz import ssz
import time
from hashlib import blake2b
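# Truncate blake2b's 64-byte digest down to 32 bytes.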
def hash(x):
return blake2b(x).digest()[:32]
v = cs.ValidatorRecord(pubkey=3**160, withdrawal_shard=567, withdrawal_address=b'\x35' * 20, randao_commitment=b'\x57' * 20, balance=32 * 10**18, start_dynasty=7, end_dynasty=17284)
c = cs.CrosslinkRecord(dynasty=4, slot=12847, hash=b'\x67' * 32)
cr_stubs = [c for i in range(1024)]
def make_crystallized_state(valcount):
sc_stub = cs.ShardAndCommittee(shard_id=1, committee=list(range(valcount // 1024)))
sc_stubs = [[sc_stub for i in range(16)] for i in range(64)]
c = cs.CrystallizedState(
validators=[v for i in range(valcount)],
last_state_recalc=1,
shard_and_committee_for_slots=sc_stubs,
last_justified_slot=12744,
justified_streak=98,
last_finalized_slot=1724,
current_dynasty=19824,
        crosslink_records=cr_stubs,
dynasty_seed=b'\x98' * 32,
dynasty_start=124
)
return c
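# Benchmark helpers: time_test measures tree-hashing a CrystallizedState via
# hash_ssz, encoded_length reports its serialized size, and hash_time_test splits
# serialization time from the time to flat-hash the serialized bytes.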
def time_test(valcount):
c = make_crystallized_state(valcount)
a = time.time()
h = hash_ssz.hash_ssz(c)
    return time.time() - a
def encoded_length(valcount):
c = make_crystallized_state(valcount)
return len(ssz.serialize(c))
def hash_time_test(valcount):
c = make_crystallized_state(valcount)
a = time.time()
s = ssz.serialize(c)
a2 = time.time()
h = hash(s)
    return a2 - a, time.time() - a2
if __name__ == '__main__':
print(time_test(2**18))
|
the-stack_106_24777
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
import importlib
import inspect
import json
import logging
import re
import traceback
import unicodedata
from collections import defaultdict
from os.path import basename
import yaml
from six import PY3, iteritems, text_type
from ..config import is_affirmative
from ..constants import ServiceCheck
from ..utils.common import ensure_bytes, ensure_unicode, to_string
from ..utils.http import RequestsWrapper
from ..utils.limiter import Limiter
from ..utils.proxy import config_proxy_skip
try:
import datadog_agent
from ..log import init_logging
init_logging()
except ImportError:
from ..stubs import datadog_agent
from ..stubs.log import init_logging
init_logging()
try:
import aggregator
using_stub_aggregator = False
except ImportError:
from ..stubs import aggregator
using_stub_aggregator = True
if datadog_agent.get_config('disable_unsafe_yaml'):
from ..ddyaml import monkey_patch_pyyaml
monkey_patch_pyyaml()
# Metric types for which it's only useful to submit once per set of tags
ONE_PER_CONTEXT_METRIC_TYPES = [aggregator.GAUGE, aggregator.RATE, aggregator.MONOTONIC_COUNT]
class __AgentCheck(object):
"""The base class for any Agent based integrations.
    :cvar DEFAULT_METRIC_LIMIT: allows setting a limit on the number of metric name and tag combinations
this check can send per run. This is useful for checks that have an unbounded
number of tag values that depend on the input payload.
The logic counts one set of tags per gauge/rate/monotonic_count call, and deduplicates
sets of tags for other metric types. The first N sets of tags in submission order will
be sent to the aggregator, the rest are dropped. The state is reset after each run.
        See https://github.com/DataDog/integrations-core/pull/2093 for more information.
:ivar log: is a logger instance that prints to the Agent's main log file. You can set the
log level in the Agent config file 'datadog.yaml'.
"""
# If defined, this will be the prefix of every metric/service check and the source type of events
__NAMESPACE__ = ''
OK, WARNING, CRITICAL, UNKNOWN = ServiceCheck
HTTP_CONFIG_REMAPPER = None # Used by `self.http` RequestsWrapper
FIRST_CAP_RE = re.compile(br'(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile(br'([a-z0-9])([A-Z])')
METRIC_REPLACEMENT = re.compile(br'([^a-zA-Z0-9_.]+)|(^[^a-zA-Z]+)')
DOT_UNDERSCORE_CLEANUP = re.compile(br'_*\._*')
DEFAULT_METRIC_LIMIT = 0
def __init__(self, *args, **kwargs):
"""In general, you don't need to and you should not override anything from the base
class except the :py:meth:`check` method but sometimes it might be useful for a Check to
have its own constructor.
When overriding `__init__` you have to remember that, depending on the configuration,
        the Agent might create several different Check instances, and the method will be
        called as many times.
:warning: when loading a Custom check, the Agent will inspect the module searching
for a subclass of `AgentCheck`. If such a class exists but has been derived in
turn, it'll be ignored - **you should never derive from an existing Check**.
:param str name: the name of the check.
:param dict init_config: the 'init_config' section of the configuration.
:param list instances: a one-element list containing the instance options from the
configuration file (a list is used to keep backward compatibility with
older versions of the Agent).
"""
self.metrics = defaultdict(list)
self.check_id = ''
self.instances = kwargs.get('instances', [])
self.name = kwargs.get('name', '')
self.init_config = kwargs.get('init_config', {})
self.agentConfig = kwargs.get('agentConfig', {})
self.warnings = []
self.metric_limiter = None
if len(args) > 0:
self.name = args[0]
if len(args) > 1:
self.init_config = args[1]
if len(args) > 2:
if len(args) > 3 or 'instances' in kwargs:
# old-style init: the 3rd argument is `agentConfig`
self.agentConfig = args[2]
if len(args) > 3:
self.instances = args[3]
else:
# new-style init: the 3rd argument is `instances`
self.instances = args[2]
# Agent 6+ will only have one instance
self.instance = self.instances[0] if self.instances else None
# `self.hostname` is deprecated, use `datadog_agent.get_hostname()` instead
self.hostname = datadog_agent.get_hostname()
        # The Agent 5 'AgentCheck' sets up a log attribute.
self.log = logging.getLogger('{}.{}'.format(__name__, self.name))
# Provides logic to yield consistent network behavior based on user configuration.
# Only new checks or checks on Agent 6.13+ can and should use this for HTTP requests.
self._http = None
# Save the dynamically detected integration version
self._check_version = None
# TODO: Remove with Agent 5
# Set proxy settings
self.proxies = self._get_requests_proxy()
if not self.init_config:
self._use_agent_proxy = True
else:
self._use_agent_proxy = is_affirmative(self.init_config.get('use_agent_proxy', True))
# TODO: Remove with Agent 5
self.default_integration_http_timeout = float(self.agentConfig.get('default_integration_http_timeout', 9))
self._deprecations = {
'increment': [
False,
(
'DEPRECATION NOTICE: `AgentCheck.increment`/`AgentCheck.decrement` are deprecated, please '
'use `AgentCheck.gauge` or `AgentCheck.count` instead, with a different metric name'
),
],
'device_name': [
False,
(
'DEPRECATION NOTICE: `device_name` is deprecated, please use a `device:` '
'tag in the `tags` list instead'
),
],
'in_developer_mode': [
False,
'DEPRECATION NOTICE: `in_developer_mode` is deprecated, please stop using it.',
],
'no_proxy': [
False,
(
'DEPRECATION NOTICE: The `no_proxy` config option has been renamed '
'to `skip_proxy` and will be removed in Agent version 6.13.'
),
],
}
# Setup metric limits
try:
metric_limit = self.instances[0].get('max_returned_metrics', self.DEFAULT_METRIC_LIMIT)
            # Do not allow disabling limiting if the class has set a non-zero default value
if metric_limit == 0 and self.DEFAULT_METRIC_LIMIT > 0:
metric_limit = self.DEFAULT_METRIC_LIMIT
self.warning(
'Setting max_returned_metrics to zero is not allowed, reverting '
'to the default of {} metrics'.format(self.DEFAULT_METRIC_LIMIT)
)
except Exception:
metric_limit = self.DEFAULT_METRIC_LIMIT
if metric_limit > 0:
self.metric_limiter = Limiter(self.name, 'metrics', metric_limit, self.warning)
@staticmethod
def load_config(yaml_str):
"""
Convenience wrapper to ease programmatic use of this class from the C API.
"""
return yaml.safe_load(yaml_str)
@property
def http(self):
if self._http is None:
self._http = RequestsWrapper(self.instance or {}, self.init_config, self.HTTP_CONFIG_REMAPPER, self.log)
return self._http
@property
def check_version(self):
if self._check_version is None:
# 'datadog_checks.<PACKAGE>.<MODULE>...'
module_parts = self.__module__.split('.')
package_path = '.'.join(module_parts[:2])
package = importlib.import_module(package_path)
# Provide a default just in case
self._check_version = getattr(package, '__version__', '0.0.0')
return self._check_version
@property
def in_developer_mode(self):
self._log_deprecation('in_developer_mode')
return False
def get_instance_proxy(self, instance, uri, proxies=None):
# TODO: Remove with Agent 5
proxies = proxies if proxies is not None else self.proxies.copy()
deprecated_skip = instance.get('no_proxy', None)
skip = is_affirmative(instance.get('skip_proxy', not self._use_agent_proxy)) or is_affirmative(deprecated_skip)
if deprecated_skip is not None:
self._log_deprecation('no_proxy')
return config_proxy_skip(proxies, uri, skip)
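    # A context uniquely identifies a metric series: metric type, name, tag set and hostname.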
def _context_uid(self, mtype, name, tags=None, hostname=None):
return '{}-{}-{}-{}'.format(mtype, name, tags if tags is None else hash(frozenset(tags)), hostname)
def _submit_metric(self, mtype, name, value, tags=None, hostname=None, device_name=None):
if value is None:
# ignore metric sample
return
tags = self._normalize_tags_type(tags, device_name, name)
if hostname is None:
hostname = ''
if self.metric_limiter:
if mtype in ONE_PER_CONTEXT_METRIC_TYPES:
# Fast path for gauges, rates, monotonic counters, assume one set of tags per call
if self.metric_limiter.is_reached():
return
else:
# Other metric types have a legit use case for several calls per set of tags, track unique sets of tags
context = self._context_uid(mtype, name, tags, hostname)
if self.metric_limiter.is_reached(context):
return
try:
value = float(value)
except ValueError:
err_msg = 'Metric: {} has non float value: {}. Only float values can be submitted as metrics.'.format(
repr(name), repr(value)
)
if using_stub_aggregator:
raise ValueError(err_msg)
self.warning(err_msg)
return
aggregator.submit_metric(self, self.check_id, mtype, self._format_namespace(name), value, tags, hostname)
def gauge(self, name, value, tags=None, hostname=None, device_name=None):
"""Sample a gauge metric.
:param str name: the name of the metric.
:param float value: the value for the metric.
:param list tags: (optional) a list of tags to associate with this metric.
:param str hostname: (optional) a hostname to associate with this metric. Defaults to the current host.
:param str device_name: **deprecated** add a tag in the form :code:`device:<device_name>` to the :code:`tags`
list instead.
"""
self._submit_metric(aggregator.GAUGE, name, value, tags=tags, hostname=hostname, device_name=device_name)
def count(self, name, value, tags=None, hostname=None, device_name=None):
"""Sample a raw count metric.
:param str name: the name of the metric.
:param float value: the value for the metric.
:param list tags: (optional) a list of tags to associate with this metric.
:param str hostname: (optional) a hostname to associate with this metric. Defaults to the current host.
:param str device_name: **deprecated** add a tag in the form :code:`device:<device_name>` to the :code:`tags`
list instead.
"""
self._submit_metric(aggregator.COUNT, name, value, tags=tags, hostname=hostname, device_name=device_name)
def monotonic_count(self, name, value, tags=None, hostname=None, device_name=None):
"""Sample an increasing counter metric.
:param str name: the name of the metric.
:param float value: the value for the metric.
:param list tags: (optional) a list of tags to associate with this metric.
:param str hostname: (optional) a hostname to associate with this metric. Defaults to the current host.
:param str device_name: **deprecated** add a tag in the form :code:`device:<device_name>` to the :code:`tags`
list instead.
"""
self._submit_metric(
aggregator.MONOTONIC_COUNT, name, value, tags=tags, hostname=hostname, device_name=device_name
)
def rate(self, name, value, tags=None, hostname=None, device_name=None):
"""Sample a point, with the rate calculated at the end of the check.
:param str name: the name of the metric.
:param float value: the value for the metric.
:param list tags: (optional) a list of tags to associate with this metric.
:param str hostname: (optional) a hostname to associate with this metric. Defaults to the current host.
:param str device_name: **deprecated** add a tag in the form :code:`device:<device_name>` to the :code:`tags`
list instead.
"""
self._submit_metric(aggregator.RATE, name, value, tags=tags, hostname=hostname, device_name=device_name)
def histogram(self, name, value, tags=None, hostname=None, device_name=None):
"""Sample a histogram metric.
:param str name: the name of the metric.
:param float value: the value for the metric.
:param list tags: (optional) a list of tags to associate with this metric.
:param str hostname: (optional) a hostname to associate with this metric. Defaults to the current host.
:param str device_name: **deprecated** add a tag in the form :code:`device:<device_name>` to the :code:`tags`
list instead.
"""
self._submit_metric(aggregator.HISTOGRAM, name, value, tags=tags, hostname=hostname, device_name=device_name)
def historate(self, name, value, tags=None, hostname=None, device_name=None):
"""Sample a histogram based on rate metrics.
:param str name: the name of the metric.
:param float value: the value for the metric.
:param list tags: (optional) a list of tags to associate with this metric.
:param str hostname: (optional) a hostname to associate with this metric. Defaults to the current host.
:param str device_name: **deprecated** add a tag in the form :code:`device:<device_name>` to the :code:`tags`
list instead.
"""
self._submit_metric(aggregator.HISTORATE, name, value, tags=tags, hostname=hostname, device_name=device_name)
def increment(self, name, value=1, tags=None, hostname=None, device_name=None):
"""Increment a counter metric.
:param str name: the name of the metric.
:param float value: the value for the metric.
:param list tags: (optional) a list of tags to associate with this metric.
:param str hostname: (optional) a hostname to associate with this metric. Defaults to the current host.
:param str device_name: **deprecated** add a tag in the form :code:`device:<device_name>` to the :code:`tags`
list instead.
"""
self._log_deprecation('increment')
self._submit_metric(aggregator.COUNTER, name, value, tags=tags, hostname=hostname, device_name=device_name)
def decrement(self, name, value=-1, tags=None, hostname=None, device_name=None):
"""Decrement a counter metric.
:param str name: the name of the metric.
:param float value: the value for the metric.
:param list tags: (optional) a list of tags to associate with this metric.
:param str hostname: (optional) a hostname to associate with this metric. Defaults to the current host.
:param str device_name: **deprecated** add a tag in the form :code:`device:<device_name>` to the :code:`tags`
list instead.
"""
self._log_deprecation('increment')
self._submit_metric(aggregator.COUNTER, name, value, tags=tags, hostname=hostname, device_name=device_name)
def service_check(self, name, status, tags=None, hostname=None, message=None):
"""Send the status of a service.
:param str name: the name of the service check.
:param status: a constant describing the service status.
:type status: :py:class:`datadog_checks.base.constants.ServiceCheck`
:param list tags: (optional) a list of tags to associate with this check.
:param str message: (optional) additional information or a description of why this status occurred.
"""
tags = self._normalize_tags_type(tags)
if hostname is None:
hostname = ''
if message is None:
message = ''
else:
message = to_string(message)
aggregator.submit_service_check(
self, self.check_id, self._format_namespace(name), status, tags, hostname, message
)
def _log_deprecation(self, deprecation_key):
"""
Logs a deprecation notice at most once per AgentCheck instance, for the pre-defined `deprecation_key`
"""
if not self._deprecations[deprecation_key][0]:
self.log.warning(self._deprecations[deprecation_key][1])
self._deprecations[deprecation_key][0] = True
# TODO(olivier): implement service_metadata if it's worth it
def service_metadata(self, meta_name, value):
pass
def set_external_tags(self, external_tags):
# Example of external_tags format
# [
# ('hostname', {'src_name': ['test:t1']}),
# ('hostname2', {'src2_name': ['test2:t3']})
# ]
try:
new_tags = []
for hostname, source_map in external_tags:
new_tags.append((to_string(hostname), source_map))
for src_name, tags in iteritems(source_map):
source_map[src_name] = self._normalize_tags_type(tags)
datadog_agent.set_external_tags(new_tags)
except IndexError:
self.log.exception('Unexpected external tags format: {}'.format(external_tags))
raise
def convert_to_underscore_separated(self, name):
"""
        Convert from CamelCase to camel_case
        and substitute illegal metric characters.
"""
metric_name = self.FIRST_CAP_RE.sub(br'\1_\2', ensure_bytes(name))
metric_name = self.ALL_CAP_RE.sub(br'\1_\2', metric_name).lower()
metric_name = self.METRIC_REPLACEMENT.sub(br'_', metric_name)
return self.DOT_UNDERSCORE_CLEANUP.sub(br'.', metric_name).strip(b'_')
def warning(self, warning_message):
"""Log a warning message and display it in the Agent's status page.
:param str warning_message: the warning message.
"""
warning_message = to_string(warning_message)
frame = inspect.currentframe().f_back
lineno = frame.f_lineno
# only log the last part of the filename, not the full path
filename = basename(frame.f_code.co_filename)
self.log.warning(warning_message, extra={'_lineno': lineno, '_filename': filename})
self.warnings.append(warning_message)
def get_warnings(self):
"""
        Return the list of warning messages to be displayed in the info page
"""
warnings = self.warnings
self.warnings = []
return warnings
def _get_requests_proxy(self):
# TODO: Remove with Agent 5
no_proxy_settings = {'http': None, 'https': None, 'no': []}
# First we read the proxy configuration from datadog.conf
proxies = self.agentConfig.get('proxy', datadog_agent.get_config('proxy'))
if proxies:
proxies = proxies.copy()
# requests compliant dict
if proxies and 'no_proxy' in proxies:
proxies['no'] = proxies.pop('no_proxy')
return proxies if proxies else no_proxy_settings
def _format_namespace(self, s):
if self.__NAMESPACE__:
return '{}.{}'.format(self.__NAMESPACE__, to_string(s))
return to_string(s)
def normalize(self, metric, prefix=None, fix_case=False):
"""
        Turn a metric into a well-formed metric name: prefix.b.c
        :param metric: The metric name to normalize
        :param prefix: A prefix to add to the normalized name, default None
        :param fix_case: A boolean, indicating whether to make sure that the metric name returned is in "snake_case"
"""
if isinstance(metric, text_type):
metric = unicodedata.normalize('NFKD', metric).encode('ascii', 'ignore')
if fix_case:
name = self.convert_to_underscore_separated(metric)
if prefix is not None:
prefix = self.convert_to_underscore_separated(prefix)
else:
name = re.sub(br"[,\+\*\-/()\[\]{}\s]", b"_", metric)
# Eliminate multiple _
name = re.sub(br"__+", b"_", name)
# Don't start/end with _
name = re.sub(br"^_", b"", name)
name = re.sub(br"_$", b"", name)
# Drop ._ and _.
name = re.sub(br"\._", b".", name)
name = re.sub(br"_\.", b".", name)
if prefix is not None:
name = ensure_bytes(prefix) + b"." + name
return to_string(name)
def check(self, instance):
raise NotImplementedError
def run(self):
try:
instance = copy.deepcopy(self.instances[0])
if 'set_breakpoint' in self.init_config:
from ..utils.agent.debug import enter_pdb
enter_pdb(self.check, line=self.init_config['set_breakpoint'], args=(instance,))
elif 'profile_memory' in self.init_config:
from ..utils.agent.memory import TRACE_LOCK, profile_memory
with TRACE_LOCK:
metrics = profile_memory(
self.check, self.init_config, namespaces=self.check_id.split(':', 1), args=(instance,)
)
tags = ['check_name:{}'.format(self.name), 'check_version:{}'.format(self.check_version)]
for m in metrics:
self.gauge(m.name, m.value, tags=tags)
else:
self.check(instance)
result = ''
except Exception as e:
result = json.dumps([{'message': str(e), 'traceback': traceback.format_exc()}])
finally:
if self.metric_limiter:
self.metric_limiter.reset()
return result
class __AgentCheckPy3(__AgentCheck):
"""
    Python 3 version of the __AgentCheck base class; overrides a few methods to
    add compatibility with Python 3.
"""
def event(self, event):
"""Send an event.
An event is a dictionary with the following keys and data types:
.. code:: python
{
"timestamp": int, # the epoch timestamp for the event
"event_type": str, # the event name
"api_key": str, # the api key for your account
"msg_title": str, # the title of the event
"msg_text": str, # the text body of the event
"aggregation_key": str, # a key to use for aggregating events
"alert_type": str, # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info'
"source_type_name": str, # (optional) the source type name
"host": str, # (optional) the name of the host
"tags": list, # (optional) a list of tags to associate with this event
"priority": str, # (optional) specifies the priority of the event ("normal" or "low")
}
        :param dict event: the event to be sent.
"""
# Enforce types of some fields, considerably facilitates handling in go bindings downstream
for key, value in list(iteritems(event)):
# transform any bytes objects to utf-8
if isinstance(value, bytes):
try:
event[key] = event[key].decode('utf-8')
except UnicodeError:
self.log.warning(
'Error decoding unicode field `{}` to utf-8 encoded string, cannot submit event'.format(key)
)
return
if event.get('tags'):
event['tags'] = self._normalize_tags_type(event['tags'])
if event.get('timestamp'):
event['timestamp'] = int(event['timestamp'])
if event.get('aggregation_key'):
event['aggregation_key'] = ensure_unicode(event['aggregation_key'])
if self.__NAMESPACE__:
event.setdefault('source_type_name', self.__NAMESPACE__)
aggregator.submit_event(self, self.check_id, event)
def _normalize_tags_type(self, tags, device_name=None, metric_name=None):
"""
Normalize tags contents and type:
- append `device_name` as `device:` tag
- normalize tags type
- doesn't mutate the passed list, returns a new list
"""
normalized_tags = []
if device_name:
self._log_deprecation('device_name')
normalized_tags.append('device:{}'.format(ensure_unicode(device_name)))
if tags is not None:
for tag in tags:
if tag is None:
continue
if not isinstance(tag, str):
try:
tag = tag.decode('utf-8')
except Exception:
self.log.warning(
'Error decoding tag `{}` as utf-8 for metric `{}`, ignoring tag'.format(tag, metric_name)
)
continue
normalized_tags.append(tag)
return normalized_tags
class __AgentCheckPy2(__AgentCheck):
"""
    Python 2 version of the __AgentCheck base class; overrides a few methods to
    add compatibility with Python 2.
"""
def event(self, event):
# Enforce types of some fields, considerably facilitates handling in go bindings downstream
for key, value in list(iteritems(event)):
# transform the unicode objects to plain strings with utf-8 encoding
if isinstance(value, text_type):
try:
event[key] = event[key].encode('utf-8')
except UnicodeError:
self.log.warning(
"Error encoding unicode field '%s' to utf-8 encoded string, can't submit event", key
)
return
if event.get('tags'):
event['tags'] = self._normalize_tags_type(event['tags'])
if event.get('timestamp'):
event['timestamp'] = int(event['timestamp'])
if event.get('aggregation_key'):
event['aggregation_key'] = ensure_bytes(event['aggregation_key'])
if self.__NAMESPACE__:
event.setdefault('source_type_name', self.__NAMESPACE__)
aggregator.submit_event(self, self.check_id, event)
def _normalize_tags_type(self, tags, device_name=None, metric_name=None):
"""
Normalize tags contents and type:
- append `device_name` as `device:` tag
- normalize tags type
- doesn't mutate the passed list, returns a new list
"""
normalized_tags = []
if device_name:
self._log_deprecation("device_name")
device_tag = self._to_bytes("device:{}".format(device_name))
if device_tag is None:
self.log.warning(
'Error encoding device name `{}` to utf-8 for metric `{}`, ignoring tag'.format(
repr(device_name), repr(metric_name)
)
)
else:
normalized_tags.append(device_tag)
if tags is not None:
for tag in tags:
if tag is None:
continue
encoded_tag = self._to_bytes(tag)
if encoded_tag is None:
self.log.warning(
'Error encoding tag `{}` to utf-8 for metric `{}`, ignoring tag'.format(
repr(tag), repr(metric_name)
)
)
continue
normalized_tags.append(encoded_tag)
return normalized_tags
def _to_bytes(self, data):
"""
Normalize a text data to bytes (type `bytes`) so that the go bindings can
handle it easily.
"""
# TODO: On Python 3, move this `if` line to the `except` branch
# as the common case will indeed no longer be bytes.
if not isinstance(data, bytes):
try:
return data.encode('utf-8')
except Exception:
return None
return data
AgentCheck = __AgentCheckPy3 if PY3 else __AgentCheckPy2
|
the-stack_106_24778
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
app_name = 'onlinecourse'
urlpatterns = [
    # route is a string that contains a URL pattern
    # view refers to the view function
    # name is the name of the URL pattern
path(route='', view=views.CourseListView.as_view(), name='index'),
path('registration/', views.registration_request, name='registration'),
path('login/', views.login_request, name='login'),
path('logout/', views.logout_request, name='logout'),
# ex: /onlinecourse/5/
path('<int:pk>/', views.CourseDetailView.as_view(), name='course_details'),
# ex: /enroll/5/
path('<int:course_id>/enroll/', views.enroll, name='enroll'),
# <HINT> Create a route for submit view
    path('<int:course_id>/submit/', views.submit, name='submit'),
# <HINT> Create a route for show_exam_result view
    path('course/<int:course_id>/lesson/<int:lesson_id>/submission/<int:submission_id>/result/', views.show_exam_result, name='show_exam_result')
# path('course/<int:course_id>/submission/<int:submission_id>/result/', views.show_exam_result , name="show_exam_result")
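    # Illustrative reverse lookup for the route above:
    #   reverse('onlinecourse:show_exam_result', args=(course_id, lesson_id, submission_id))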
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
the-stack_106_24779
|
# -*- coding: utf-8 -*-
"""Test suite for lower bounding techniques."""
import numpy as np
import pandas as pd
import pytest
from sktime.distances.lower_bounding import LowerBounding
from sktime.distances.tests._utils import create_test_distance_numpy
def _validate_bounding_result(
matrix: np.ndarray,
x: np.ndarray,
y: np.ndarray,
all_finite: bool = False,
all_infinite: bool = False,
is_gradient_bounding: bool = False,
):
"""Validate the bounding matrix is what is expected.
Parameters
----------
matrix: np.ndarray (2d array)
Bounding matrix.
x: np.ndarray (1d, 2d or 3d array)
First timeseries.
y: np.ndarray (1d, 2d or 3d array)
Second timeseries.
all_finite: bool, default = False
Boolean that when true will check all the values are finite.
all_infinite: bool, default = False
        Boolean that when true will check all the values (aside from the main diagonal)
        are infinite.
is_gradient_bounding: bool, default = False
Boolean that when true marks the bounding matrix as generated by an algorithm
        that uses a gradient, and therefore the first and second columns are allowed to
        be finite (aside from the first and last element in the matrix).
"""
assert isinstance(matrix, np.ndarray), (
f"A bounding matrix must be of type np.ndarray. Instead one was provided with "
f"{type(matrix)} type."
)
assert matrix.ndim == 2, (
f"A bounding matrix must have two dimensions. Instead one was provided with "
f"{matrix.ndim} dimensions."
)
assert matrix.shape == (len(x), len(y)), (
f"A bounding matrix with shape len(x) by len(y) is expected ({len(x), len(y)}. "
f"Instead one was given with shape {matrix.shape}"
)
unique, counts = np.unique(matrix, return_counts=True)
count_dict = dict(zip(unique, counts))
for key in count_dict:
if np.isfinite(key):
assert count_dict[key] >= len(y) or all_infinite is False, (
"All the values in the bounding matrix should be finite. A infinite "
"value was found (aside from the diagonal)."
)
else:
if is_gradient_bounding:
max_infinite = len(y) + len(x) - 2 # -2 as 0,0 and n,m should be finite
assert count_dict[key] >= max_infinite or all_finite is False, (
"All values in the bounding matrix should be infinite. Aside"
"from the first column and last column."
)
else:
assert all_finite is False, (
"All values in the bounding matrix should be"
"infinite. A finite value was found"
)
def _validate_bounding(
x: np.ndarray,
y: np.ndarray,
) -> None:
"""Test each lower bounding with different parameters.
The amount of finite vs infinite values are estimated and are checked that many
is around the amount in the matrix.
Parameters
----------
x: np.ndarray (1d, 2d or 3d)
First timeseries
y: np.ndarray (1d, 2d, or 3d)
Second timeseries
"""
no_bounding = LowerBounding.NO_BOUNDING
no_bounding_result = no_bounding.create_bounding_matrix(x, y)
_validate_bounding_result(no_bounding_result, x, y, all_finite=True)
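    # Per the assertions below, a Sakoe-Chiba window radius of 1.0 leaves every cell
    # finite, while a radius of 0.0 keeps only the diagonal finite (everything else infinite).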
sakoe_chiba = LowerBounding.SAKOE_CHIBA
_validate_bounding_result(
sakoe_chiba.create_bounding_matrix(x, y, sakoe_chiba_window_radius=0.25),
x,
y,
)
_validate_bounding_result(
sakoe_chiba.create_bounding_matrix(x, y, sakoe_chiba_window_radius=0.25),
x,
y,
)
_validate_bounding_result(
sakoe_chiba.create_bounding_matrix(x, y, sakoe_chiba_window_radius=1.0),
x,
y,
all_finite=True,
)
_validate_bounding_result(
sakoe_chiba.create_bounding_matrix(x, y, sakoe_chiba_window_radius=0.0),
x,
y,
all_infinite=True,
)
itakura_parallelogram = LowerBounding.ITAKURA_PARALLELOGRAM
_validate_bounding_result(
itakura_parallelogram.create_bounding_matrix(x, y, itakura_max_slope=0.2),
x,
y,
is_gradient_bounding=True,
)
_validate_bounding_result(
itakura_parallelogram.create_bounding_matrix(x, y, itakura_max_slope=0.3),
x,
y,
is_gradient_bounding=True,
)
_validate_bounding_result(
itakura_parallelogram.create_bounding_matrix(x, y, itakura_max_slope=1.0),
x,
y,
all_finite=True,
is_gradient_bounding=True,
)
_validate_bounding_result(
itakura_parallelogram.create_bounding_matrix(x, y, itakura_max_slope=0.0),
x,
y,
all_infinite=True,
is_gradient_bounding=True,
)
def test_lower_bounding() -> None:
"""Test for various lower bounding methods."""
no_bounding = LowerBounding.NO_BOUNDING
no_bounding_int = LowerBounding(1)
assert (
no_bounding_int is no_bounding
), "No bounding must be able to be constructed using the enum and a int value."
sakoe_chiba = LowerBounding.SAKOE_CHIBA
sakoe_chiba_int = LowerBounding(2)
assert (
sakoe_chiba_int is sakoe_chiba
), "Sakoe chiba must be able to be constructed using the enum and a int value."
itakura_parallelogram = LowerBounding.ITAKURA_PARALLELOGRAM
itakura_parallelogram_int = LowerBounding(3)
assert itakura_parallelogram_int is itakura_parallelogram, (
"Itakura parallelogram must be able to be constructed using the enum and a int "
"value"
)
_validate_bounding(
x=np.array([10.0]),
y=np.array([15.0]),
)
_validate_bounding(
x=create_test_distance_numpy(10),
y=create_test_distance_numpy(10, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 1),
y=create_test_distance_numpy(10, 1, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10),
y=create_test_distance_numpy(10, 10, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10, 1),
y=create_test_distance_numpy(10, 10, 1, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10, 10),
y=create_test_distance_numpy(10, 10, 10, random_state=2),
)
def test_incorrect_parameters() -> None:
"""Test to check correct errors raised."""
numpy_x = create_test_distance_numpy(10, 10)
numpy_y = create_test_distance_numpy(10, 10, random_state=2)
df_x = pd.DataFrame(numpy_x)
series_x = df_x.iloc[0]
numpy_4d = np.array([[[[1, 2, 3]]]])
no_bounding = LowerBounding.NO_BOUNDING
sakoe_chiba = LowerBounding.SAKOE_CHIBA
with pytest.raises(ValueError): # Try pass data frame in y
no_bounding.create_bounding_matrix(numpy_x, df_x)
with pytest.raises(ValueError): # Try pass data frame in x
no_bounding.create_bounding_matrix(df_x, numpy_y)
with pytest.raises(ValueError): # Try pass series in y
no_bounding.create_bounding_matrix(numpy_x, series_x)
with pytest.raises(ValueError): # Try pass series in x
no_bounding.create_bounding_matrix(series_x, numpy_y)
with pytest.raises(ValueError): # Try pass 4d numpy in y
no_bounding.create_bounding_matrix(numpy_x, numpy_4d)
with pytest.raises(ValueError): # Try pass 4d numpy in x
no_bounding.create_bounding_matrix(numpy_4d, numpy_y)
with pytest.raises(ValueError): # Try pass float to sakoe
sakoe_chiba.create_bounding_matrix(
numpy_x, numpy_y, sakoe_chiba_window_radius=1.2
)
with pytest.raises(ValueError): # Try pass both window and gradient
sakoe_chiba.create_bounding_matrix(
numpy_x, numpy_y, sakoe_chiba_window_radius=1.2, itakura_max_slope=10.0
)
def test_numba_lower_bounding() -> None:
"""Test numba implementation of bounding."""
_validate_bounding(
x=np.array([10.0]),
y=np.array([15.0]),
)
_validate_bounding(
x=create_test_distance_numpy(10),
y=create_test_distance_numpy(10, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 1),
y=create_test_distance_numpy(10, 1, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10),
y=create_test_distance_numpy(10, 10, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10, 1),
y=create_test_distance_numpy(10, 10, 1, random_state=2),
)
_validate_bounding(
x=create_test_distance_numpy(10, 10, 10),
y=create_test_distance_numpy(10, 10, 10, random_state=2),
)
|
the-stack_106_24784
|
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleChainConnectionInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
from tb_rest_client.api_client import ApiClient
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'from_index': 'int',
'target_rule_chain_id': 'RuleChainId',
'additional_info': 'JsonNode',
'type': 'str'
}
attribute_map = {
'from_index': 'fromIndex',
'target_rule_chain_id': 'targetRuleChainId',
'additional_info': 'additionalInfo',
'type': 'type'
}
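    # Illustrative construction (argument values are hypothetical):
    #   RuleChainConnectionInfo(from_index=0, target_rule_chain_id=rule_chain_id,
    #                           additional_info=info_node, type='Success')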
def __init__(self, from_index=None, target_rule_chain_id=None, additional_info=None, type=None): # noqa: E501
"""RuleChainConnectionInfo - a model defined in Swagger""" # noqa: E501
self._from_index = None
self._target_rule_chain_id = None
self._additional_info = None
self._type = None
self.discriminator = None
self.from_index = from_index
self.target_rule_chain_id = target_rule_chain_id
self.additional_info = additional_info
self.type = type
@property
def from_index(self):
"""Gets the from_index of this RuleChainConnectionInfo. # noqa: E501
Index of rule node in the 'nodes' array of the RuleChainMetaData. Indicates the 'from' part of the connection. # noqa: E501
:return: The from_index of this RuleChainConnectionInfo. # noqa: E501
:rtype: int
"""
return self._from_index
@from_index.setter
def from_index(self, from_index):
"""Sets the from_index of this RuleChainConnectionInfo.
Index of rule node in the 'nodes' array of the RuleChainMetaData. Indicates the 'from' part of the connection. # noqa: E501
:param from_index: The from_index of this RuleChainConnectionInfo. # noqa: E501
:type: int
"""
if from_index is None:
raise ValueError("Invalid value for `from_index`, must not be `None`") # noqa: E501
self._from_index = from_index
@property
def target_rule_chain_id(self):
"""Gets the target_rule_chain_id of this RuleChainConnectionInfo. # noqa: E501
:return: The target_rule_chain_id of this RuleChainConnectionInfo. # noqa: E501
:rtype: RuleChainId
"""
return self._target_rule_chain_id
@target_rule_chain_id.setter
def target_rule_chain_id(self, target_rule_chain_id):
"""Sets the target_rule_chain_id of this RuleChainConnectionInfo.
:param target_rule_chain_id: The target_rule_chain_id of this RuleChainConnectionInfo. # noqa: E501
:type: RuleChainId
"""
if target_rule_chain_id is None:
raise ValueError("Invalid value for `target_rule_chain_id`, must not be `None`") # noqa: E501
self._target_rule_chain_id = target_rule_chain_id
@property
def additional_info(self):
"""Gets the additional_info of this RuleChainConnectionInfo. # noqa: E501
:return: The additional_info of this RuleChainConnectionInfo. # noqa: E501
:rtype: JsonNode
"""
return self._additional_info
@additional_info.setter
def additional_info(self, additional_info):
"""Sets the additional_info of this RuleChainConnectionInfo.
:param additional_info: The additional_info of this RuleChainConnectionInfo. # noqa: E501
:type: JsonNode
"""
if additional_info is None:
raise ValueError("Invalid value for `additional_info`, must not be `None`") # noqa: E501
self._additional_info = additional_info
@property
def type(self):
"""Gets the type of this RuleChainConnectionInfo. # noqa: E501
        Type of the relation. Typically indicates the result of processing by the 'from' rule node. For example, 'Success' or 'Failure' # noqa: E501
:return: The type of this RuleChainConnectionInfo. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this RuleChainConnectionInfo.
        Type of the relation. Typically indicates the result of processing by the 'from' rule node. For example, 'Success' or 'Failure' # noqa: E501
:param type: The type of this RuleChainConnectionInfo. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RuleChainConnectionInfo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleChainConnectionInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_24785
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Kvirt config class
"""
from distutils.spawn import find_executable
from kvirt.defaults import (NETS, POOL, CPUMODEL, NUMCPUS, MEMORY, DISKS,
DISKSIZE, DISKINTERFACE, DISKTHIN, GUESTID,
VNC, CLOUDINIT, RESERVEIP, RESERVEDNS, RESERVEHOST,
START, NESTED, TUNNEL, REPORTURL, REPORTDIR,
REPORT, REPORTALL, INSECURE, KEYS, CMDS, DNS,
DOMAIN, SCRIPTS, FILES, ISO,
NETMASKS, GATEWAY, SHAREDKEY, TEMPLATE, ENABLEROOT,
PLANVIEW, PRIVATEKEY, TAGS, RHNREGISTER, RHNUSER, RHNPASSWORD, RHNAK, RHNORG, RHNPOOL,
FLAVOR, KEEP_NETWORKS, DNSCLIENT, STORE_METADATA, NOTIFY, NOTIFYTOKEN, NOTIFYCMD)
from kvirt import common
import os
from shutil import copyfile, rmtree
import sys
import yaml
class Kbaseconfig:
"""
"""
def __init__(self, client=None, containerclient=None, debug=False, quiet=False):
inifile = "%s/.kcli/config.yml" % os.environ.get('HOME')
secretsfile = "%s/.kcli/secrets.yml" % os.environ.get('HOME')
if not os.path.exists(secretsfile):
secrets = {}
else:
with open(secretsfile, 'r') as entries:
try:
secrets = yaml.load(entries)
except yaml.scanner.ScannerError as err:
common.pprint("Couldn't parse yaml in .kcli/secrets.yml. Leaving...", color='red')
common.pprint(err, color='red')
os._exit(1)
if not os.path.exists(inifile):
client = 'local'
if os.path.exists('/Users'):
_type = 'vbox'
elif os.path.exists('/var/run/libvirt/libvirt-sock'):
_type = 'kvm'
elif os.path.exists(os.path.expanduser('~/.kube')):
_type = 'kubevirt'
client = 'kubevirt'
else:
_type = 'fake'
client = 'fake'
self.ini = {'default': {'client': client}, client:
{'pool': 'default', 'type': _type}}
else:
with open(inifile, 'r') as entries:
try:
self.ini = yaml.load(entries)
except yaml.scanner.ScannerError as err:
common.pprint("Couldn't parse yaml in .kcli/config.yml. Leaving...", color='red')
common.pprint(err, color='red')
os._exit(1)
except:
self.host = None
return
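        # Replace any '?secret' placeholder in config.yml with the matching entry from secrets.yml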
for key1 in self.ini:
for key2 in self.ini[key1]:
if isinstance(self.ini[key1][key2], str) and self.ini[key1][key2] == '?secret':
if key1 in secrets and key2 in secrets[key1]:
self.ini[key1][key2] = secrets[key1][key2]
else:
common.pprint("Missing secret for %s/%s" % (key1, key2), color='red')
os._exit(1)
if 'default' not in self.ini:
if len(self.ini) == 1:
client = list(self.ini.keys())[0]
self.ini['default'] = {"client": client}
else:
common.pprint("Missing default section in config file. Leaving...", color='red')
self.host = None
return
if 'client' not in self.ini['default']:
common.pprint("Using local hypervisor as no client was specified...", color='green')
self.ini['default']['client'] = 'local'
self.ini['local'] = {}
self.clients = [e for e in self.ini if e != 'default']
defaults = {}
default = self.ini['default']
defaults['nets'] = default.get('nets', NETS)
defaults['pool'] = default.get('pool', POOL)
defaults['template'] = default.get('template', TEMPLATE)
defaults['cpumodel'] = default.get('cpumodel', CPUMODEL)
defaults['numcpus'] = int(default.get('numcpus', NUMCPUS))
defaults['memory'] = int(default.get('memory', MEMORY))
defaults['disks'] = default.get('disks', DISKS)
defaults['disksize'] = default.get('disksize', DISKSIZE)
defaults['diskinterface'] = default.get('diskinterface', DISKINTERFACE)
defaults['diskthin'] = default.get('diskthin', DISKTHIN)
defaults['guestid'] = default.get('guestid', GUESTID)
defaults['vnc'] = bool(default.get('vnc', VNC))
defaults['cloudinit'] = bool(default.get('cloudinit', CLOUDINIT))
defaults['reserveip'] = bool(default.get('reserveip', RESERVEIP))
defaults['reservedns'] = bool(default.get('reservedns', RESERVEDNS))
defaults['reservehost'] = bool(default.get('reservehost', RESERVEHOST))
defaults['nested'] = bool(default.get('nested', NESTED))
defaults['start'] = bool(default.get('start', START))
defaults['tunnel'] = bool(default.get('tunnel', TUNNEL))
defaults['insecure'] = bool(default.get('insecure', INSECURE))
defaults['reporturl'] = default.get('reporturl', REPORTURL)
defaults['reportdir'] = default.get('reportdir', REPORTDIR)
defaults['report'] = bool(default.get('report', REPORT))
defaults['reportall'] = bool(default.get('reportall', REPORTALL))
defaults['keys'] = default.get('keys', KEYS)
defaults['cmds'] = default.get('cmds', CMDS)
defaults['dns'] = default.get('dns', DNS)
        defaults['domain'] = default.get('domain', DOMAIN)
        defaults['scripts'] = default.get('scripts', SCRIPTS)
defaults['files'] = default.get('files', FILES)
defaults['iso'] = default.get('iso', ISO)
defaults['netmasks'] = default.get('netmasks', NETMASKS)
defaults['gateway'] = default.get('gateway', GATEWAY)
defaults['sharedkey'] = default.get('sharedkey', SHAREDKEY)
defaults['enableroot'] = default.get('enableroot', ENABLEROOT)
defaults['planview'] = default.get('planview', PLANVIEW)
defaults['privatekey'] = default.get('privatekey', PRIVATEKEY)
defaults['rhnregister'] = default.get('rhnregister', RHNREGISTER)
defaults['rhnuser'] = default.get('rhnuser', RHNUSER)
defaults['rhnpassword'] = default.get('rhnpassword', RHNPASSWORD)
defaults['rhnactivationkey'] = default.get('rhnactivationkey', RHNAK)
defaults['rhnorg'] = default.get('rhnorg', RHNORG)
defaults['rhnpool'] = default.get('rhnpool', RHNPOOL)
defaults['tags'] = default.get('tags', TAGS)
defaults['flavor'] = default.get('flavor', FLAVOR)
defaults['keep_networks'] = default.get('keep_networks', KEEP_NETWORKS)
defaults['dnsclient'] = default.get('dnsclient', DNSCLIENT)
defaults['storemetadata'] = default.get('storemetadata', STORE_METADATA)
defaults['notify'] = default.get('notify', NOTIFY)
defaults['notifytoken'] = default.get('notifytoken', NOTIFYTOKEN)
defaults['notifycmd'] = default.get('notifycmd', NOTIFYCMD)
currentplanfile = "%s/.kcli/plan" % os.environ.get('HOME')
if os.path.exists(currentplanfile):
self.currentplan = open(currentplanfile).read().strip()
else:
self.currentplan = 'kvirt'
self.default = defaults
profilefile = default.get('profiles', "%s/.kcli/profiles.yml" %
os.environ.get('HOME'))
profilefile = os.path.expanduser(profilefile)
if not os.path.exists(profilefile):
self.profiles = {}
else:
with open(profilefile, 'r') as entries:
self.profiles = yaml.load(entries)
flavorsfile = default.get('flavors', "%s/.kcli/flavors.yml" %
os.environ.get('HOME'))
flavorsfile = os.path.expanduser(flavorsfile)
if not os.path.exists(flavorsfile):
self.flavors = {}
else:
with open(flavorsfile, 'r') as entries:
try:
self.flavors = yaml.load(entries)
except yaml.scanner.ScannerError as err:
common.pprint("Couldn't parse yaml in .kcli/flavors.yml. Leaving...", color='red')
common.pprint(err, color='red')
os._exit(1)
self.extraclients = {}
self._extraclients = []
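        # Resolve which client to use: 'all', an explicit comma-separated list,
        # the configured default, or a single named client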
if client == 'all':
clis = [cli for cli in self.clients if
self.ini[cli].get('enabled', True)]
self.client = clis[0]
self._extraclients = clis[1:]
elif client is None:
self.client = self.ini['default']['client']
elif ',' in client:
self.client = client.split(',')[0]
self._extraclients = client.split(',')[1:]
else:
self.client = client
if self.client not in self.ini:
common.pprint("Missing section for client %s in config file. Leaving..." % self.client, color='red')
os._exit(1)
self.options = self.ini[self.client]
options = self.options
self.enabled = options.get('enabled', True)
if not self.enabled:
common.pprint("Disabled hypervisor %s.Leaving..." % client, color='red')
os._exit(1)
self.host = options.get('host', '127.0.0.1')
self.port = options.get('port', 22)
self.user = options.get('user', 'root')
self.protocol = options.get('protocol', 'ssh')
self.type = options.get('type', 'kvm')
self.url = options.get('url', None)
self.pool = options.get('pool', self.default['pool'])
self.template = options.get('template', self.default['template'])
self.tunnel = bool(options.get('tunnel', self.default['tunnel']))
self.insecure = bool(options.get('insecure', self.default['insecure']))
self.report = options.get('report', self.default['report'])
        self.reporturl = options.get('reporturl', self.default['reporturl'])
self.reportdir = options.get('reportdir', self.default['reportdir'])
self.reportall = options.get('reportall', self.default['reportall'])
self.nets = options.get('nets', self.default['nets'])
self.cpumodel = options.get('cpumodel', self.default['cpumodel'])
self.cpuflags = options.get('cpuflags', [])
self.numcpus = options.get('numcpus', self.default['numcpus'])
self.memory = options.get('memory', self.default['memory'])
self.disks = options.get('disks', self.default['disks'])
self.disksize = options.get('disksize', self.default['disksize'])
self.diskinterface = options.get('diskinterface',
self.default['diskinterface'])
self.diskthin = options.get('diskthin', self.default['diskthin'])
self.guestid = options.get('guestid', self.default['guestid'])
self.vnc = options.get('vnc', self.default['vnc'])
self.cloudinit = options.get('cloudinit', self.default['cloudinit'])
self.reserveip = options.get('reserveip', self.default['reserveip'])
self.reservedns = options.get('reservedns', self.default['reservedns'])
self.reservehost = options.get('reservehost',
self.default['reservehost'])
self.nested = options.get('nested', self.default['nested'])
self.start = options.get('start', self.default['start'])
self.iso = options.get('iso', self.default['iso'])
self.keys = options.get('keys', self.default['keys'])
self.cmds = options.get('cmds', self.default['cmds'])
self.netmasks = options.get('netmasks', self.default['netmasks'])
self.gateway = options.get('gateway', self.default['gateway'])
self.sharedkey = options.get('sharedkey', self.default['sharedkey'])
self.enableroot = options.get('enableroot', self.default['enableroot'])
self.planview = options.get('planview', self.default['planview'])
self.dns = options.get('dns', self.default['dns'])
self.domain = options.get('domain', self.default['domain'])
self.scripts = options.get('scripts', self.default['scripts'])
self.files = options.get('files', self.default['files'])
self.privatekey = options.get('privatekey', self.default['privatekey'])
self.rhnregister = options.get('rhnregister', self.default['rhnregister'])
self.rhnuser = options.get('rhnuser', self.default['rhnuser'])
self.rhnpassword = options.get('rhnpassword', self.default['rhnpassword'])
self.rhnak = options.get('rhnactivationkey', self.default['rhnactivationkey'])
self.rhnorg = options.get('rhnorg', self.default['rhnorg'])
self.rhnpool = options.get('rhnpool', self.default['rhnpool'])
self.tags = options.get('tags', self.default['tags'])
self.flavor = options.get('flavor', self.default['flavor'])
self.dnsclient = options.get('dnsclient', self.default['dnsclient'])
self.storemetadata = options.get('storemetadata', self.default['storemetadata'])
self.notify = options.get('notify', self.default['notify'])
self.notifytoken = options.get('notifytoken', self.default['notifytoken'])
self.notifycmd = options.get('notifycmd', self.default['notifycmd'])
self.keep_networks = options.get('keep_networks', self.default['keep_networks'])
self.containerclient = containerclient
def switch_host(self, client):
"""
:param client:
:return:
"""
if client not in self.clients:
common.pprint("Client %s not found in config.Leaving...." % client,
color='red')
return {'result': 'failure', 'reason': "Client %s not found in config" % client}
enabled = self.ini[client].get('enabled', True)
oldclient = self.ini['default']['client']
if not enabled:
common.pprint("Client %s is disabled.Leaving...." % client,
color='red')
return {'result': 'failure', 'reason': "Client %s is disabled" %
client}
common.pprint("Switching to client %s..." % client, color='green')
inifile = "%s/.kcli/config.yml" % os.environ.get('HOME')
if os.path.exists(inifile):
newini = ''
for line in open(inifile).readlines():
if 'client' in line:
newini += line.replace(oldclient, client)
else:
newini += line
open(inifile, 'w').write(newini)
return {'result': 'success'}
def enable_host(self, client):
"""
:param client:
:return:
"""
if client not in self.clients:
common.pprint("Client %s not found in config.Leaving...." % client,
color='green')
return {'result': 'failure', 'reason': "Client %s not found in config" % client}
common.pprint("Enabling client %s..." % client, color='green')
inifile = "%s/.kcli/config.yml" % os.environ.get('HOME')
if os.path.exists(inifile):
newini = ''
clientreached = False
for line in open(inifile).readlines():
if line.startswith("%s:" % client):
clientreached = True
newini += line
continue
if clientreached and 'enabled' not in self.ini[client]:
newini += " enabled: true\n"
clientreached = False
newini += line
continue
elif clientreached and line.startswith(' enabled:'):
newini += " enabled: true\n"
clientreached = False
else:
newini += line
open(inifile, 'w').write(newini)
return {'result': 'success'}
def disable_host(self, client):
"""
:param client:
:return:
"""
if client not in self.clients:
common.pprint("Client %s not found in config.Leaving...." % client,
color='red')
return {'result': 'failure', 'reason': "Client %s not found in config" % client}
elif self.ini['default']['client'] == client:
common.pprint("Client %s currently default.Leaving...." % client,
color='red')
return {'result': 'failure', 'reason': "Client %s currently default" % client}
common.pprint("Disabling client %s..." % client, color='green')
inifile = "%s/.kcli/config.yml" % os.environ.get('HOME')
if os.path.exists(inifile):
newini = ''
clientreached = False
for line in open(inifile).readlines():
if line.startswith("%s:" % client):
clientreached = True
newini += line
continue
if clientreached and 'enabled' not in self.ini[client]:
newini += " enabled: false\n"
clientreached = False
newini += line
continue
elif clientreached and line.startswith(' enabled:'):
newini += " enabled: false\n"
clientreached = False
else:
newini += line
open(inifile, 'w').write(newini)
return {'result': 'success'}
def bootstrap(self, name, host, port, user, protocol, url, pool, poolpath):
"""
:param name:
:param host:
:param port:
:param user:
:param protocol:
:param url:
:param pool:
:param poolpath:
"""
common.pprint("Bootstrapping env", color='green')
if host is None and url is None:
url = 'qemu:///system'
host = '127.0.0.1'
if pool is None:
pool = 'default'
if poolpath is None:
poolpath = '/var/lib/libvirt/images'
if host == '127.0.0.1':
ini = {'default': {'client': 'local', 'cloudinit': True,
'tunnel': False, 'insecure': True, 'enableroot': True,
'reserveip': False, 'reservedns': False,
'nested': True, 'reservehost': False,
'start': True},
'local': {'pool': pool, 'nets': ['default']}}
if not sys.platform.startswith('linux'):
ini['local']['type'] = 'vbox'
else:
if name is None:
name = host
ini = {'default': {'client': name, 'cloudinit': True,
'tunnel': True, 'insecure': True, 'enableroot': True,
'reserveip': False, 'reservedns': False,
'nested': True, 'reservehost': False,
'start': True}, name: {'host': host, 'pool': pool, 'nets': ['default']}}
if protocol is not None:
ini[name]['protocol'] = protocol
if user is not None:
ini[name]['user'] = user
if port is not None:
ini[name]['port'] = port
if url is not None:
ini[name]['url'] = url
path = os.path.expanduser('~/.kcli/config.yml')
rootdir = os.path.expanduser('~/.kcli')
if os.path.exists(path):
copyfile(path, "%s.bck" % path)
if not os.path.exists(rootdir):
os.makedirs(rootdir)
with open(path, 'w') as conf_file:
yaml.safe_dump(ini, conf_file, default_flow_style=False,
encoding='utf-8', allow_unicode=True)
common.pprint("Environment bootstrapped!", color='green')
def list_repos(self):
"""
:return:
"""
repos = {}
plansdir = "%s/.kcli/plans" % os.environ.get('HOME')
if not os.path.exists(plansdir):
return {}
else:
repodirs = [d for d in os.listdir(plansdir) if os.path.isdir("%s/%s" % (plansdir, d))]
for d in repodirs:
repos[d] = None
if os.path.exists("%s/%s/.git/config" % (plansdir, d)) and find_executable('git') is not None:
gitcmd = "git config -f %s/%s/.git/config --get remote.origin.url" % (plansdir, d)
giturl = os.popen(gitcmd).read().strip()
repos[d] = giturl
return repos
def list_products(self, group=None, repo=None):
"""
:param group:
:param repo:
:return:
"""
plansdir = "%s/.kcli/plans" % os.environ.get('HOME')
if not os.path.exists(plansdir):
return []
else:
products = []
repodirs = [d for d in os.listdir(plansdir) if os.path.isdir("%s/%s" % (plansdir, d))]
for rep in repodirs:
repometa = "%s/%s/KMETA" % (plansdir, rep)
if not os.path.exists(repometa):
continue
else:
realdir = os.path.dirname(os.readlink(repometa)) if os.path.islink(repometa) else None
with open(repometa, 'r') as entries:
try:
                            repoproducts = yaml.safe_load(entries)
for repoproduct in repoproducts:
repoproduct['repo'] = rep
if 'file' not in repoproduct:
repoproduct['file'] = 'kcli_plan.yml'
if '/' in repoproduct['file']:
repoproduct['group'] = repoproduct['file'].split('/')[0]
else:
repoproduct['group'] = ''
if realdir is not None:
repoproduct['realdir'] = realdir
products.append(repoproduct)
except yaml.scanner.ScannerError:
common.pprint("Couldn't properly parse .kcli/repo. Leaving...", color='red')
continue
if repo is not None:
products = [product for product in products if 'repo'
in product and product['repo'] == repo]
if group is not None:
products = [product for product in products if 'group'
in product and product['group'] == group]
return products
def create_repo(self, name, url):
"""
:param name:
:param url:
:return:
"""
reponame = name if name is not None else os.path.basename(url)
repodir = "%s/.kcli/plans/%s" % (os.environ.get('HOME'), reponame)
if not os.path.exists(repodir):
os.makedirs(repodir, exist_ok=True)
if not url.startswith('http') and not url.startswith('git'):
os.symlink(url, repodir)
elif find_executable('git') is None:
common.pprint('repo operations require git', color='red')
os._exit(1)
else:
os.system("git clone %s %s" % (url, repodir))
if not os.path.exists("%s/KMETA" % repodir):
for root, dirs, files in os.walk(repodir):
for name in files:
if name == 'KMETA':
dst = "%s/KMETA" % repodir
src = "%s/KMETA" % root.replace("%s/" % repodir, '')
os.symlink(src, dst)
break
os._exit(1)
def update_repo(self, name, url=None):
"""
:param name:
:param url:
:return:
"""
repodir = "%s/.kcli/plans/%s" % (os.environ.get('HOME'), name)
if not os.path.exists(repodir):
return {'result': 'failure', 'reason': 'repo %s not found' % name}
elif find_executable('git') is None:
return {'result': 'failure', 'reason': 'repo operations require git'}
else:
os.chdir(repodir)
if os.path.exists('.git'):
os.system("git pull --rebase")
return {'result': 'success'}
def delete_repo(self, name):
"""
:param name:
:return:
"""
repodir = "%s/.kcli/plans/%s" % (os.environ.get('HOME'), name)
if os.path.exists(repodir) and os.path.isdir(repodir):
rmtree(repodir)
return {'result': 'success'}
def info_plan(self, inputfile, quiet=False, web=False, onfly=None):
"""
:param inputfile:
:param quiet:
:return:
"""
inputfile = os.path.expanduser(inputfile) if inputfile is not None else 'kcli_plan.yml'
if not quiet:
common.pprint("Providing information on parameters of plan %s..." %
inputfile, color='green')
if not os.path.exists(inputfile):
common.pprint("No input file found nor default kcli_plan.yml. Leaving....", color='red')
os._exit(1)
parameters = common.get_parameters(inputfile)
if parameters is not None:
            parameters = yaml.safe_load(parameters)['parameters']
if web:
return parameters
for parameter in parameters:
print("%s: %s" % (parameter, parameters[parameter]))
if parameter == 'baseplan':
if onfly is not None:
common.fetch("%s/%s" % (onfly, parameters[parameter]), '.')
baseplan = parameters[parameter]
baseplan = "%s/%s" % (os.path.dirname(inputfile), baseplan)
self.info_plan(baseplan, quiet=True)
print()
else:
common.pprint("No parameters found. Leaving...", color='blue')
# return {'result': 'success'}
def info_product(self, name, repo=None, group=None, web=False):
"""Info product"""
if repo is not None and group is not None:
            products = [product for product in self.list_products()
if product['name'] == name and
product['repo'] == repo and
product['group'] == group]
elif repo is not None:
products = [product for product in self.list_products()
if product['name'] == name and product['repo'] == repo]
        elif group is not None:
products = [product for product in self.list_products()
if product['name'] == name and
product['group'] == group]
else:
products = [product for product in self.list_products()
if product['name'] == name]
if len(products) == 0:
common.pprint("Product not found. Leaving...", color='red')
os._exit(1)
elif len(products) > 1:
common.pprint("Product found in several places. Specify repo or group", color='red')
os._exit(1)
else:
product = products[0]
repo = product['repo']
repodir = "%s/.kcli/plans/%s" % (os.environ.get('HOME'), repo)
group = product['group']
_file = product['file']
description = product.get('description')
numvms = product.get('numvms')
template = product.get('template')
comments = product.get('comments')
if not web:
if description is not None:
print("description: %s" % description)
if group is not None:
print("group: %s" % group)
if numvms is not None:
numvmsinfo = "numvms: %s" % numvms
if numvms == 1:
numvmsinfo += " (Vm name can be overriden)"
print(numvmsinfo)
if template is not None:
print("template: %s" % template)
if comments is not None:
print("Comments : %s" % comments)
inputfile = "%s/%s" % (product['realdir'], _file) if 'realdir' in product else _file
parameters = self.info_plan("%s/%s" % (repodir, inputfile), quiet=True, web=web)
if web:
return {'product': product, 'comments': comments, 'description': description, 'parameters': parameters}
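# Illustrative usage sketch (not part of the original class). Assuming `cfg` is
# an instance of the enclosing config class, the repo/product helpers defined
# above would typically be combined as follows; the repo URL is only an example
# value.
def _example_repo_workflow(cfg):
    """Hypothetical helper chaining the repo management methods."""
    cfg.create_repo('plans', 'https://github.com/karmab/kcli-plans')
    for name, url in cfg.list_repos().items():
        print("%s -> %s" % (name, url))
    for product in cfg.list_products(repo='plans'):
        print("%s (group: %s)" % (product['name'], product['group']))
    cfg.update_repo('plans')
    cfg.delete_repo('plans')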
|
the-stack_106_24786
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import sys
import traceback
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils.policy_client import PolicyClient
if sys.version_info[0] == 2:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer as HTTPServer
from SocketServer import ThreadingMixIn
elif sys.version_info[0] == 3:
from http.server import SimpleHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
@PublicAPI
class PolicyServer(ThreadingMixIn, HTTPServer):
"""REST server than can be launched from a ExternalEnv.
This launches a multi-threaded server that listens on the specified host
and port to serve policy requests and forward experiences to RLlib.
Examples:
>>> class CartpoleServing(ExternalEnv):
def __init__(self):
ExternalEnv.__init__(
self, spaces.Discrete(2),
spaces.Box(
low=-10,
high=10,
shape=(4,),
dtype=np.float32))
def run(self):
server = PolicyServer(self, "localhost", 8900)
server.serve_forever()
>>> register_env("srv", lambda _: CartpoleServing())
>>> pg = PGTrainer(env="srv", config={"num_workers": 0})
>>> while True:
pg.train()
>>> client = PolicyClient("localhost:8900")
>>> eps_id = client.start_episode()
>>> action = client.get_action(eps_id, obs)
>>> ...
>>> client.log_returns(eps_id, reward)
>>> ...
>>> client.log_returns(eps_id, reward)
"""
@PublicAPI
def __init__(self, external_env, address, port):
handler = _make_handler(external_env)
HTTPServer.__init__(self, (address, port), handler)
def _make_handler(external_env):
class Handler(SimpleHTTPRequestHandler):
def do_POST(self):
            content_len = int(self.headers.get('Content-Length', 0))
raw_body = self.rfile.read(content_len)
parsed_input = pickle.loads(raw_body)
try:
response = self.execute_command(parsed_input)
self.send_response(200)
self.end_headers()
self.wfile.write(pickle.dumps(response))
except Exception:
self.send_error(500, traceback.format_exc())
def execute_command(self, args):
command = args["command"]
response = {}
if command == PolicyClient.START_EPISODE:
response["episode_id"] = external_env.start_episode(
args["episode_id"], args["training_enabled"])
elif command == PolicyClient.GET_ACTION:
response["action"] = external_env.get_action(
args["episode_id"], args["observation"])
elif command == PolicyClient.LOG_ACTION:
external_env.log_action(args["episode_id"],
args["observation"], args["action"])
elif command == PolicyClient.LOG_RETURNS:
external_env.log_returns(args["episode_id"], args["reward"],
args["info"])
elif command == PolicyClient.END_EPISODE:
external_env.end_episode(args["episode_id"],
args["observation"])
else:
raise Exception("Unknown command: {}".format(command))
return response
return Handler
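# Illustrative sketch (not part of the original module): a minimal client-side
# episode loop against a running PolicyServer, mirroring the commands handled
# above. The `env` object is a stand-in for a real external simulator.
def _example_client_loop(env):
    client = PolicyClient("localhost:8900")
    eps_id = client.start_episode()
    obs = env.reset()
    done = False
    while not done:
        action = client.get_action(eps_id, obs)
        obs, reward, done, _ = env.step(action)
        client.log_returns(eps_id, reward)
    client.end_episode(eps_id, obs)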
|
the-stack_106_24787
|
"""Test c_cpp_properties flags generation."""
import imp
from os import path, environ
from unittest import TestCase
from EasyClangComplete.plugin.flags_sources import c_cpp_properties
from EasyClangComplete.plugin import tools
from EasyClangComplete.plugin.utils import flag
imp.reload(c_cpp_properties)
imp.reload(tools)
imp.reload(flag)
CCppProperties = c_cpp_properties.CCppProperties
SearchScope = tools.SearchScope
Flag = flag.Flag
class TestCCppProperties(TestCase):
"""Test generating flags with a 'c_cpp_properties.json' file."""
def test_get_all_flags(self):
"""Test if c_cpp_properties.json is found."""
include_prefixes = ['-I']
db = CCppProperties(include_prefixes)
expected = [Flag('-I' + path.normpath('/lib_include_dir')),
Flag('-Dlib_EXPORTS')]
path_to_db = path.join(path.dirname(__file__),
'c_cpp_properties_files',
'simple')
scope = SearchScope(from_folder=path_to_db)
self.assertEqual(expected, db.get_flags(search_scope=scope))
def test_expand_environment_variables(self):
"""Test environment variables are expanded."""
include_prefixes = ['-I']
db = CCppProperties(include_prefixes)
environ['TEST_VARIABLE_TO_EXPAND'] = '/lib_include_dir'
expected = [Flag('-I' + path.normpath('/lib_include_dir')),
Flag('-Dlib_EXPORTS')]
path_to_db = path.join(path.dirname(__file__),
'c_cpp_properties_files',
'environment')
scope = SearchScope(from_folder=path_to_db)
self.assertEqual(expected, db.get_flags(search_scope=scope))
def test_no_db_in_folder(self):
"""Test if no json is found."""
include_prefixes = ['-I']
db = CCppProperties(include_prefixes)
flags = db.get_flags(path.normpath('/home/user/dummy_main.cpp'))
self.assertTrue(flags is None)
def test_empty_include_and_defines(self):
"""Test that empty fields are handled correctly."""
include_prefixes = ['-I']
db = CCppProperties(include_prefixes)
expected = []
path_to_db = path.join(path.dirname(__file__),
'c_cpp_properties_files',
'empty')
scope = SearchScope(from_folder=path_to_db)
self.assertEqual(expected, db.get_flags(search_scope=scope))
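# Illustrative note (not part of the original tests): the 'simple' fixture used
# above is assumed to follow the usual VS Code c_cpp_properties.json layout,
# roughly:
#
#   {
#     "configurations": [
#       {
#         "name": "Linux",
#         "includePath": ["/lib_include_dir"],
#         "defines": ["lib_EXPORTS"]
#       }
#     ]
#   }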
|
the-stack_106_24788
|
from django.db import migrations
from cms.djangoapps.contentstore.config.waffle import ENABLE_CHECKLISTS_QUALITY
def create_flag(apps, schema_editor):
Flag = apps.get_model('waffle', 'Flag')
# Replacement for flag_undefined_default=True on flag definition
Flag.objects.get_or_create(name=ENABLE_CHECKLISTS_QUALITY.name, defaults={'everyone': True})
class Migration(migrations.Migration):
dependencies = [
('contentstore', '0004_remove_push_notification_configmodel_table'),
('waffle', '0001_initial'),
]
operations = [
# Do not remove the flag for rollback. We don't want to lose original if
# it already existed, and it won't hurt if it was created.
migrations.RunPython(create_flag, reverse_code=migrations.RunPython.noop),
]
|
the-stack_106_24789
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
# Copyright 2022 Northern System Service Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data parser and processing.
Parse image and ground truths in a dataset to training targets and package them
into (image, labels) tuple for RetinaNet.
T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar
Focal Loss for Dense Object Detection. arXiv:1708.02002
"""
import tensorflow.compat.v1 as tf
from dataloader import anchor
from dataloader import mode_keys as ModeKeys
from dataloader import tf_example_decoder
from utils import box_utils
from utils import dataloader_utils
from utils import input_utils
# Currently there are import errors related to AutoAugment and TF 2.x,
# so we guard the import with a try/except.
try:
from utils import autoaugment_utils # pylint: disable=g-import-not-at-top
autoaug_imported = True
except ImportError:
autoaug_imported = False
AUTOAUG_POLICIES = ('v0', 'test')
class Parser(object):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
match_threshold=0.5,
unmatched_threshold=0.5,
aug_rand_hflip=False,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_policy='',
skip_crowd_during_training=True,
max_num_instances=100,
use_bfloat16=True,
regenerate_source_id=False,
mode=None):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of output image. The
output_size should be divided by the largest feature stride 2^max_level.
min_level: `int` number of minimum level of the output feature pyramid.
max_level: `int` number of maximum level of the output feature pyramid.
num_scales: `int` number representing intermediate scales added
        on each level. For instance, num_scales=2 adds one additional
        intermediate anchor scale [2^0, 2^0.5] on each level.
      aspect_ratios: `list` of float numbers representing the aspect ratio
        anchors added on each level. The number indicates the ratio of width to
        height. For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors
on each scale level.
anchor_size: `float` number representing the scale of size of the base
anchor to the feature stride 2^level.
match_threshold: `float` number between 0 and 1 representing the
lower-bound threshold to assign positive labels for anchors. An anchor
with a score over the threshold is labeled positive.
unmatched_threshold: `float` number between 0 and 1 representing the
upper-bound threshold to assign negative labels for anchors. An anchor
with a score below the threshold is labeled negative.
aug_rand_hflip: `bool`, if True, augment training with random
horizontal flip.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
aug_policy: `str`, the augmentation policy to use.
This can be an autoaugment policy name, for example 'v0'.
An empty string indicates no augmentation policy.
The augment policy is independent from `aug_rand_hflip`,
`aug_scale_min`, and `aug_scale_max`.
skip_crowd_during_training: `bool`, if True, skip annotations labeled with
`is_crowd` equals to 1.
max_num_instances: `int` number of maximum number of instances in an
image. The groundtruth data will be padded to `max_num_instances`.
use_bfloat16: `bool`, if True, cast output image to tf.bfloat16.
regenerate_source_id: `bool`, if True TFExampleParser will use hashed
value of `image/encoded` for `image/source_id`.
mode: a ModeKeys. Specifies if this is training, evaluation, prediction or
prediction with groundtruths in the outputs.
"""
self._mode = mode
self._max_num_instances = max_num_instances
self._skip_crowd_during_training = skip_crowd_during_training
self._is_training = (mode == ModeKeys.TRAIN)
self._example_decoder = tf_example_decoder.TfExampleDecoder(
include_mask=False, regenerate_source_id=regenerate_source_id)
# Anchor.
self._output_size = output_size
self._min_level = min_level
self._max_level = max_level
self._num_scales = num_scales
self._aspect_ratios = aspect_ratios
self._anchor_size = anchor_size
self._match_threshold = match_threshold
self._unmatched_threshold = unmatched_threshold
# Data augmentation.
self._aug_rand_hflip = aug_rand_hflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
self._aug_policy = aug_policy
# Device.
self._use_bfloat16 = use_bfloat16
# Data is parsed depending on the model Modekey.
if mode == ModeKeys.TRAIN:
self._parse_fn = self._parse_train_data
elif mode == ModeKeys.EVAL:
self._parse_fn = self._parse_eval_data
elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT:
self._parse_fn = self._parse_predict_data
else:
raise ValueError('mode is not defined.')
def __call__(self, value):
"""Parses data to an image and associated training labels.
Args:
value: a string tensor holding a serialized tf.Example proto.
Returns:
      image: image tensor that is preprocessed to have normalized value and
dimension [output_size[0], output_size[1], 3]
labels:
cls_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location]. The height_l and
width_l represent the dimension of class logits at l-th level.
box_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: number of positive anchors in the image.
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each level.
image_info: a 2D `Tensor` that encodes the information of the image and
the applied preprocessing. It is in the format of
[[original_height, original_width], [scaled_height, scaled_width],
[y_scale, x_scale], [y_offset, x_offset]].
groundtruths:
source_id: source image id. Default value -1 if the source id is empty
in the groundtruth annotation.
boxes: groundtruth bounding box annotations. The box is represented in
[y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
dimension [self._max_num_instances, 4].
classes: groundtruth classes annotations. The tensor is padded with
-1 to the fixed dimension [self._max_num_instances].
areas: groundtruth areas annotations. The tensor is padded with -1
to the fixed dimension [self._max_num_instances].
is_crowds: groundtruth annotations to indicate if an annotation
represents a group of instances by value {0, 1}. The tensor is
padded with 0 to the fixed dimension [self._max_num_instances].
"""
with tf.name_scope('parser'):
data = self._example_decoder.decode(value)
return self._parse_fn(data)
def _parse_train_data(self, data):
"""Parses data for training and evaluation."""
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
is_crowds = data['groundtruth_is_crowd']
# Skips annotations with `is_crowd` = True.
if self._skip_crowd_during_training and self._is_training:
      num_groundtruths = tf.shape(classes)[0]
      with tf.control_dependencies([num_groundtruths, is_crowds]):
        indices = tf.cond(
            tf.greater(tf.size(is_crowds), 0),
            lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
            lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
classes = tf.gather(classes, indices)
boxes = tf.gather(boxes, indices)
# Gets original image and its size.
image = data['image']
if self._aug_policy:
if self._aug_policy in AUTOAUG_POLICIES:
if autoaug_imported:
image, boxes = autoaugment_utils.distort_image_with_autoaugment(
image, boxes, self._aug_policy)
else:
raise ImportError('Unable to get autoaugment_utils, likely due '
                            'to incompatibility with TF 2.X.')
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Flips image randomly during training.
if self._aug_rand_hflip:
image, boxes = input_utils.random_horizontal_flip(image, boxes)
# Converts boxes from normalized coordinates to pixel coordinates.
# Now the coordinates of boxes are w.r.t. the original image.
boxes = box_utils.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max)
image_height, image_width, _ = image.get_shape().as_list()
# Resizes and crops boxes.
# Now the coordinates of boxes are w.r.t the scaled image.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
# Assigns anchor targets.
# Note that after the target assignment, box targets are absolute pixel
# offsets w.r.t. the scaled image.
input_anchor = anchor.Anchor(
self._min_level, self._max_level, self._num_scales,
self._aspect_ratios, self._anchor_size, (image_height, image_width))
anchor_labeler = anchor.AnchorLabeler(
input_anchor, self._match_threshold, self._unmatched_threshold)
(cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
boxes,
tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Packs labels for model_fn outputs.
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'anchor_boxes': input_anchor.multilevel_boxes,
'num_positives': num_positives,
'image_info': image_info,
}
return image, labels
def _parse_eval_data(self, data):
"""Parses data for training and evaluation."""
groundtruths = {}
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_utils.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
image_height, image_width, _ = image.get_shape().as_list()
# Resizes and crops boxes.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
# Assigns anchors.
input_anchor = anchor.Anchor(
self._min_level, self._max_level, self._num_scales,
self._aspect_ratios, self._anchor_size, (image_height, image_width))
anchor_labeler = anchor.AnchorLabeler(
input_anchor, self._match_threshold, self._unmatched_threshold)
(cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
boxes,
tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Sets up groundtruth data for evaluation.
groundtruths = {
'source_id': data['source_id'],
'height': data['height'],
'width': data['width'],
'num_groundtruths': tf.shape(data['groundtruth_classes']),
'boxes': box_utils.denormalize_boxes(
data['groundtruth_boxes'], image_shape),
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
groundtruths['source_id'] = dataloader_utils.process_source_id(
groundtruths['source_id'])
groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
# Packs labels for model_fn outputs.
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'anchor_boxes': input_anchor.multilevel_boxes,
'num_positives': num_positives,
'image_info': image_info,
'groundtruths': groundtruths,
}
return image, labels
def _parse_predict_data(self, data):
"""Parses data for prediction."""
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
image_height, image_width, _ = image.get_shape().as_list()
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Compute Anchor boxes.
input_anchor = anchor.Anchor(
self._min_level, self._max_level, self._num_scales,
self._aspect_ratios, self._anchor_size, (image_height, image_width))
labels = {
'anchor_boxes': input_anchor.multilevel_boxes,
'image_info': image_info,
}
# If mode is PREDICT_WITH_GT, returns groundtruths and training targets
# in labels.
if self._mode == ModeKeys.PREDICT_WITH_GT:
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_utils.denormalize_boxes(
data['groundtruth_boxes'], image_shape)
groundtruths = {
'source_id': data['source_id'],
'height': data['height'],
'width': data['width'],
'num_detections': tf.shape(data['groundtruth_classes']),
'boxes': boxes,
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
groundtruths['source_id'] = dataloader_utils.process_source_id(
groundtruths['source_id'])
groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
labels['groundtruths'] = groundtruths
# Computes training objective for evaluation loss.
classes = data['groundtruth_classes']
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
# Assigns anchors.
anchor_labeler = anchor.AnchorLabeler(
input_anchor, self._match_threshold, self._unmatched_threshold)
(cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
boxes,
tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
labels['cls_targets'] = cls_targets
labels['box_targets'] = box_targets
labels['num_positives'] = num_positives
return {
'images': image,
'labels': labels,
}
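# Illustrative sketch (not part of the original module): mapping a Parser over
# a TFRecord dataset. The file pattern and the anchor hyper-parameters below
# are placeholder values, not defaults mandated by this module.
def _example_input_fn(file_pattern, batch_size):
  parser = Parser(
      output_size=[640, 640],
      min_level=3,
      max_level=7,
      num_scales=3,
      aspect_ratios=[1.0, 2.0, 0.5],
      anchor_size=4.0,
      use_bfloat16=False,
      mode=ModeKeys.TRAIN)
  dataset = tf.data.Dataset.list_files(file_pattern, shuffle=True)
  dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=8)
  dataset = dataset.map(
      parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return dataset.batch(batch_size, drop_remainder=True)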
|
the-stack_106_24790
|
#! /usr/bin/env python
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from landlab.field import FieldError, GroupError
from landlab.field.graph_field import GraphFields as ModelDataFields
def test_init():
fields = ModelDataFields()
assert set() == fields.groups
# assert_set_equal(set(), fields.groups)
def test_new_field_location():
fields = ModelDataFields()
fields.new_field_location("node", 12)
assert set(["node"]) == fields.groups
# assert_set_equal(set(['node']), fields.groups)
def test_add_existing_group():
fields = ModelDataFields()
fields.new_field_location("node", size=12)
with pytest.raises(ValueError):
fields.new_field_location("node", size=24)
def test_add_multiple_groups():
fields = ModelDataFields()
fields.new_field_location("node", 12)
fields.new_field_location("cell", 2)
fields.new_field_location("face", 7)
fields.new_field_location("link", 7)
assert set(["node", "cell", "face", "link"]) == fields.groups
# assert_set_equal(set(['node', 'cell', 'face', 'link']), fields.groups)
def test_ones():
fields = ModelDataFields()
fields.new_field_location("node", 12)
fields.new_field_location("cell", 2)
value_array = fields.ones("node")
assert_array_equal(np.ones(12), value_array)
value_array = fields.ones("cell")
assert_array_equal(np.ones(2), value_array)
def test_add_ones():
fields = ModelDataFields()
fields.new_field_location("node", 12)
fields.new_field_location("cell", 2)
fields.add_ones("z", at="node")
assert_array_equal(np.ones(12), fields["node"]["z"])
assert_array_equal(np.ones(12), fields.field_values("node", "z"))
fields.add_ones("z", at="cell")
assert_array_equal(np.ones(2), fields["cell"]["z"])
assert_array_equal(np.ones(2), fields.field_values("cell", "z"))
def test_add_ones_return_value():
fields = ModelDataFields()
fields.new_field_location("node", 12)
fields.new_field_location("cell", 2)
rtn_value = fields.add_ones("z", at="node")
assert_array_equal(rtn_value, np.ones(12))
assert rtn_value is fields["node"]["z"]
assert rtn_value is fields.field_values("node", "z")
rtn_value = fields.add_ones("z", at="cell")
assert_array_equal(rtn_value, np.ones(2))
assert rtn_value is fields["cell"]["z"]
assert rtn_value is fields.field_values("cell", "z")
def test_add_existing_field_default():
"""Test default is to not replace existing field."""
fields = ModelDataFields()
fields.new_field_location("node", 12)
fields.add_empty("z", at="node")
with pytest.raises(FieldError):
fields.add_empty("z", at="node")
with pytest.raises(FieldError):
fields.add_ones("z", at="node")
with pytest.raises(FieldError):
fields.add_zeros("z", at="node")
def test_add_existing_field_with_noclobber():
"""Test noclobber raises an error with an existing field."""
fields = ModelDataFields()
fields.new_field_location("node", 12)
fields.add_empty("z", at="node")
with pytest.raises(FieldError):
fields.add_empty("z", at="node", noclobber=True)
with pytest.raises(FieldError):
fields.add_ones("z", at="node", noclobber=True)
with pytest.raises(FieldError):
fields.add_zeros("z", at="node", noclobber=True)
def test_add_field_with_noclobber():
"""Test noclobber does not raise an error with an new field."""
fields = ModelDataFields()
fields.new_field_location("node", 12)
fields.add_empty("a", at="node", noclobber=True)
assert "a" in fields["node"]
fields.add_ones("b", at="node", noclobber=True)
assert "b" in fields["node"]
fields.add_zeros("c", at="node", noclobber=True)
assert "c" in fields["node"]
def test_add_field_with_clobber():
"""Test adding a field with clobber on."""
fields = ModelDataFields()
fields.new_field_location("node", 12)
assert fields.add_empty("a", at="node") is not fields.add_empty(
"a", at="node", noclobber=False
)
assert fields.add_ones("b", at="node") is not fields.add_ones(
"b", at="node", noclobber=False
)
assert fields.add_zeros("c", at="node") is not fields.add_zeros(
"c", at="node", noclobber=False
)
def test_getitem():
fields = ModelDataFields()
fields.new_field_location("node", 12)
assert dict() == fields["node"]
with pytest.raises(GroupError):
fields["cell"]
with pytest.raises(KeyError):
fields["cell"]
def test_at_attribute():
fields = ModelDataFields()
fields.new_field_location("node", 12)
assert dict() == fields.at_node
with pytest.raises(AttributeError):
fields.at_cell
fields.add_ones("z", at="node")
assert_array_equal(np.ones(12), fields.at_node["z"])
def test_has_group():
fields = ModelDataFields()
fields.new_field_location("node", 12)
assert fields.has_group("node")
assert not fields.has_group("cell")
def test_delete_field():
fields = ModelDataFields()
fields.new_field_location("link", 17)
assert dict() == fields.at_link
with pytest.raises(AttributeError):
fields.at_node
fields.add_zeros("link", "vals")
assert_array_equal(np.zeros(17), fields.at_link["vals"])
with pytest.raises(KeyError):
fields.delete_field("node", "vals")
fields.delete_field("link", "vals")
with pytest.raises(KeyError):
fields.field_units("link", "vals")
with pytest.raises(KeyError):
fields.at_link["vals"]
def test_scalar_field():
"""Test adding a generic scalar field."""
fields = ModelDataFields()
fields.new_field_location("all_over_the_place", 1)
assert dict() == fields.at_all_over_the_place
with pytest.raises(AttributeError):
fields.at_cell
fields.at_all_over_the_place["const"] = 1.
assert_array_equal(np.array(1.), fields.at_all_over_the_place["const"])
val = np.array(2.)
fields.at_all_over_the_place["const"] = val
assert val is fields.at_all_over_the_place["const"]
def test_grid_field_as_array():
"""Test adding an array as a grid field."""
fields = ModelDataFields()
fields.new_field_location("grid", 1)
fields.at_grid["const"] = [1., 2.]
assert_array_equal(np.array([1., 2.]), fields.at_grid["const"])
val = np.array([1., 2.])
fields.at_grid["const"] = val
assert val is fields.at_grid["const"]
val.shape = (1, 1, 2, 1)
fields.at_grid["const"] = val
assert_array_equal(np.array([1., 2.]), fields.at_grid["const"])
assert val is fields.at_grid["const"]
def test_grid_field_add_zeros_ones_empty():
"""Test creating scalar fields with add_zeros, add_empty, and add_ones."""
fields = ModelDataFields()
fields.new_field_location("grid", 1)
with pytest.raises(ValueError):
fields.add_zeros("value", at="grid")
with pytest.raises(ValueError):
fields.add_empty("value", at="grid")
with pytest.raises(ValueError):
fields.add_ones("value", at="grid")
def test_grid_field_zeros_ones_empty():
"""Test creating scalar fields with zeros, empty, and ones."""
fields = ModelDataFields()
fields.new_field_location("grid", 1)
with pytest.raises(ValueError):
fields.zeros("grid")
with pytest.raises(ValueError):
fields.empty("grid")
with pytest.raises(ValueError):
fields.ones("grid")
def test_nd_field():
"""Test creating fields that are nd in shape."""
fields = ModelDataFields()
fields.new_field_location("node", 12)
fields.add_field("new_value", np.ones((12, 4, 5)), at="node")
fields.add_field("newer_value", np.ones((12, 4)), at="node")
with pytest.raises(ValueError):
fields.add_field("newest_value", np.ones((13, 4, 5)), at="node")
with pytest.raises(ValueError):
fields.add_field("newestest_value", np.ones((13)), at="node")
|
the-stack_106_24792
|
"""
This file offers the methods to automatically retrieve the graph Lactobacillus curieae.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def LactobacillusCurieae(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Lactobacillus curieae graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Lactobacillus curieae graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="LactobacillusCurieae",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
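# Illustrative usage (not part of the original module): retrieving the graph
# with explicit, documented arguments.
def _example_retrieval():
    """Hypothetical helper returning the undirected, preprocessed graph."""
    return LactobacillusCurieae(directed=False, version="links.v11.5")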
|
the-stack_106_24793
|
"""Device handler for centralite motion (only) sensors."""
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import Basic, Identify, Ota, PollControl
from zigpy.zcl.clusters.measurement import TemperatureMeasurement
from zigpy.zcl.clusters.security import IasZone
from zhaquirks import PowerConfigurationCluster
from . import CENTRALITE
from ..const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
DIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821
MANUFACTURER_SPECIFIC_CLUSTER_ID = 0xFC46 # decimal = 64582
MANUFACTURER_SPECIFIC_PROFILE_ID = 0xC2DF # decimal = 49887
class CentraLiteMotionSensor(CustomDevice):
"""Custom device representing centralite motion (only) sensors."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=1026
# device_version=0
# input_clusters=[0, 1, 3, 1026, 1280, 32, 2821]
# output_clusters=[25]>
MODELS_INFO: [
(CENTRALITE, "3305-S"),
(CENTRALITE, "3325-S"),
(CENTRALITE, "3326-L"),
],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.IAS_ZONE,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfigurationCluster.cluster_id,
Identify.cluster_id,
PollControl.cluster_id,
TemperatureMeasurement.cluster_id,
IasZone.cluster_id,
DIAGNOSTICS_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
# <SimpleDescriptor endpoint=2 profile=49887 device_type=263
# device_version=0
# input_clusters=[0, 1, 3, 2821, 64582]
# output_clusters=[3]>
2: {
PROFILE_ID: MANUFACTURER_SPECIFIC_PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.OCCUPANCY_SENSOR,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfigurationCluster.cluster_id,
Identify.cluster_id,
DIAGNOSTICS_CLUSTER_ID,
MANUFACTURER_SPECIFIC_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Identify.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfigurationCluster,
Identify.cluster_id,
PollControl.cluster_id,
TemperatureMeasurement.cluster_id,
IasZone.cluster_id,
DIAGNOSTICS_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
2: {
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
DIAGNOSTICS_CLUSTER_ID,
MANUFACTURER_SPECIFIC_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Identify.cluster_id],
},
}
}
|
the-stack_106_24794
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from insta_constants.xpath_constants import *
from insta_constants.user_constants import *
from time import sleep
class Instabot:
def __init__(self, username, password):
self.username = username
self.password = password
manager = ChromeDriverManager()
self.driver = webdriver.Chrome(manager.install())
self.driver.implicitly_wait(10)
self.driver.get('https://instagram.com')
sleep(2)
self.driver.find_element_by_xpath(
LOGIN_USERNAME).send_keys(self.username)
self.driver.find_element_by_xpath(
LOGIN_PASSWORD).send_keys(self.password)
self.driver.find_element_by_xpath(LOGIN_BTN).click()
save_pwd_info_check = self.driver.find_element_by_xpath(
"//*[contains(text(),'Not Now')]").text # checks for save pwd info? pop up
if not save_pwd_info_check is None:
self.driver.find_element_by_xpath(
"//*[contains(text(),'Not Now')]").click()
receive_notifications_info_check = self.driver.find_element_by_xpath(
'/html/body/div[4]/div/div/div/div[3]/button[2]').text
if not receive_notifications_info_check is None:
self.driver.find_element_by_xpath(
'/html/body/div[4]/div/div/div/div[3]/button[2]').click()
self.driver.get(USER_MAIN_PAGE)
self.follower_number = int(self.driver.find_element_by_xpath(
FOLLOWER_XPATH).text.split(' ')[0])
self.following_number = int(self.driver.find_element_by_xpath(
FOLLOWING_XPATH).text.split(' ')[0])
def get_followers(self):
# click on follower and start scrolling down
self.driver.find_element_by_xpath(FOLLOWER_XPATH).click()
sleep(1)
# define WebElement you want to scroll down on
scroller = self.driver.find_element_by_class_name('isgrP')
last_ht, ht = 1, 0
while last_ht != ht:
last_ht = ht
sleep(1)
ht = self.driver.execute_script("""
arguments[0].scrollBy(0,arguments[0].scrollHeight);
return arguments[0].scrollHeight * 0.3;
""", scroller)
follower_path_dynamic = self.driver.find_elements_by_tag_name('a')
followers = [
name.text for name in follower_path_dynamic if name.text != '']
# close tab
self.driver.find_element_by_xpath(
'//*[@class=\"WaOAr\"]//button').click()
return followers
def get_following(self):
# get following
self.driver.find_element_by_xpath(FOLLOWING_XPATH).click()
sleep(1)
# define WebElement you want to scroll down on
scroller = self.driver.find_element_by_class_name('isgrP')
last_ht, ht = 1, 0
while last_ht != ht:
last_ht = ht
sleep(1)
ht = self.driver.execute_script("""
arguments[0].scrollBy(0,arguments[0].scrollHeight);
return arguments[0].scrollHeight * 0.3;
""", scroller)
following_path_dynamic = self.driver.find_elements_by_tag_name('a')
following = [
name.text for name in following_path_dynamic if name.text != '']
# close tab
self.driver.find_element_by_xpath(
'//*[@class=\"WaOAr\"]//button').click()
return following
def find_unfollowing(self):
followers = self.get_followers()
following = self.get_following()
diff = list(set(following) - set(followers))
print(f'Following no: {len(following)}')
print(f'Followers no: {len(followers)}')
        print(f'Not following you no: {len(diff)}')
print(
f'Not following you --> {diff}')
def quit(self):
self.driver.quit()
bot = Instabot(USERNAME, PASSWORD)
bot.find_unfollowing()
bot.quit()
|
the-stack_106_24795
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import datetime
import os
import platform
import time
from subprocess import call
import requests
class SlotChecker:
def __init__(self):
self.DISTRICT_IDS = [(188, "Gurgaon")] # (149, "South Delhi") #
self.NUM_WEEKS = 5
self.DATES = []
self.URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id={}&date={}"
self.WRITE_TO_FILE = True
self.ALARM = True
self.FILE_NAME = "vaccine.txt"
self.MIN_AGE = 18
self.MIN_CAPACITY = 1
now = datetime.datetime.now()
for i in range(60):
target_time = now + datetime.timedelta(days=1 * i)
self.DATES.append(target_time.strftime("%d-%m-%Y"))
def check_free_slots(self, data):
free_slots = []
centers = data['centers']
for center in centers:
for session in center['sessions']:
if session['min_age_limit'] == self.MIN_AGE and session['available_capacity'] > self.MIN_CAPACITY:
free_slots.append(
"{} - {} - {} - {} - {} - {}".format(center['name'], center['district_name'], session['date'],
center['fee_type'], session['vaccine'], session['available_capacity']))
return free_slots
def write_to_file(self, slots):
print(slots)
f = open(self.FILE_NAME, "a")
data = '\n'.join(slots)
f.write(data)
f.write('\n')
f.close()
def run(self):
slots = []
for district_id in self.DISTRICT_IDS:
for date in self.DATES:
resp = requests.get(self.URL.format(district_id[0], date))
if resp.status_code != 200:
print(resp.status_code)
# print("Failed to fetch slots on {} for {}".format(date, district_id[1]))
continue
free_slots = self.check_free_slots(resp.json())
if free_slots:
slots.extend(free_slots)
else:
print("No free slots found on {} for {}".format(date, district_id[1]))
if slots:
if self.WRITE_TO_FILE:
self.write_to_file(slots)
if self.ALARM:
if platform.system() == 'Darwin':
os.system("afplay " + 'alarm.wav')
elif platform.system() == 'Linux':
call(["aplay", "alarm.wav"])
time.sleep(5)
if __name__ == '__main__':
sc = SlotChecker()
sc.run()
|
the-stack_106_24796
|
import torch
import torch.nn as nn
import numpy as np
import sys
import os
import torch.nn.functional as F
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'pointnet2'))
import pointnet2_utils
class PointsObjClsModule(nn.Module):
def __init__(self, seed_feature_dim):
""" object candidate point prediction from seed point features.
Args:
seed_feature_dim: int
number of channels of seed point features
"""
super().__init__()
self.in_dim = seed_feature_dim
self.conv1 = torch.nn.Conv1d(self.in_dim, self.in_dim, 1)
self.bn1 = torch.nn.BatchNorm1d(self.in_dim)
self.conv2 = torch.nn.Conv1d(self.in_dim, self.in_dim, 1)
self.bn2 = torch.nn.BatchNorm1d(self.in_dim)
self.conv3 = torch.nn.Conv1d(self.in_dim, 1, 1)
def forward(self, seed_features):
""" Forward pass.
Arguments:
seed_features: (batch_size, feature_dim, num_seed) Pytorch tensor
Returns:
logits: (batch_size, 1, num_seed)
"""
net = F.relu(self.bn1(self.conv1(seed_features)))
net = F.relu(self.bn2(self.conv2(net)))
logits = self.conv3(net) # (batch_size, 1, num_seed)
return logits
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, input_channel, num_pos_feats=288):
super().__init__()
self.position_embedding_head = nn.Sequential(
nn.Conv1d(input_channel, num_pos_feats, kernel_size=1),
nn.BatchNorm1d(num_pos_feats),
nn.ReLU(inplace=True),
nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1))
def forward(self, xyz):
xyz = xyz.transpose(1, 2).contiguous()
position_embedding = self.position_embedding_head(xyz)
return position_embedding
class FPSModule(nn.Module):
def __init__(self, num_proposal):
super().__init__()
self.num_proposal = num_proposal
def forward(self, xyz, features):
"""
Args:
xyz: (B,K,3)
features: (B,C,K)
"""
# Farthest point sampling (FPS)
sample_inds = pointnet2_utils.furthest_point_sample(xyz, self.num_proposal)
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(xyz_flipped, sample_inds).transpose(1, 2).contiguous()
new_features = pointnet2_utils.gather_operation(features, sample_inds).contiguous()
return new_xyz, new_features, sample_inds
class GeneralSamplingModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, xyz, features, sample_inds):
"""
Args:
xyz: (B,K,3)
features: (B,C,K)
"""
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(xyz_flipped, sample_inds).transpose(1, 2).contiguous()
new_features = pointnet2_utils.gather_operation(features, sample_inds).contiguous()
return new_xyz, new_features, sample_inds
class PredictHead(nn.Module):
def __init__(self, num_class, num_heading_bin, num_size_cluster,
mean_size_arr, num_proposal, seed_feat_dim=256):
super().__init__()
self.num_class = num_class
self.num_heading_bin = num_heading_bin
self.num_size_cluster = num_size_cluster
self.mean_size_arr = mean_size_arr
self.num_proposal = num_proposal
self.seed_feat_dim = seed_feat_dim
# Object proposal/detection
# Objectness scores (1), center residual (3),
# heading class+residual (num_heading_bin*2), size class+residual(num_size_cluster*4)
self.conv1 = torch.nn.Conv1d(seed_feat_dim, seed_feat_dim, 1)
self.bn1 = torch.nn.BatchNorm1d(seed_feat_dim)
self.conv2 = torch.nn.Conv1d(seed_feat_dim, seed_feat_dim, 1)
self.bn2 = torch.nn.BatchNorm1d(seed_feat_dim)
self.objectness_scores_head = torch.nn.Conv1d(seed_feat_dim, 1, 1)
self.center_residual_head = torch.nn.Conv1d(seed_feat_dim, 3, 1)
self.heading_class_head = torch.nn.Conv1d(seed_feat_dim, num_heading_bin, 1)
self.heading_residual_head = torch.nn.Conv1d(seed_feat_dim, num_heading_bin, 1)
self.size_class_head = torch.nn.Conv1d(seed_feat_dim, num_size_cluster, 1)
self.size_residual_head = torch.nn.Conv1d(seed_feat_dim, num_size_cluster * 3, 1)
self.sem_cls_scores_head = torch.nn.Conv1d(seed_feat_dim, self.num_class, 1)
def forward(self, features, base_xyz, end_points, prefix=''):
"""
Args:
features: (B,C,num_proposal)
Returns:
scores: (B,num_proposal,2+3+NH*2+NS*4)
"""
batch_size = features.shape[0]
num_proposal = features.shape[-1]
net = F.relu(self.bn1(self.conv1(features)))
net = F.relu(self.bn2(self.conv2(net)))
# objectness
objectness_scores = self.objectness_scores_head(net).transpose(2, 1) # (batch_size, num_proposal, 1)
# center
center_residual = self.center_residual_head(net).transpose(2, 1) # (batch_size, num_proposal, 3)
center = base_xyz + center_residual # (batch_size, num_proposal, 3)
# heading
heading_scores = self.heading_class_head(net).transpose(2, 1) # (batch_size, num_proposal, num_heading_bin)
# (batch_size, num_proposal, num_heading_bin) (should be -1 to 1)
heading_residuals_normalized = self.heading_residual_head(net).transpose(2, 1)
heading_residuals = heading_residuals_normalized * (np.pi / self.num_heading_bin)
# size
mean_size_arr = torch.from_numpy(self.mean_size_arr.astype(np.float32)).cuda() # (num_size_cluster, 3)
mean_size_arr = mean_size_arr.unsqueeze(0).unsqueeze(0) # (1, 1, num_size_cluster, 3)
size_scores = self.size_class_head(net).transpose(2, 1) # (batch_size, num_proposal, num_size_cluster)
size_residuals_normalized = self.size_residual_head(net).transpose(2, 1).view(
[batch_size, num_proposal, self.num_size_cluster, 3]) # (batch_size, num_proposal, num_size_cluster, 3)
size_residuals = size_residuals_normalized * mean_size_arr # (batch_size, num_proposal, num_size_cluster, 3)
size_recover = size_residuals + mean_size_arr # (batch_size, num_proposal, num_size_cluster, 3)
pred_size_class = torch.argmax(size_scores, -1) # batch_size, num_proposal
pred_size_class = pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1, 3)
pred_size = torch.gather(size_recover, 2, pred_size_class) # batch_size, num_proposal, 1, 3
pred_size = pred_size.squeeze_(2) # batch_size, num_proposal, 3
# class
sem_cls_scores = self.sem_cls_scores_head(net).transpose(2, 1) # (batch_size, num_proposal, num_class)
end_points[f'{prefix}base_xyz'] = base_xyz
end_points[f'{prefix}objectness_scores'] = objectness_scores
end_points[f'{prefix}center'] = center
end_points[f'{prefix}heading_scores'] = heading_scores
end_points[f'{prefix}heading_residuals_normalized'] = heading_residuals_normalized
end_points[f'{prefix}heading_residuals'] = heading_residuals
end_points[f'{prefix}size_scores'] = size_scores
end_points[f'{prefix}size_residuals_normalized'] = size_residuals_normalized
end_points[f'{prefix}size_residuals'] = size_residuals
end_points[f'{prefix}pred_size'] = pred_size
end_points[f'{prefix}sem_cls_scores'] = sem_cls_scores
# # used to check bbox size
# l = pred_size[:, :, 0]
# h = pred_size[:, :, 1]
# w = pred_size[:, :, 2]
# x_corners = torch.stack([l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2], -1) # N Pq 8
# y_corners = torch.stack([h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2], -1) # N Pq 8
# z_corners = torch.stack([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], -1) # N Pq 8
# corners = torch.stack([x_corners, y_corners, z_corners], -1) # N Pq 8 3
# bbox = center.unsqueeze(2) + corners
# end_points[f'{prefix}bbox_check'] = bbox
return center, pred_size
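# Illustrative sketch (not part of the original module): a quick shape check
# for the pure-PyTorch heads above. The sampling modules are omitted here
# because they depend on the compiled pointnet2 CUDA ops.
def _example_shape_check():
    seed_xyz = torch.rand(2, 1024, 3)          # (batch, num_seed, 3)
    seed_features = torch.rand(2, 288, 1024)   # (batch, feature_dim, num_seed)
    objectness = PointsObjClsModule(288)(seed_features)     # (2, 1, 1024)
    pos_embed = PositionEmbeddingLearned(3, 288)(seed_xyz)  # (2, 288, 1024)
    return objectness.shape, pos_embed.shape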
|
the-stack_106_24797
|
# Copyright (c) 2016 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import functools
from neutron._i18n import _
from neutron_lib.api import converters as conv
from neutron_lib.api.definitions import address_scope as as_def
from neutron_lib.api.definitions import network as net_def
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import subnet as subnet_def
from neutron_lib.api import extensions
from neutron_lib.api import validators as valid
from oslo_log import log as logging
import six
ALIAS = 'cisco-apic'
DIST_NAMES = 'apic:distinguished_names'
SYNC_STATE = 'apic:synchronization_state'
NAT_TYPE = 'apic:nat_type'
SNAT_HOST_POOL = 'apic:snat_host_pool'
ACTIVE_ACTIVE_AAP = 'apic:active_active_aap'
EXTERNAL_CIDRS = 'apic:external_cidrs'
SVI = 'apic:svi'
BGP = 'apic:bgp_enable'
BGP_ASN = 'apic:bgp_asn'
BGP_TYPE = 'apic:bgp_type'
NESTED_DOMAIN_NAME = 'apic:nested_domain_name'
NESTED_DOMAIN_TYPE = 'apic:nested_domain_type'
NESTED_DOMAIN_INFRA_VLAN = 'apic:nested_domain_infra_vlan'
NESTED_DOMAIN_ALLOWED_VLANS = 'apic:nested_domain_allowed_vlans'
NESTED_DOMAIN_SERVICE_VLAN = 'apic:nested_domain_service_vlan'
NESTED_DOMAIN_NODE_NETWORK_VLAN = 'apic:nested_domain_node_network_vlan'
EXTRA_PROVIDED_CONTRACTS = 'apic:extra_provided_contracts'
EXTRA_CONSUMED_CONTRACTS = 'apic:extra_consumed_contracts'
EPG_CONTRACT_MASTERS = 'apic:epg_contract_masters'
ERSPAN_CONFIG = 'apic:erspan_config'
POLICY_ENFORCEMENT_PREF = 'apic:policy_enforcement_pref'
SNAT_SUBNET_ONLY = 'apic:snat_subnet_only'
BD = 'BridgeDomain'
EPG = 'EndpointGroup'
SUBNET = 'Subnet'
VRF = 'VRF'
EXTERNAL_NETWORK = 'ExternalNetwork'
AP = 'ApplicationProfile'
SYNC_SYNCED = 'synced'
SYNC_BUILD = 'build'
SYNC_ERROR = 'error'
SYNC_NOT_APPLICABLE = 'N/A'
VLANS_LIST = 'vlans_list'
VLAN_RANGES = 'vlan_ranges'
APIC_MAX_VLAN = 4093
APIC_MIN_VLAN = 1
VLAN_RANGE_START = 'start'
VLAN_RANGE_END = 'end'
ERSPAN_DEST_IP = 'dest_ip'
ERSPAN_FLOW_ID = 'flow_id'
ERSPAN_DIRECTION = 'direction'
LOG = logging.getLogger(__name__)
def _validate_apic_vlan(data, key_specs=None):
if data is None:
return
try:
val = int(data)
if val >= APIC_MIN_VLAN and val <= APIC_MAX_VLAN:
return
msg = _("Invalid value for VLAN: '%s'") % data
LOG.debug(msg)
return msg
except (ValueError, TypeError):
msg = _("Invalid data format for VLAN: '%s'") % data
LOG.debug(msg)
return msg
def _validate_apic_vlan_range(data, key_specs=None):
if data is None:
return
expected_keys = [VLAN_RANGE_START, VLAN_RANGE_END]
msg = valid._verify_dict_keys(expected_keys, data)
if msg:
return msg
for k in expected_keys:
msg = _validate_apic_vlan(data[k])
if msg:
return msg
if int(data[VLAN_RANGE_START]) > int(data[VLAN_RANGE_END]):
msg = _("Invalid start, end for VLAN range %s") % data
return msg
def _validate_erspan_flow_id(data, key_specs=None):
if data is None:
return
msg = valid.validate_non_negative(data)
if int(data) > 1023:
msg = _("ERSPAN flow ID must be less than 1023 (was %s)") % data
elif int(data) == 0:
msg = _("ERSPAN flow ID must be greater than 0 (was %s)") % data
return msg
def _validate_erspan_configs(data, valid_values=None):
"""Validate a list of unique ERSPAN configurations.
:param data: The data to validate. To be valid it must be a list like
structure of ERSPAN config dicts, each containing 'dest_ip' and
'flow_id' key values.
:param valid_values: Not used!
:returns: None if data is a valid list of unique ERSPAN config dicts,
otherwise a human readable message indicating why validation failed.
"""
if not isinstance(data, list):
msg = _("Invalid data format for ERSPAN config: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = (ERSPAN_DEST_IP, ERSPAN_FLOW_ID,)
erspan_configs = []
for erspan_config in data:
msg = valid._verify_dict_keys(expected_keys, erspan_config, False)
if msg:
return msg
msg = _validate_erspan_flow_id(erspan_config[ERSPAN_FLOW_ID])
if msg:
return msg
msg = valid.validate_ip_address(erspan_config[ERSPAN_DEST_IP])
if msg:
return msg
if erspan_config in erspan_configs:
msg = _("Duplicate ERSPAN config '%s'") % erspan_config
LOG.debug(msg)
return msg
erspan_configs.append(erspan_config)
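# Illustrative examples (not part of the original module): a payload accepted
# by the validator above, and one it rejects.
#
#   _validate_erspan_configs(
#       [{'dest_ip': '192.168.0.10', 'flow_id': 1, 'direction': 'both'}])
#   -> None (valid)
#
#   _validate_erspan_configs([{'dest_ip': '192.168.0.10', 'flow_id': 0}])
#   -> "ERSPAN flow ID must be greater than 0 (was 0)"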
def _validate_dict_or_string(data, key_specs=None):
if data is None:
return
if isinstance(data, str) or isinstance(data, six.string_types):
try:
data = ast.literal_eval(data)
except Exception:
msg = _("Extension %s cannot be converted to dict") % data
return msg
return valid.validate_dict_or_none(data, key_specs)
def convert_apic_vlan(value):
if value is None:
return
else:
return int(value)
def convert_apic_none_to_empty_list(value):
if value is None:
return []
if isinstance(value, str) or isinstance(value, six.string_types):
value = ast.literal_eval(value)
return value
def convert_nested_domain_allowed_vlans(value):
if value is None:
return
if isinstance(value, str) or isinstance(value, six.string_types):
value = ast.literal_eval(value)
vlans_list = []
if VLANS_LIST in value:
for vlan in value[VLANS_LIST]:
vlans_list.append(convert_apic_vlan(vlan))
if VLAN_RANGES in value:
for vlan_range in value[VLAN_RANGES]:
for vrng in [VLAN_RANGE_START, VLAN_RANGE_END]:
vlan_range[vrng] = convert_apic_vlan(vlan_range[vrng])
vlans_list.extend(range(vlan_range[VLAN_RANGE_START],
vlan_range[VLAN_RANGE_END] + 1))
# eliminate duplicates
vlans_list = list(set(vlans_list))
# sort
vlans_list.sort()
value[VLANS_LIST] = vlans_list
return value
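# Illustrative example (not from the original source): given an input such as
#     {'vlans_list': [10, 11], 'vlan_ranges': [{'start': 20, 'end': 22}]}
# the converter above expands the ranges, removes duplicates and sorts, so the
# resulting 'vlans_list' becomes [10, 11, 20, 21, 22].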
valid.validators['type:apic_vlan'] = _validate_apic_vlan
valid.validators['type:apic_vlan_list'] = functools.partial(
valid._validate_list_of_items, _validate_apic_vlan)
valid.validators['type:apic_vlan_range_list'] = functools.partial(
valid._validate_list_of_items, _validate_apic_vlan_range)
valid.validators['type:dict_or_string'] = _validate_dict_or_string
valid.validators['type:apic_erspan_flow_id'] = _validate_erspan_flow_id
valid.validators['type:apic_erspan_configs'] = _validate_erspan_configs
APIC_ATTRIBUTES = {
DIST_NAMES: {'allow_post': False, 'allow_put': False, 'is_visible': True},
SYNC_STATE: {'allow_post': False, 'allow_put': False, 'is_visible': True}
}
ERSPAN_KEY_SPECS = [
{ERSPAN_DEST_IP: {'type:ip_address': None,
'required': True},
ERSPAN_FLOW_ID: {'type:apic_erspan_flow_id': None,
'required': True},
ERSPAN_DIRECTION: {'type:values': ['in', 'out', 'both'],
'default': 'both'}},
]
EPG_CONTRACT_MASTER_KEY_SPECS = [
    # key spec for an EPG contract master entry (application profile name + EPG name)
{'app_profile_name': {'type:not_empty_string': None,
'required': True},
'name': {'type:not_empty_string': None,
'required': True}},
]
PORT_ATTRIBUTES = {
ERSPAN_CONFIG: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'convert_to': convert_apic_none_to_empty_list,
'validate': {'type:apic_erspan_configs': None},
},
}
NET_ATTRIBUTES = {
SVI: {
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': False,
'convert_to': conv.convert_to_boolean,
},
BGP: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': False,
'convert_to': conv.convert_to_boolean,
},
BGP_TYPE: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': 'default_export',
'validate': {'type:values': ['default_export', '']},
},
BGP_ASN: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': "0",
'validate': {'type:non_negative': None},
},
NESTED_DOMAIN_NAME: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': '',
'validate': {'type:string': None},
},
NESTED_DOMAIN_TYPE: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': '',
'validate': {'type:string': None},
},
NESTED_DOMAIN_INFRA_VLAN: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'validate': {'type:apic_vlan': None},
'convert_to': convert_apic_vlan,
},
NESTED_DOMAIN_SERVICE_VLAN: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'validate': {'type:apic_vlan': None},
'convert_to': convert_apic_vlan,
},
NESTED_DOMAIN_NODE_NETWORK_VLAN: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'validate': {'type:apic_vlan': None},
'convert_to': convert_apic_vlan,
},
NESTED_DOMAIN_ALLOWED_VLANS: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'validate': {
'type:dict_or_string': {
VLANS_LIST: {'type:apic_vlan_list': None},
VLAN_RANGES: {'type:apic_vlan_range_list': None},
}
},
'convert_to': convert_nested_domain_allowed_vlans,
},
EXTRA_PROVIDED_CONTRACTS: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'convert_to': convert_apic_none_to_empty_list,
'validate': {'type:list_of_unique_strings': None},
},
EXTRA_CONSUMED_CONTRACTS: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'convert_to': convert_apic_none_to_empty_list,
'validate': {'type:list_of_unique_strings': None},
},
EPG_CONTRACT_MASTERS: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'convert_to': convert_apic_none_to_empty_list,
'validate': {'type:list_of_any_key_specs_or_none':
EPG_CONTRACT_MASTER_KEY_SPECS},
},
DIST_NAMES: {
# DN of corresponding APIC L3Out external network or BD.
# It can be specified only on create.
        # Change 'allow_put' if updates on other DNs are allowed later,
# and validate that ExternalNetwork DN may not be updated.
'allow_post': True, 'allow_put': False,
'is_visible': True,
'default': None,
'validate': {
'type:dict_or_none': {
EXTERNAL_NETWORK: {'type:string': None,
'required': False},
BD: {'type:string': None,
'required': False}
},
}
},
POLICY_ENFORCEMENT_PREF: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': 'unenforced',
'validate': {'type:values': ['unenforced', 'enforced', '']},
},
}
EXT_NET_ATTRIBUTES = {
NAT_TYPE: {
# whether NAT is enabled, and if so its type
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': 'distributed',
'validate': {'type:values': ['distributed', 'edge', '']},
},
EXTERNAL_CIDRS: {
# Restrict external traffic to specified addresses
'allow_put': True, 'allow_post': True,
'is_visible': True, 'default': ['0.0.0.0/0'],
'convert_to': convert_apic_none_to_empty_list,
'validate': {'type:subnet_list': None},
},
}
EXT_SUBNET_ATTRIBUTES = {
SNAT_HOST_POOL: {
# Whether an external subnet should be used as a pool
# for allocating host-based SNAT addresses.
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': False,
'convert_to': conv.convert_to_boolean,
},
ACTIVE_ACTIVE_AAP: {
# Whether a subnet will support the active active AAP or not.
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': False,
'convert_to': conv.convert_to_boolean,
},
SNAT_SUBNET_ONLY: {
# Whether this subnet can be used for assigning snat addresses only
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': False,
'convert_to': conv.convert_to_boolean,
}
}
ADDRESS_SCOPE_ATTRIBUTES = {
DIST_NAMES: {
# DN of corresponding APIC VRF; can be specified only on create.
        # Change 'allow_put' if updates on other DNs are allowed later,
# and validate that VRF DN may not be updated.
'allow_post': True, 'allow_put': False,
'is_visible': True,
'default': None,
'validate': {
'type:dict_or_none': {
VRF: {'type:string': None,
'required': True}
}
}
}
}
EXTENDED_ATTRIBUTES_2_0 = {
port_def.COLLECTION_NAME: dict(
list(APIC_ATTRIBUTES.items()) + list(PORT_ATTRIBUTES.items())),
net_def.COLLECTION_NAME: dict(
list(APIC_ATTRIBUTES.items()) + list(EXT_NET_ATTRIBUTES.items()) +
list(NET_ATTRIBUTES.items())),
subnet_def.COLLECTION_NAME: dict(
list(APIC_ATTRIBUTES.items()) + list(EXT_SUBNET_ATTRIBUTES.items())),
as_def.COLLECTION_NAME: dict(
list(APIC_ATTRIBUTES.items()) + list(ADDRESS_SCOPE_ATTRIBUTES.items()))
}
class Cisco_apic(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Cisco APIC"
@classmethod
def get_alias(cls):
return ALIAS
@classmethod
def get_description(cls):
return ("Extension exposing mapping of Neutron resources to Cisco "
"APIC constructs")
@classmethod
def get_updated(cls):
return "2016-03-31T12:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
|
the-stack_106_24799
|
from __future__ import annotations
from datetime import timedelta
import operator
from typing import (
TYPE_CHECKING,
Any,
Callable,
Sequence,
)
import numpy as np
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Timedelta,
delta_to_nanoseconds,
dt64arr_to_periodarr as c_dt64arr_to_periodarr,
iNaT,
parsing,
period as libperiod,
to_offset,
)
from pandas._libs.tslibs.dtypes import FreqGroup
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.offsets import (
Tick,
delta_to_tick,
)
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._typing import (
AnyArrayLike,
Dtype,
NpDtype,
)
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
TD64NS_DTYPE,
ensure_object,
is_datetime64_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
isna,
notna,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
if TYPE_CHECKING:
from pandas.core.arrays import DatetimeArray
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
def _field_accessor(name: str, docstring=None):
def f(self):
base = self.freq._period_dtype_code
result = get_period_field_arr(name, self.asi8, base)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class PeriodArray(dtl.DatelikeOps):
"""
Pandas ExtensionArray for storing Period data.
Users should use :func:`~pandas.period_array` to create new instances.
Alternatively, :func:`~pandas.array` can be used to create new instances
from a sequence of Period scalars.
Parameters
----------
values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
The data to store. These should be arrays that can be directly
converted to ordinals without inference or copy (PeriodArray,
ndarray[int64]), or a box around such an array (Series[period],
PeriodIndex).
dtype : PeriodDtype, optional
A PeriodDtype instance from which to extract a `freq`. If both
`freq` and `dtype` are specified, then the frequencies must match.
freq : str or DateOffset
The `freq` to use for the array. Mostly applicable when `values`
is an ndarray of integers, when `freq` is required. When `values`
is a PeriodArray (or box around), it's checked that ``values.freq``
matches `freq`.
copy : bool, default False
Whether to copy the ordinals before storing.
Attributes
----------
None
Methods
-------
None
See Also
--------
Period: Represents a period of time.
PeriodIndex : Immutable Index for period data.
period_range: Create a fixed-frequency PeriodArray.
array: Construct a pandas array.
Notes
-----
There are two components to a PeriodArray
- ordinals : integer ndarray
- freq : pd.tseries.offsets.Offset
The values are physically stored as a 1-D ndarray of integers. These are
called "ordinals" and represent some kind of offset from a base.
The `freq` indicates the span covered by each element of the array.
All elements in the PeriodArray have the same `freq`.
"""
# array priority higher than numpy scalars
__array_priority__ = 1000
_typ = "periodarray" # ABCPeriodArray
_scalar_type = Period
_recognized_scalars = (Period,)
_is_recognized_dtype = is_period_dtype
_infer_matches = ("period",)
# Names others delegate to us
_other_ops: list[str] = []
_bool_ops: list[str] = ["is_leap_year"]
_object_ops: list[str] = ["start_time", "end_time", "freq"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"weekday",
"week",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"qyear",
"days_in_month",
"daysinmonth",
]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
_datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"]
_dtype: PeriodDtype
# --------------------------------------------------------------------
# Constructors
def __init__(
self, values, dtype: Dtype | None = None, freq=None, copy: bool = False
):
freq = validate_dtype_freq(dtype, freq)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if isinstance(values, ABCSeries):
values = values._values
if not isinstance(values, type(self)):
raise TypeError("Incorrect dtype")
elif isinstance(values, ABCPeriodIndex):
values = values._values
if isinstance(values, type(self)):
if freq is not None and freq != values.freq:
raise raise_on_incompatible(values, freq)
values, freq = values._ndarray, values.freq
values = np.array(values, dtype="int64", copy=copy)
if freq is None:
raise ValueError("freq is not specified and cannot be inferred")
NDArrayBacked.__init__(self, values, PeriodDtype(freq))
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls,
values: np.ndarray,
freq: BaseOffset | None = None,
dtype: Dtype | None = None,
) -> PeriodArray:
# alias for PeriodArray.__init__
assertion_msg = "Should be numpy array of type i8"
assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
return cls(values, freq=freq, dtype=dtype)
@classmethod
def _from_sequence(
cls: type[PeriodArray],
scalars: Sequence[Period | None] | AnyArrayLike,
*,
dtype: Dtype | None = None,
copy: bool = False,
) -> PeriodArray:
if dtype and isinstance(dtype, PeriodDtype):
freq = dtype.freq
else:
freq = None
if isinstance(scalars, cls):
validate_dtype_freq(scalars.dtype, freq)
if copy:
scalars = scalars.copy()
return scalars
periods = np.asarray(scalars, dtype=object)
freq = freq or libperiod.extract_freq(periods)
ordinals = libperiod.extract_ordinals(periods, freq)
return cls(ordinals, freq=freq)
@classmethod
def _from_sequence_of_strings(
cls, strings, *, dtype: Dtype | None = None, copy: bool = False
) -> PeriodArray:
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@classmethod
def _from_datetime64(cls, data, freq, tz=None) -> PeriodArray:
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
periods = dtl.validate_periods(periods)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if start is not None or end is not None:
if field_count > 0:
raise ValueError(
"Can either instantiate from fields or endpoints, but not both"
)
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError("Not enough parameters to construct Period range")
return subarr, freq
# -----------------------------------------------------------------
# DatetimeLike Interface
# error: Argument 1 of "_unbox_scalar" is incompatible with supertype
# "DatetimeLikeArrayMixin"; supertype defines the argument type as
# "Union[Union[Period, Any, Timedelta], NaTType]"
def _unbox_scalar( # type: ignore[override]
self,
value: Period | NaTType,
setitem: bool = False,
) -> np.int64:
if value is NaT:
# error: Item "Period" of "Union[Period, NaTType]" has no attribute "value"
return np.int64(value.value) # type: ignore[union-attr]
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=setitem)
return np.int64(value.ordinal)
else:
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
def _scalar_from_string(self, value: str) -> Period:
return Period(value, freq=self.freq)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._require_matching_freq(other)
# --------------------------------------------------------------------
# Data / Attributes
@cache_readonly
def dtype(self) -> PeriodDtype:
return self._dtype
# error: Read-only property cannot override read-write property
@property # type: ignore[misc]
def freq(self) -> BaseOffset:
"""
Return the frequency object for this PeriodArray.
"""
return self.dtype.freq
def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
if dtype == "i8":
return self.asi8
elif dtype == bool:
return ~self._isnan
# This will raise TypeError for non-object dtypes
return np.array(list(self), dtype=object)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
if pyarrow.types.is_integer(type):
return pyarrow.array(self._ndarray, mask=self.isna(), type=type)
elif isinstance(type, ArrowPeriodType):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
"Not supported to convert PeriodArray to array with different "
f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64")
return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
year = _field_accessor(
"year",
"""
The year of the period.
""",
)
month = _field_accessor(
"month",
"""
The month as January=1, December=12.
""",
)
day = _field_accessor(
"day",
"""
The days of the period.
""",
)
hour = _field_accessor(
"hour",
"""
The hour of the period.
""",
)
minute = _field_accessor(
"minute",
"""
The minute of the period.
""",
)
second = _field_accessor(
"second",
"""
The second of the period.
""",
)
weekofyear = _field_accessor(
"week",
"""
The week ordinal of the year.
""",
)
week = weekofyear
day_of_week = _field_accessor(
"day_of_week",
"""
The day of the week with Monday=0, Sunday=6.
""",
)
dayofweek = day_of_week
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
"day_of_year",
"""
The ordinal day of the year.
""",
)
quarter = _field_accessor(
"quarter",
"""
The quarter of the date.
""",
)
qyear = _field_accessor("qyear")
days_in_month = _field_accessor(
"days_in_month",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
@property
def is_leap_year(self) -> np.ndarray:
"""
Logical indicating if the date belongs to a leap year.
"""
return isleapyear_arr(np.asarray(self.year))
def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray:
"""
Cast to DatetimeArray/Index.
Parameters
----------
freq : str or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise.
how : {'s', 'e', 'start', 'end'}
Whether to use the start or end of the time period being converted.
Returns
-------
DatetimeArray/Index
"""
from pandas.core.arrays import DatetimeArray
how = libperiod.validate_end_alias(how)
end = how == "E"
if end:
if freq == "B" or self.freq == "B":
# roll forward to ensure we land on B date
adjust = Timedelta(1, "D") - Timedelta(1, "ns")
return self.to_timestamp(how="start") + adjust
else:
adjust = Timedelta(1, "ns")
return (self + self.freq).to_timestamp(how="start") - adjust
if freq is None:
freq = self._get_to_timestamp_base()
base = freq
else:
freq = Period._maybe_convert_freq(freq)
base = freq._period_dtype_code
new_parr = self.asfreq(freq, how=how)
new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base)
return DatetimeArray(new_data)._with_freq("infer")
# --------------------------------------------------------------------
def _time_shift(self, periods: int, freq=None) -> PeriodArray:
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None:
raise TypeError(
"`freq` argument is not supported for "
f"{type(self).__name__}._time_shift"
)
values = self.asi8 + periods * self.freq.n
if self._hasnans:
values[self._isnan] = iNaT
return type(self)(values, freq=self.freq)
def _box_func(self, x) -> Period | NaTType:
return Period._from_ordinal(ordinal=x, freq=self.freq)
@doc(**_shared_doc_kwargs, other="PeriodIndex", other_name="PeriodIndex")
def asfreq(self, freq=None, how: str = "E") -> PeriodArray:
"""
Convert the {klass} to the specified frequency `freq`.
Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments
to each :class:`~pandas.Period` in this {klass}.
Parameters
----------
freq : str
A frequency.
how : str {{'E', 'S'}}, default 'E'
Whether the elements should be aligned to the end
            or start within a period.
* 'E', 'END', or 'FINISH' for end,
* 'S', 'START', or 'BEGIN' for start.
January 31st ('END') vs. January 1st ('START') for example.
Returns
-------
{klass}
The transformed {klass} with the new frequency.
See Also
--------
{other}.asfreq: Convert each Period in a {other_name} to the given frequency.
Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency.
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[A-DEC]')
>>> pidx.asfreq('M')
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]')
>>> pidx.asfreq('M', how='S')
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]')
"""
how = libperiod.validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1 = self.freq._period_dtype_code
base2 = freq._period_dtype_code
asi8 = self.asi8
# self.freq.n can't be negative or 0
end = how == "E"
if end:
ordinal = asi8 + self.freq.n - 1
else:
ordinal = asi8
new_data = period_asfreq_arr(ordinal, base1, base2, end)
if self._hasnans:
new_data[self._isnan] = iNaT
return type(self)(new_data, freq=freq)
# ------------------------------------------------------------------
# Rendering Methods
def _formatter(self, boxed: bool = False):
if boxed:
return str
return "'{}'".format
@dtl.ravel_compat
def _format_native_types(
self, na_rep="NaT", date_format=None, **kwargs
) -> np.ndarray:
"""
actually format my specific types
"""
values = self.astype(object)
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: str(dt)
if self._hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
# ------------------------------------------------------------------
def astype(self, dtype, copy: bool = True):
# We handle Period[T] -> Period[U]
# Our parent handles everything else.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self._dtype):
if not copy:
return self
else:
return self.copy()
if is_period_dtype(dtype):
return self.asfreq(dtype.freq)
return super().astype(dtype, copy=copy)
def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
value = self._validate_searchsorted_value(value).view("M8[ns]")
# Cast to M8 to get datetime-like NaT placement
m8arr = self._ndarray.view("M8[ns]")
return m8arr.searchsorted(value, side=side, sorter=sorter)
def fillna(self, value=None, method=None, limit=None) -> PeriodArray:
if method is not None:
# view as dt64 so we get treated as timelike in core.missing
dta = self.view("M8[ns]")
result = dta.fillna(value=value, method=method, limit=limit)
return result.view(self.dtype)
return super().fillna(value=value, method=method, limit=limit)
# ------------------------------------------------------------------
# Arithmetic Methods
def _sub_datelike(self, other):
assert other is not NaT
return NotImplemented
def _sub_period(self, other):
# If the operation is well-defined, we return an object-Index
# of DateOffsets. Null entries are filled with pd.NaT
self._check_compatible_with(other)
asi8 = self.asi8
new_data = asi8 - other.ordinal
new_data = np.array([self.freq * x for x in new_data])
if self._hasnans:
new_data[self._isnan] = NaT
return new_data
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT.
"""
self._require_matching_freq(other)
new_values = algos.checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
mask = self._isnan | other._isnan
new_values[mask] = NaT
return new_values
def _addsub_int_array(
self, other: np.ndarray, op: Callable[[Any, Any], Any]
) -> PeriodArray:
"""
Add or subtract array of integers; equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : np.ndarray[integer-dtype]
op : {operator.add, operator.sub}
Returns
-------
result : PeriodArray
"""
assert op in [operator.add, operator.sub]
if op is operator.sub:
other = -other
res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan)
res_values = res_values.view("i8")
np.putmask(res_values, self._isnan, iNaT)
return type(self)(res_values, freq=self.freq)
def _add_offset(self, other: BaseOffset):
assert not isinstance(other, Tick)
self._require_matching_freq(other, base=True)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
result = super()._add_timedeltalike_scalar(other.n)
return type(self)(result, freq=self.freq)
def _add_timedeltalike_scalar(self, other):
"""
Parameters
----------
other : timedelta, Tick, np.timedelta64
Returns
-------
PeriodArray
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise raise_on_incompatible(self, other)
if notna(other):
# special handling for np.timedelta64("NaT"), avoid calling
# _check_timedeltalike_freq_compat as that would raise TypeError
other = self._check_timedeltalike_freq_compat(other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
return super()._add_timedeltalike_scalar(other)
def _add_timedelta_arraylike(self, other):
"""
Parameters
----------
other : TimedeltaArray or ndarray[timedelta64]
Returns
-------
result : ndarray[int64]
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise TypeError(
f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
)
if not np.all(isna(other)):
delta = self._check_timedeltalike_freq_compat(other)
else:
# all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
return self + np.timedelta64("NaT")
ordinals = self._addsub_int_array(delta, operator.add).asi8
return type(self)(ordinals, dtype=self.dtype)
def _check_timedeltalike_freq_compat(self, other):
"""
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
"""
assert isinstance(self.freq, Tick) # checked by calling function
base_nanos = self.freq.base.nanos
if isinstance(other, (timedelta, np.timedelta64, Tick)):
nanos = delta_to_nanoseconds(other)
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == "m"
if other.dtype != TD64NS_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
other = other.astype(TD64NS_DTYPE)
nanos = other.view("i8")
else:
# TimedeltaArray/Index
nanos = other.asi8
if np.all(nanos % base_nanos == 0):
# nanos being added is an integer multiple of the
# base-frequency to self.freq
delta = nanos // base_nanos
# delta is the integer (or integer-array) number of periods
            # by which self will be shifted.
return delta
raise raise_on_incompatible(self, other)
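    # Illustrative note (assumption): with self.freq == 'D' (a base of one
    # day, i.e. 86_400_000_000_000 nanoseconds) and other == Timedelta('2 days'),
    # the check above yields delta == 2, a shift of two periods, while
    # Timedelta('36h') is not an integer multiple of a day and raises
    # IncompatibleFrequency via raise_on_incompatible.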
# ------------------------------------------------------------------
# TODO: See if we can re-share this with Period
def _get_to_timestamp_base(self) -> int:
"""
Return frequency code group used for base of to_timestamp against
frequency code.
Return day freq code against longer freq than day.
Return second freq code against hour between second.
Returns
-------
int
"""
base = self._dtype._dtype_code
if base < FreqGroup.FR_BUS.value:
return FreqGroup.FR_DAY.value
elif FreqGroup.FR_HR.value <= base <= FreqGroup.FR_SEC.value:
return FreqGroup.FR_SEC.value
return base
@property
def start_time(self) -> DatetimeArray:
return self.to_timestamp(how="start")
@property
def end_time(self) -> DatetimeArray:
return self.to_timestamp(how="end")
def _require_matching_freq(self, other, base: bool = False) -> None:
# See also arrays.period.raise_on_incompatible
if isinstance(other, BaseOffset):
other_freq = other
else:
other_freq = other.freq
if base:
condition = self.freq.base != other_freq.base
else:
condition = self.freq != other_freq
if condition:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__,
own_freq=self.freqstr,
other_freq=other_freq.freqstr,
)
raise IncompatibleFrequency(msg)
def raise_on_incompatible(left, right):
"""
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : None, DateOffset, Period, ndarray, or timedelta-like
Returns
-------
IncompatibleFrequency
Exception to be raised by the caller.
"""
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
other_freq = None
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, BaseOffset)):
other_freq = right.freqstr
else:
other_freq = delta_to_tick(Timedelta(right)).freqstr
msg = DIFFERENT_FREQ.format(
cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq
)
return IncompatibleFrequency(msg)
# -------------------------------------------------------------------
# Constructor Helpers
def period_array(
data: Sequence[Period | str | None] | AnyArrayLike,
freq: str | Tick | None = None,
copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq.`` Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
<PeriodArray>
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
"""
data_dtype = getattr(data, "dtype", None)
if is_datetime64_dtype(data_dtype):
return PeriodArray._from_datetime64(data, freq)
if is_period_dtype(data_dtype):
return PeriodArray(data, freq=freq)
# other iterable of some kind
if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
data = list(data)
arrdata = np.asarray(data)
dtype: PeriodDtype | None
if freq:
dtype = PeriodDtype(freq)
else:
dtype = None
if is_float_dtype(arrdata) and len(arrdata) > 0:
raise TypeError("PeriodIndex does not allow floating point in construction")
if is_integer_dtype(arrdata.dtype):
arr = arrdata.astype(np.int64, copy=False)
ordinals = libperiod.from_ordinals(arr, freq)
return PeriodArray(ordinals, dtype=dtype)
data = ensure_object(arrdata)
return PeriodArray._from_sequence(data, dtype=dtype)
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError("dtype must be PeriodDtype")
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
return freq
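# Illustrative examples (assumption): validate_dtype_freq(PeriodDtype("M"), "M")
# returns the <MonthEnd> offset, validate_dtype_freq(PeriodDtype("M"), None)
# extracts <MonthEnd> from the dtype, and validate_dtype_freq(PeriodDtype("M"), "D")
# raises IncompatibleFrequency because the two frequencies disagree.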
def dt64arr_to_periodarr(data, freq, tz=None):
"""
    Convert a datetime-like array of values to Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int64]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
"""
if data.dtype != np.dtype("M8[ns]"):
raise ValueError(f"Wrong dtype: {data.dtype}")
if freq is None:
if isinstance(data, ABCIndex):
data, freq = data._values, data.freq
elif isinstance(data, ABCSeries):
data, freq = data._values, data.dt.freq
elif isinstance(data, (ABCIndex, ABCSeries)):
data = data._values
freq = Period._maybe_convert_freq(freq)
base = freq._period_dtype_code
return c_dt64arr_to_periodarr(data.view("i8"), base, tz), freq
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is not None:
freq = to_offset(freq)
mult = freq.n
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError("start and end must have same freq")
if start is NaT or end is NaT:
raise ValueError("start and end must not be NaT")
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError("Could not infer freq from start/end")
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(
end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64
)
else:
data = np.arange(
start.ordinal, start.ordinal + periods, mult, dtype=np.int64
)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(
year=None,
month=None,
quarter=None,
day=None,
hour=None,
minute=None,
second=None,
freq=None,
) -> tuple[np.ndarray, BaseOffset]:
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = to_offset("Q")
base = FreqGroup.FR_QTR.value
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
if base != FreqGroup.FR_QTR.value:
raise AssertionError("base must equal FR_QTR")
freqstr = freq.freqstr
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = parsing.quarter_to_myear(y, q, freqstr)
val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields) -> list[np.ndarray]:
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError("Mismatched Period array lengths")
elif length is None:
length = len(x)
# error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected
# "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int,
# integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]"
return [
np.asarray(x)
if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) # type: ignore[arg-type]
for x in fields
]
|
the-stack_106_24801
|
from random import random, choice
from random import seed as set_seed
from math import atan2
import cairo
# ~~~~~~~~ class for graph nodes ~~~~~~~~
class node:
def __init__(self,x,y,z):
self.x = x
self.y = y
self.z = z
self.flux = 0
self.population = 0
self.danger = 0
self.slope = 0
self.coast = False
self.river = False
self.mountain = False
self.sea_neighbors = []
# ~~~~~~~~ class for the terrain ~~~~~~~~
class terrain:
def __init__(self,n,sealevel=0.5,seed=None):
self.sealevel = sealevel
# generate an evenly spaced triagonal grid of points,
# jittered for randomness
        if seed is not None:
set_seed(seed)
m = int(2*n/(3**0.5)) + 2
if m%2 == 0: # force odd m
m = m+1
s = 1./(n-1)
h = (3**0.5)*s/2
nodes = []
for y in range(m):
row = []
k = y%2
for x in range(n+k):
px = s*x - 0.5*s*k + s*(random()-0.5)
py = h*y - h + h*(random()-0.5)
row.append( node(px,py,0) )
nodes.append(row)
# build graph
self.graph = {}
# ...starting with the corners
self.graph[nodes[0][0]] = ( nodes[1][0], nodes[0][1], nodes[1][1] )
self.graph[nodes[-1][0]] = ( nodes[-2][0], nodes[-2][1], nodes[-1][1] )
self.graph[nodes[0][-1]] = ( nodes[0][-2], nodes[1][-2], nodes[1][-1] )
self.graph[nodes[-1][-1]] = ( nodes[-1][-2], nodes[-2][-2], nodes[-2][-1] )
        # next, the edges
# sides
for y in range(1,m-1):
if y%2 == 0:
# even left
self.graph[nodes[y][0]] = ( nodes[y+1][0], nodes[y+1][1], nodes[y][1], nodes[y-1][1], nodes[y-1][0] )
# even right
self.graph[nodes[y][-1]] = ( nodes[y+1][-2], nodes[y+1][-1], nodes[y-1][-1], nodes[y-1][-2], nodes[y][-2] )
else:
# odd left
self.graph[nodes[y][0]] = ( nodes[y+1][0], nodes[y][1], nodes[y-1][0] )
# odd right
self.graph[nodes[y][-1]] = ( nodes[y+1][-1], nodes[y][-2], nodes[y-1][-1] )
# top & bottom
for x in range(1,n-1):
# bottom
self.graph[nodes[0][x]] = ( nodes[0][x-1], nodes[1][x], nodes[1][x+1], nodes[0][x+1] )
            # top
self.graph[nodes[-1][x]] = ( nodes[-1][x-1], nodes[-2][x], nodes[-2][x+1], nodes[-1][x+1] )
# the bulk of the graph
for y in range(1,m-1):
k = y%2
for x in range(1,n+k-1):
self.graph[nodes[y][x]] = ( nodes[y-1][x-k], nodes[y-1][x+1-k], nodes[y][x-1], nodes[y][x+1], nodes[y+1][x-k], nodes[y+1][x+1-k] )
def gen_terrain(self,N,maxradius):
for i in range(N):
cx = 3*random() - 1
cy = 3*random() - 1
cr = maxradius*random()
for n in self.graph:
if ( (n.x-cx)**2 + (n.y-cy)**2 ) <= cr:
n.z += 1 + 0.1*random()
self.normalize()
def normalize(self):
zmax = 0
zmin = 1e99
for n in self.graph:
if n.z > zmax:
zmax = n.z
if n.z < zmin:
zmin = n.z
for n in self.graph:
n.z = (n.z-zmin)/(zmax-zmin)
def get_flux(self):
""" compute water flux through each node """
# iterate through points from top to bottom
points = sorted(self.graph,key=lambda n: -n.z)
for n in points:
if n.z >= self.sealevel:
n.flux += 1
zmin = n.z
zmin2 = 1e99
min_n = None
for n2 in self.graph[n]:
if n2.z < zmin:
zmin = n2.z
min_n = n2
if n2.z < zmin2:
zmin2 = n2.z
            if min_n is not None:
min_n.flux += n.flux
else:
n.flux = 0
n.geo = 'lake'
n.z = zmin2 + 1e-3
self.normalize_flux()
def erode(self,c):
self.get_flux()
for n in self.graph:
n.z -= c*n.flux**0.5
def clean_coasts(self):
for n in self.graph:
sea = 0
land = 0
for n2 in self.graph[n]:
if n2.z > self.sealevel:
land += 1
else:
sea += 1
            if sea > 4 or land > 4:
n.z = 2*self.sealevel - n.z
def normalize_flux(self):
""" normalize the water flux """
fmax = 0
fmin = 1e99
for n in self.graph:
if n.flux > fmax:
fmax = n.flux
if n.flux < fmin:
fmin = n.flux
for n in self.graph:
n.flux = (n.flux-fmin)/(fmax-fmin)
def minmax(self):
zmin = 1e99
zmax = 0
for n in self.graph:
if n.z > zmax:
zmax = n.z
if n.z < zmin:
zmin = n.z
return zmin, zmax
def make_dangerous(self,nodes=10,blurring=10):
for i in range(nodes):
n = choice(list(self.graph))
n.danger = 1
for i in range(blurring):
for n in self.graph:
average_danger = n.danger
for n2 in self.graph[n]:
average_danger += n2.danger
average_danger = average_danger / ( len(self.graph[n]) + 1 )
n.danger = average_danger
# normalize danger
max_danger = 0
min_danger = 2
for n in self.graph:
if n.danger > max_danger:
max_danger = n.danger
if n.danger < min_danger:
min_danger = n.danger
for n in self.graph:
n.danger = (n.danger-min_danger) / (max_danger-min_danger)
def render(self,fname):
lw = 1
scale = 1000
w = scale
h = scale
surf = cairo.SVGSurface(fname,w,h)
ct = cairo.Context(surf)
ct.set_source_rgb(1,1,1)
ct.rectangle(0,0,w,h)
ct.fill()
ct.set_line_width(lw)
ct.set_source_rgb(1,0,0)
for n in self.graph:
if n.z > 0.8:
n.mountain = True
if n.z > self.sealevel:
# find coastline nodes
n.sea_neighbors = [ n2 for n2 in self.graph[n] if n2.z <= self.sealevel ]
if len(n.sea_neighbors) < 5 and len(n.sea_neighbors) > 0:
n.coast = True
if n.flux**0.5 > 0.3 and n.z >= self.sealevel:
# find river nodes
n.river = True
for n in self.graph:
if n.z < self.sealevel:
ns = sorted(self.graph[n],key=lambda n1: atan2(n1.y-n.y,n1.x-n.x))
ct.set_source_rgb(0,0,1)
ct.move_to(scale*ns[0].x, h-scale*ns[0].y)
for n1 in ns[1:]:
ct.line_to(scale*n1.x, h-scale*n1.y)
ct.close_path()
ct.fill()
if n.mountain and random()>(0.000031*len(self.graph)):
# render mountain nodes
ct.set_source_rgb(0,0,0)
ct.move_to(scale*n.x-5,h-scale*n.y+5)
ct.line_to(scale*n.x,h-scale*n.y)
ct.line_to(scale*n.x+5,h-scale*n.y+5)
ct.stroke()
if n.coast:
# render coast nodes
ct.set_source_rgb(0,0,0)
for n2 in self.graph[n]:
# if the two points have a common sea neighbor
if n2.coast and not set(n.sea_neighbors).isdisjoint(n2.sea_neighbors):
ct.move_to(scale*n.x, h-scale*n.y)
ct.line_to(scale*n2.x, h-scale*n2.y)
ct.stroke()
if n.river:
# render river nodes
ct.set_source_rgb(0,0,1)
                for n2 in self.graph[n]:
if n2.river:
ct.move_to(scale*n.x, h-scale*n.y)
ct.line_to(scale*n2.x, h-scale*n2.y)
ct.stroke()
ct.set_source_rgb(n.danger,0,0)
ct.rectangle(scale*n.x, h-scale*n.y, 4, 4)
ct.fill()
# finish & save
surf.finish()
t = terrain(2**6, sealevel=0.5*random())
t.gen_terrain(1000,0.1)
for i in range(200):
t.get_flux()
for i in range(100):
t.erode(1e-3)
t.get_flux()
#t.clean_coasts()
t.make_dangerous(1000)
t.render('map.svg')
|
the-stack_106_24802
|
# https://www.geeksforgeeks.org/avl-tree-set-1-insertion/
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.height = 1
def insert(node, val):
if not node:
return Node(val)
if val <= node.val:
node.left = insert(node.left, val)
elif node.val < val:
node.right = insert(node.right, val)
node.height = 1 + max(get_height(node.left), get_height(node.right))
balance = get_balance(node)
# LL
if balance > 1 and val < node.left.val:
return rotate_right(node)
# LR
if balance > 1 and node.left.val < val:
node.left = rotate_left(node.left)
return rotate_right(node)
# RR
if balance < -1 and node.right.val < val:
return rotate_left(node)
# RL
if balance < -1 and val < node.right.val:
node.right = rotate_right(node.right)
return rotate_left(node)
return node
def get_balance(node):
if not node:
return 0
return get_height(node.left) - get_height(node.right)
def get_height(node):
if not node:
return 0
return node.height
def rotate_left(x):
y = x.right
c = y.left
y.left = x
x.right = c
x.height = 1 + max(get_height(x.left), get_height(x.right))
y.height = 1 + max(get_height(y.left), get_height(y.right))
return y
def rotate_right(x):
y = x.left
c = y.right
y.right = x
x.left = c
x.height = 1 + max(get_height(x.left), get_height(x.right))
y.height = 1 + max(get_height(y.left), get_height(y.right))
return y
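# Illustrative sketch (comment added, not from the original source) of
# rotate_right(x): x's left child y becomes the new subtree root and y's
# former right subtree T2 is re-attached as x's left child.
#
#           x                    y
#          / \                  / \
#         y   T3    ---->      T1  x
#        / \                      / \
#       T1  T2                   T2  T3
#
# rotate_left is the mirror image; heights are recomputed for x, then y.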
def delete(node, val):
if not node:
return
if val < node.val:
node.left = delete(node.left, val)
elif node.val < val:
node.right = delete(node.right, val)
else:
if not node.left:
return node.right
elif not node.right:
return node.left
successor = get_successor(node.right)
node.val = successor.val
node.right = delete(node.right, successor.val)
node.height = 1 + max(get_height(node.left), get_height(node.right))
balance = get_balance(node)
# LL
if balance > 1 and 0 <= get_balance(node.left):
return rotate_right(node)
# LR
if balance > 1 and get_balance(node.left) < 0:
node.left = rotate_left(node.left)
return rotate_right(node)
# RR
if balance < -1 and get_balance(node.right) <= 0:
return rotate_left(node)
# RL
if balance < -1 and 0 < get_balance(node.right):
node.right = rotate_right(node.right)
return rotate_left(node)
return node
def get_successor(node):
while node.left:
node = node.left
return node
def inorder(node):
if not node:
return
inorder(node.left)
print(node.val, node.height)
inorder(node.right)
if __name__ == '__main__':
root = Node(10)
root = insert(root, 20)
root = insert(root, 30)
root = insert(root, 40)
root = insert(root, 50)
root = insert(root, 25)
inorder(root)
print()
root = delete(root, 40)
inorder(root)
print()
|
the-stack_106_24803
|
import os
flags = [
'-x',
'c++',
'-isystem',
'/usr/local/Cellar/llvm/6.0.0/include/c++/v1',
'-isystem',
'/usr/local/include',
'-isystem',
'/usr/local/Cellar/llvm/6.0.0/lib/clang/6.0.0/include',
'-isystem',
'/usr/include',
'-isystem',
    '/System/Library/Frameworks',
'-isystem',
'/Library/Frameworks',
]
SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c']
def IsHeaderFile(filename):
extension = os.path.splitext(filename)[1]
return extension in ['.h', '.hxx', '.hpp', '.hh']
def FindCorrespondingSourceFile(filename):
if IsHeaderFile(filename):
basename = os.path.splitext(filename)[0]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists(replacement_file):
return replacement_file
return filename
def FlagsForFile(filename, **kwargs):
# If the file is a header, try to find the corresponding source file and
# retrieve its flags from the compilation database if using one. This is
# necessary since compilation databases don't have entries for header files.
# In addition, use this source file as the translation unit. This makes it
# possible to jump from a declaration in the header file to its definition in
# the corresponding source file.
filename = FindCorrespondingSourceFile(filename)
return {
'flags': flags,
'override_filename': filename
}
|
the-stack_106_24804
|
from __future__ import annotations
import sys
import types
from typing import (
Any,
ClassVar,
FrozenSet,
Generator,
Iterable,
Iterator,
List,
NoReturn,
Tuple,
Type,
TypeVar,
TYPE_CHECKING,
)
import numpy as np
__all__ = ["_GenericAlias", "NDArray"]
_T = TypeVar("_T", bound="_GenericAlias")
def _to_str(obj: object) -> str:
"""Helper function for `_GenericAlias.__repr__`."""
if obj is Ellipsis:
return '...'
elif isinstance(obj, type) and not isinstance(obj, _GENERIC_ALIAS_TYPE):
if obj.__module__ == 'builtins':
return obj.__qualname__
else:
return f'{obj.__module__}.{obj.__qualname__}'
else:
return repr(obj)
def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:
"""Search for all typevars and typevar-containing objects in `args`.
Helper function for `_GenericAlias.__init__`.
"""
for i in args:
if hasattr(i, "__parameters__"):
yield from i.__parameters__
elif isinstance(i, TypeVar):
yield i
def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:
"""Recursively replace all typevars with those from `parameters`.
Helper function for `_GenericAlias.__getitem__`.
"""
args = []
for i in alias.__args__:
if isinstance(i, TypeVar):
value: Any = next(parameters)
elif isinstance(i, _GenericAlias):
value = _reconstruct_alias(i, parameters)
elif hasattr(i, "__parameters__"):
prm_tup = tuple(next(parameters) for _ in i.__parameters__)
value = i[prm_tup]
else:
value = i
args.append(value)
cls = type(alias)
return cls(alias.__origin__, tuple(args))
class _GenericAlias:
"""A python-based backport of the `types.GenericAlias` class.
E.g. for ``t = list[int]``, ``t.__origin__`` is ``list`` and
``t.__args__`` is ``(int,)``.
See Also
--------
:pep:`585`
The PEP responsible for introducing `types.GenericAlias`.
"""
__slots__ = ("__weakref__", "_origin", "_args", "_parameters", "_hash")
@property
def __origin__(self) -> type:
return super().__getattribute__("_origin")
@property
def __args__(self) -> Tuple[object, ...]:
return super().__getattribute__("_args")
@property
def __parameters__(self) -> Tuple[TypeVar, ...]:
"""Type variables in the ``GenericAlias``."""
return super().__getattribute__("_parameters")
def __init__(
self,
origin: type,
args: object | Tuple[object, ...],
) -> None:
self._origin = origin
self._args = args if isinstance(args, tuple) else (args,)
self._parameters = tuple(_parse_parameters(self.__args__))
@property
def __call__(self) -> type:
return self.__origin__
def __reduce__(self: _T) -> Tuple[
Type[_T],
Tuple[type, Tuple[object, ...]],
]:
cls = type(self)
return cls, (self.__origin__, self.__args__)
def __mro_entries__(self, bases: Iterable[object]) -> Tuple[type]:
return (self.__origin__,)
def __dir__(self) -> List[str]:
"""Implement ``dir(self)``."""
cls = type(self)
dir_origin = set(dir(self.__origin__))
return sorted(cls._ATTR_EXCEPTIONS | dir_origin)
def __hash__(self) -> int:
"""Return ``hash(self)``."""
# Attempt to use the cached hash
try:
return super().__getattribute__("_hash")
except AttributeError:
self._hash: int = hash(self.__origin__) ^ hash(self.__args__)
return super().__getattribute__("_hash")
def __instancecheck__(self, obj: object) -> NoReturn:
"""Check if an `obj` is an instance."""
raise TypeError("isinstance() argument 2 cannot be a "
"parameterized generic")
def __subclasscheck__(self, cls: type) -> NoReturn:
"""Check if a `cls` is a subclass."""
raise TypeError("issubclass() argument 2 cannot be a "
"parameterized generic")
def __repr__(self) -> str:
"""Return ``repr(self)``."""
args = ", ".join(_to_str(i) for i in self.__args__)
origin = _to_str(self.__origin__)
return f"{origin}[{args}]"
def __getitem__(self: _T, key: object | Tuple[object, ...]) -> _T:
"""Return ``self[key]``."""
key_tup = key if isinstance(key, tuple) else (key,)
if len(self.__parameters__) == 0:
raise TypeError(f"There are no type variables left in {self}")
elif len(key_tup) > len(self.__parameters__):
raise TypeError(f"Too many arguments for {self}")
elif len(key_tup) < len(self.__parameters__):
raise TypeError(f"Too few arguments for {self}")
key_iter = iter(key_tup)
return _reconstruct_alias(self, key_iter)
def __eq__(self, value: object) -> bool:
"""Return ``self == value``."""
if not isinstance(value, _GENERIC_ALIAS_TYPE):
return NotImplemented
return (
self.__origin__ == value.__origin__ and
self.__args__ == value.__args__
)
_ATTR_EXCEPTIONS: ClassVar[FrozenSet[str]] = frozenset({
"__origin__",
"__args__",
"__parameters__",
"__mro_entries__",
"__reduce__",
"__reduce_ex__",
"__copy__",
"__deepcopy__",
})
def __getattribute__(self, name: str) -> Any:
"""Return ``getattr(self, name)``."""
# Pull the attribute from `__origin__` unless its
# name is in `_ATTR_EXCEPTIONS`
cls = type(self)
if name in cls._ATTR_EXCEPTIONS:
return super().__getattribute__(name)
return getattr(self.__origin__, name)
# See `_GenericAlias.__eq__`
if sys.version_info >= (3, 9):
_GENERIC_ALIAS_TYPE = (_GenericAlias, types.GenericAlias)
else:
_GENERIC_ALIAS_TYPE = (_GenericAlias,)
ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
if TYPE_CHECKING or sys.version_info >= (3, 9):
_DType = np.dtype[ScalarType]
NDArray = np.ndarray[Any, np.dtype[ScalarType]]
else:
_DType = _GenericAlias(np.dtype, (ScalarType,))
NDArray = _GenericAlias(np.ndarray, (Any, _DType))
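# Illustrative usage (assumption, mirroring the class docstring): on Python
# versions where the backport is active,
#     alias = NDArray[np.float64]
# goes through _GenericAlias.__getitem__ and yields an alias whose
# __origin__ is np.ndarray and whose __args__ embed a np.dtype[np.float64]
# alias; isinstance()/issubclass() checks against it deliberately raise
# TypeError, matching the behaviour of parameterized built-in generics.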
|
the-stack_106_24806
|
"""
Title: The Functional API
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/01
Last modified: 2020/04/12
Description: Complete guide to the functional API.
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## Introduction
The Keras *functional API* is a way to create models that is more flexible
than the `tf.keras.Sequential` API. The functional API can handle models
with non-linear topology, models with shared layers, and models
with multiple inputs or outputs.
The main idea is that a deep learning model is usually
a directed acyclic graph (DAG) of layers.
So the functional API is a way to build *graphs of layers*.
Consider the following model:
<div class="k-default-codeblock">
```
(input: 784-dimensional vectors)
↧
[Dense (64 units, relu activation)]
↧
[Dense (64 units, relu activation)]
↧
[Dense (10 units, softmax activation)]
↧
(output: logits of a probability distribution over 10 classes)
```
</div>
This is a basic graph with three layers.
To build this model using the functional API, start by creating an input node:
"""
inputs = keras.Input(shape=(784,))
"""
The shape of the data is set as a 784-dimensional vector.
The batch size is always omitted since only the shape of each sample is specified.
If, for example, you have an image input with a shape of `(32, 32, 3)`,
you would use:
"""
# Just for demonstration purposes.
img_inputs = keras.Input(shape=(32, 32, 3))
"""
The `inputs` that is returned contains information about the shape and `dtype`
of the input data that you feed to your model.
Here's the shape:
"""
inputs.shape
"""
Here's the dtype:
"""
inputs.dtype
"""
You create a new node in the graph of layers by calling a layer on this `inputs`
object:
"""
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
"""
The "layer call" action is like drawing an arrow from "inputs" to this layer
you created.
You're "passing" the inputs to the `dense` layer, and out you get `x`.
Let's add a few more layers to the graph of layers:
"""
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
"""
At this point, you can create a `Model` by specifying its inputs and outputs
in the graph of layers:
"""
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
"""
Let's check out what the model summary looks like:
"""
model.summary()
"""
You can also plot the model as a graph:
"""
keras.utils.plot_model(model, "my_first_model.png")
"""
And, optionally, display the input and output shapes of each layer
in the plotted graph:
"""
keras.utils.plot_model(model, "my_first_model_with_shape_info.png", show_shapes=True)
"""
This figure and the code are almost identical. In the code version,
the connection arrows are replaced by the call operation.
A "graph of layers" is an intuitive mental image for a deep learning model,
and the functional API is a way to create models that closely mirror this.
"""
"""
## Training, evaluation, and inference
Training, evaluation, and inference work exactly in the same way for models
built using the functional API as for `Sequential` models.
Here, load the MNIST image data, reshape it into vectors,
fit the model on the data (while monitoring performance on a validation split),
then evaluate the model on the test data:
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
history = model.fit(x_train, y_train, batch_size=64, epochs=2, validation_split=0.2)
test_scores = model.evaluate(x_test, y_test, verbose=2)
print("Test loss:", test_scores[0])
print("Test accuracy:", test_scores[1])
"""
For further reading, see the [training and evaluation](/guides/training_with_built_in_methods/) guide.
"""
"""
## Save and serialize
Saving the model and serialization work the same way for models built using
the functional API as they do for `Sequential` models. The standard way
to save a functional model is to call `model.save()`
to save the entire model as a single file. You can later recreate the same model
from this file, even if the code that built the model is no longer available.
This saved file includes the:
- model architecture
- model weight values (that were learned during training)
- model training config, if any (as passed to `compile`)
- optimizer and its state, if any (to restart training where you left off)
"""
model.save("path_to_my_model")
del model
# Recreate the exact same model purely from the file:
model = keras.models.load_model("path_to_my_model")
"""
For details, read the model [serialization & saving](
/guides/serialization_and_saving/) guide.
"""
"""
## Use the same graph of layers to define multiple models
In the functional API, models are created by specifying their inputs
and outputs in a graph of layers. That means that a single
graph of layers can be used to generate multiple models.
In the example below, you use the same stack of layers to instantiate two models:
an `encoder` model that turns image inputs into 16-dimensional vectors,
and an end-to-end `autoencoder` model for training.
"""
encoder_input = keras.Input(shape=(28, 28, 1), name="img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
autoencoder = keras.Model(encoder_input, decoder_output, name="autoencoder")
autoencoder.summary()
"""
Here, the decoding architecture is strictly symmetrical
to the encoding architecture, so the output shape is the same as
the input shape `(28, 28, 1)`.
The reverse of a `Conv2D` layer is a `Conv2DTranspose` layer,
and the reverse of a `MaxPooling2D` layer is an `UpSampling2D` layer.
"""
"""
## All models are callable, just like layers
You can treat any model as if it were a layer by invoking it on an `Input` or
on the output of another layer. By calling a model you aren't just reusing
the architecture of the model, you're also reusing its weights.
To see this in action, here's a different take on the autoencoder example that
creates an encoder model and a decoder model, and chains them in two calls
to obtain the autoencoder model:
"""
encoder_input = keras.Input(shape=(28, 28, 1), name="original_img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
decoder_input = keras.Input(shape=(16,), name="encoded_img")
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
decoder = keras.Model(decoder_input, decoder_output, name="decoder")
decoder.summary()
autoencoder_input = keras.Input(shape=(28, 28, 1), name="img")
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name="autoencoder")
autoencoder.summary()
"""
As you can see, the model can be nested: a model can contain sub-models
(since a model is just like a layer).
A common use case for model nesting is *ensembling*.
For example, here's how to ensemble a set of models into a single model
that averages their predictions:
"""
def get_model():
inputs = keras.Input(shape=(128,))
outputs = layers.Dense(1)(inputs)
return keras.Model(inputs, outputs)
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
"""
## Manipulate complex graph topologies
### Models with multiple inputs and outputs
The functional API makes it easy to manipulate multiple inputs and outputs.
This cannot be handled with the `Sequential` API.
For example, if you're building a system for ranking customer issue tickets by
priority and routing them to the correct department,
then the model will have three inputs:
- the title of the ticket (text input),
- the text body of the ticket (text input), and
- any tags added by the user (categorical input)
This model will have two outputs:
- the priority score between 0 and 1 (scalar sigmoid output), and
- the department that should handle the ticket (softmax output
over the set of departments).
You can build this model in a few lines with the functional API:
"""
num_tags = 12 # Number of unique issue tags
num_words = 10000 # Size of vocabulary obtained when preprocessing text data
num_departments = 4 # Number of departments for predictions
title_input = keras.Input(
shape=(None,), name="title"
) # Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name="body") # Variable-length sequence of ints
tags_input = keras.Input(
shape=(num_tags,), name="tags"
) # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)
# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
inputs=[title_input, body_input, tags_input],
outputs=[priority_pred, department_pred],
)
"""
Now plot the model:
"""
keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
"""
When compiling this model, you can assign different losses to each output.
You can even assign different weights to each loss -- to modulate
their contribution to the total training loss.
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True),
],
loss_weights=[1.0, 0.2],
)
"""
Since the output layers have different names, you could also specify
the loss like this:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"priority": keras.losses.BinaryCrossentropy(from_logits=True),
"department": keras.losses.CategoricalCrossentropy(from_logits=True),
},
loss_weights=[1.0, 0.2],
)
"""
Train the model by passing lists of NumPy arrays of inputs and targets:
"""
# Dummy input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype("float32")
# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))
model.fit(
{"title": title_data, "body": body_data, "tags": tags_data},
{"priority": priority_targets, "department": dept_targets},
epochs=2,
batch_size=32,
)
"""
When calling `fit` with a `Dataset` object, it should yield either a
tuple of lists like `([title_data, body_data, tags_data], [priority_targets, dept_targets])`
or a tuple of dictionaries like
`({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets})`.
For more detailed explanation, refer to the [training and evaluation](/guides/training_with_built_in_methods/) guide.
"""
"""
### A toy ResNet model
In addition to models with multiple inputs and outputs,
the functional API makes it easy to manipulate non-linear connectivity
topologies -- these are models with layers that are not connected sequentially.
This is something the `Sequential` API cannot handle.
A common use case for this is residual connections.
Let's build a toy ResNet model for CIFAR10 to demonstrate this:
"""
inputs = keras.Input(shape=(32, 32, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
block_1_output = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_1_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_2_output = layers.add([x, block_1_output])
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_2_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_3_output = layers.add([x, block_2_output])
x = layers.Conv2D(64, 3, activation="relu")(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs, outputs, name="toy_resnet")
model.summary()
"""
Plot the model:
"""
keras.utils.plot_model(model, "mini_resnet.png", show_shapes=True)
"""
Now train the model:
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=["acc"],
)
# We restrict the data to the first 1000 samples so as to limit execution time
# on Colab. Try to train on the entire dataset until convergence!
model.fit(x_train[:1000], y_train[:1000], batch_size=64, epochs=1, validation_split=0.2)
"""
## Shared layers
Another good use for the functional API is models that use *shared layers*.
Shared layers are layer instances that are reused multiple times in the same model --
they learn features that correspond to multiple paths in the graph-of-layers.
Shared layers are often used to encode inputs from similar spaces
(say, two different pieces of text that feature similar vocabulary).
They enable sharing of information across these different inputs,
and they make it possible to train such a model on less data.
If a given word is seen in one of the inputs,
that will benefit the processing of all inputs that pass through the shared layer.
To share a layer in the functional API, call the same layer instance multiple times.
For instance, here's an `Embedding` layer shared across two different text inputs:
"""
# Embedding for 1000 unique words mapped to 128-dimensional vectors
shared_embedding = layers.Embedding(1000, 128)
# Variable-length sequence of integers
text_input_a = keras.Input(shape=(None,), dtype="int32")
# Variable-length sequence of integers
text_input_b = keras.Input(shape=(None,), dtype="int32")
# Reuse the same layer to encode both inputs
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
"""
## Extract and reuse nodes in the graph of layers
Because the graph of layers you are manipulating is a static data structure,
it can be accessed and inspected. And this is how you are able to plot
functional models as images.
This also means that you can access the activations of intermediate layers
("nodes" in the graph) and reuse them elsewhere --
which is very useful for something like feature extraction.
Let's look at an example. This is a VGG19 model with weights pretrained on ImageNet:
"""
vgg19 = tf.keras.applications.VGG19()
"""
And these are the intermediate activations of the model,
obtained by querying the graph data structure:
"""
features_list = [layer.output for layer in vgg19.layers]
"""
Use these features to create a new feature-extraction model that returns
the values of the intermediate layer activations:
"""
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype("float32")
extracted_features = feat_extraction_model(img)
"""
This comes in handy for tasks like
[neural style transfer](https://www.tensorflow.org/tutorials/generative/style_transfer),
among other things.
"""
"""
## Extend the API using custom layers
`tf.keras` includes a wide range of built-in layers, for example:
- Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`
- Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`
- RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`
- `BatchNormalization`, `Dropout`, `Embedding`, etc.
But if you don't find what you need, it's easy to extend the API by creating
your own layers. All layers subclass the `Layer` class and implement:
- a `call` method, which specifies the computation done by the layer.
- a `build` method, which creates the weights of the layer (this is just a style
convention, since you can also create weights in `__init__`).
To learn more about creating layers from scratch, read the
[custom layers and models](/guides/making_new_layers_and_models_via_subclassing) guide.
The following is a basic implementation of `tf.keras.layers.Dense`:
"""
class CustomDense(layers.Layer):
def __init__(self, units=32):
super(CustomDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
"""
For serialization support in your custom layer, define a `get_config`
method that returns the constructor arguments of the layer instance:
"""
class CustomDense(layers.Layer):
def __init__(self, units=32):
super(CustomDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(config, custom_objects={"CustomDense": CustomDense})
"""
Optionally, implement the classmethod `from_config(cls, config)` which is used
when recreating a layer instance given its config dictionary.
The default implementation of `from_config` is:
```python
def from_config(cls, config):
return cls(**config)
```
"""
"""
## When to use the functional API
When should you use the Keras functional API to create a new model,
or just subclass the `Model` class directly? In general, the functional API
is higher-level, easier and safer, and has a number of
features that subclassed models do not support.
However, model subclassing provides greater flexibility when building models
that are not easily expressible as directed acyclic graphs of layers.
For example, you could not implement a Tree-RNN with the functional API
and would have to subclass `Model` directly.
For an in-depth look at the differences between the functional API and
model subclassing, read
[What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://blog.tensorflow.org/2019/01/what-are-symbolic-and-imperative-apis.html).
### Functional API strengths:
The following properties are also true for Sequential models
(which are also data structures), but are not true for subclassed models
(which are Python bytecode, not data structures).
#### Less verbose
There is no `super(MyClass, self).__init__(...)`, no `def call(self, ...):`, etc.
Compare:
```python
inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
mlp = keras.Model(inputs, outputs)
```
With the subclassed version:
```python
class MLP(keras.Model):
def __init__(self, **kwargs):
super(MLP, self).__init__(**kwargs)
self.dense_1 = layers.Dense(64, activation='relu')
self.dense_2 = layers.Dense(10)
def call(self, inputs):
x = self.dense_1(inputs)
return self.dense_2(x)
# Instantiate the model.
mlp = MLP()
# Necessary to create the model's state.
# The model doesn't have a state until it's called at least once.
_ = mlp(tf.zeros((1, 32)))
```
#### Model validation while defining its connectivity graph
In the functional API, the input specification (shape and dtype) is created
in advance (using `Input`). Every time you call a layer,
the layer checks that the specification passed to it matches its assumptions,
and it will raise a helpful error message if not.
This guarantees that any model you can build with the functional API will run.
All debugging -- other than convergence-related debugging --
happens statically during the model construction and not at execution time.
This is similar to type checking in a compiler.
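For example (an illustrative sketch added here, not from the original guide), merging two
inputs with incompatible shapes fails as soon as the graph is defined:
```python
x1 = keras.Input(shape=(32,))
x2 = keras.Input(shape=(64,))
# `layers.add` expects inputs with matching shapes, so uncommenting the next
# line raises a ValueError at model-definition time, not at training time:
# merged = layers.add([x1, x2])
```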
#### A functional model is plottable and inspectable
You can plot the model as a graph, and you can easily access intermediate nodes
in this graph. For example, to extract and reuse the activations of intermediate
layers (as seen in a previous example):
```python
features_list = [layer.output for layer in vgg19.layers]
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
```
#### A functional model can be serialized or cloned
Because a functional model is a data structure rather than a piece of code,
it is safely serializable and can be saved as a single file
that allows you to recreate the exact same model
without having access to any of the original code.
See the [serialization & saving guide](/guides/serialization_and_saving/).
To serialize a subclassed model, it is necessary for the implementer
to specify a `get_config()`
and `from_config()` method at the model level.
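Here is a minimal sketch (an added illustration, not from the original guide) of what that
could look like for a small subclassed model; a production implementation would typically
also merge in the base class config:
```python
class SmallClassifier(keras.Model):
    def __init__(self, hidden_units=64, **kwargs):
        super(SmallClassifier, self).__init__(**kwargs)
        self.hidden_units = hidden_units
        self.dense_1 = layers.Dense(hidden_units, activation="relu")
        self.dense_2 = layers.Dense(10)
    def call(self, inputs):
        return self.dense_2(self.dense_1(inputs))
    def get_config(self):
        # Constructor arguments needed to recreate this model instance.
        return {"hidden_units": self.hidden_units}
    @classmethod
    def from_config(cls, config):
        return cls(**config)
```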
### Functional API weakness:
#### It does not support dynamic architectures
The functional API treats models as DAGs of layers.
This is true for most deep learning architectures, but not all -- for example,
recursive networks or Tree RNNs do not follow this assumption and cannot
be implemented in the functional API.
"""
"""
## Mix-and-match API styles
Choosing between the functional API or Model subclassing isn't a
binary decision that restricts you into one category of models.
All models in the `tf.keras` API can interact with each other, whether they're
`Sequential` models, functional models, or subclassed models that are written
from scratch.
You can always use a functional model or `Sequential` model
as part of a subclassed model or layer:
"""
units = 32
timesteps = 10
input_dim = 5
# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
class CustomRNN(layers.Layer):
def __init__(self):
super(CustomRNN, self).__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
# Our previously-defined Functional model
self.classifier = model
def call(self, inputs):
outputs = []
state = tf.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = tf.stack(outputs, axis=1)
print(features.shape)
return self.classifier(features)
rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, timesteps, input_dim)))
"""
You can use any subclassed layer or model in the functional API
as long as it implements a `call` method that follows one of the following patterns:
- `call(self, inputs, **kwargs)` --
Where `inputs` is a tensor or a nested structure of tensors (e.g. a list of tensors),
and where `**kwargs` are non-tensor arguments (non-inputs).
- `call(self, inputs, training=None, **kwargs)` --
Where `training` is a boolean indicating whether the layer should behave
in training mode or in inference mode.
- `call(self, inputs, mask=None, **kwargs)` --
Where `mask` is a boolean mask tensor (useful for RNNs, for instance).
- `call(self, inputs, training=None, mask=None, **kwargs)` --
Of course, you can have both masking and training-specific behavior at the same time.
Additionally, if you implement the `get_config` method on your custom Layer or model,
the functional models you create will still be serializable and cloneable.
Here's a quick example of a custom RNN, written from scratch,
being used in a functional model:
"""
units = 32
timesteps = 10
input_dim = 5
batch_size = 16
class CustomRNN(layers.Layer):
def __init__(self):
super(CustomRNN, self).__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
self.classifier = layers.Dense(1)
def call(self, inputs):
outputs = []
state = tf.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = tf.stack(outputs, axis=1)
return self.classifier(features)
# Note that you specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when you create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, 10, 5)))
|
the-stack_106_24808
|
import os
import yaml
from datetime import datetime
import yamldown
from jinja2 import Environment, Template, FileSystemLoader
from .markdown import md
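# Two-stage rendering: this wrapper template optionally `extends` the template named in a
# page's YAML front matter and injects the compiled page body into a `page` block; the
# result is rendered a second time so Jinja tags inside the page content are evaluated too.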
stage1_template = """
{%- if extends -%}
{% raw %}{%{% endraw %} extends '{{ extends }}' {% raw %}%}{% endraw %}
{%- endif %}
{% raw %}{% block page %}{% endraw %}
{{ source_code }}
{% raw %}{% endblock page %}{% endraw %}
"""
def traverse_folder(folder, path_list=None):
    # Walk the source tree, skipping names that start with '_', and yield
    # (filename, path_parts) pairs for every regular file found.
    if path_list is None:
        path_list = []
    for filename in os.listdir(folder):
        fpath = os.path.join(folder, filename)
        if filename.startswith('_'):
            continue
        if os.path.isdir(fpath):
            yield from traverse_folder(fpath, path_list + [filename])
        else:
            yield filename, path_list
def compile_file(jinja_env, filename, source_dir, destination_dir, path_list):
path = '/'.join(path_list)
name_extension = os.path.splitext(filename)
if name_extension[1] == '.md':
output_filename = f'{name_extension[0]}.html'
else:
output_filename = filename
try:
with open(os.path.join(source_dir, path, filename)) as stream:
metadata, source_code = yamldown.load(stream)
except UnicodeDecodeError:
metadata = None
if metadata:
if name_extension[1] == '.md':
source_code = md(source_code)
stage1 = jinja_env.from_string(stage1_template).render(
page=metadata,
extends=metadata.get('template'),
source_code=source_code
)
stage2 = jinja_env.from_string(stage1).render(page=metadata)
with open(os.path.join(destination_dir, path, output_filename), 'w+') as wstream:
wstream.write(stage2)
else:
path_so_far = destination_dir
for part in path_list:
path_so_far = os.path.join(path_so_far, part)
if not os.path.exists(path_so_far):
os.mkdir(path_so_far)
with open(os.path.join(source_dir, path, filename), 'rb') as src_stream:
with open(os.path.join(destination_dir, path, output_filename), 'wb+') as dest_stream:
data = src_stream.read(512)
while data != b'':
dest_stream.write(data)
data = src_stream.read(512)
def date(format):
return datetime.now().strftime(format)
def build(source_dir, destination_dir):
# TODO: blog post support
try:
with open(os.path.join(source_dir, '_config.yml')) as stream:
config = yaml.safe_load(stream)
except FileNotFoundError as ex:
print(ex)
print('No _config.yml found in source directory.')
exit(1)
except yaml.YAMLError:
print('YAML syntax error in _config.yml.')
exit(1)
jinja_env = Environment(loader=FileSystemLoader(
os.path.join(source_dir, '_templates')
))
jinja_env.globals = {
'site': config,
'date': date
}
for filename, path_list in traverse_folder(source_dir):
compile_file(jinja_env, filename, source_dir, destination_dir, path_list)
|
the-stack_106_24810
|
from talon import actions, Module, speech_system
from typing import Any, List
mod = Module()
last_phrase = None
def on_phrase(d):
global last_phrase
last_phrase = d
speech_system.register("pre:phrase", on_phrase)
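# Sentinel used to distinguish "argument not provided" from an explicit None in the
# cursorless actions below.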
class NotSet:
def __repr__(self):
return "<argument not set>"
@mod.action_class
class Actions:
def cursorless_single_target_command(
action: str,
target: dict,
arg1: Any = NotSet,
arg2: Any = NotSet,
arg3: Any = NotSet,
):
"""Execute single-target cursorless command"""
actions.user.cursorless_multiple_target_command(
action, [target], arg1, arg2, arg3
)
def cursorless_single_target_command_with_arg_list(
action: str, target: str, args: list[Any]
):
"""Execute single-target cursorless command with argument list"""
actions.user.cursorless_single_target_command(
action,
target,
*args,
)
def cursorless_single_target_command_get(
action: str,
target: dict,
arg1: Any = NotSet,
arg2: Any = NotSet,
arg3: Any = NotSet,
):
"""Execute single-target cursorless command and return result"""
args = list(filter(lambda x: x is not NotSet, [arg1, arg2, arg3]))
return actions.user.vscode_get(
"cursorless.command",
get_spoken_form(),
action,
[target],
*args,
)
def cursorless_multiple_target_command(
action: str,
targets: List[dict],
arg1: Any = NotSet,
arg2: Any = NotSet,
arg3: Any = NotSet,
):
"""Execute multi-target cursorless command"""
args = list(filter(lambda x: x is not NotSet, [arg1, arg2, arg3]))
actions.user.vscode_with_plugin_and_wait(
"cursorless.command",
get_spoken_form(),
action,
targets,
*args,
)
def get_spoken_form():
return " ".join(last_phrase["phrase"])
|
the-stack_106_24811
|
# Copyright 2017 NTT Corporation.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils.fixture import uuidsentinel as uuids
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.functional import test_servers
from nova.tests.unit import policy_fixture
def create_request_body():
return {
"interfaceAttachment": {
"port_id": uuids.port,
"net_id": uuids.net,
"fixed_ips": [
{
"ip_address": "192.168.1.3",
"subnet_id": uuids.subnet
}
]
}
}
class InterfaceFullstack(integrated_helpers._IntegratedTestBase):
"""Tests for port interfaces command.
Extension: os-interface
os-interface adds a set of functions to the port interfaces
for the creation and deletion of port interfaces.
POST /v2.1/{tenant_id}/servers/{server_id}/os-interface
DELETE /v2.1/{tenant_id}/servers/{server_id}/os-interface/{attachment_id}
Functional Test Scope:
This test starts the wsgi stack for the nova api services, uses an
in memory database to ensure the path through the wsgi layer to
the database.
"""
api_major_version = 'v2.1'
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
def setUp(self):
super(InterfaceFullstack, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture())
self.api = api_fixture.api
def test_interface_func_negative(self):
"""Test port interface edge conditions.
- Bogus body is a 400
"""
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
# Test for API failure conditions
# bad body is 400
os_interface_url = '/servers/%s/os-interface' % created_server_id
# Check in the case that both net_id and port_id are specified.
body = create_request_body()
del body['interfaceAttachment']['fixed_ips']
resp = self.api.api_post(os_interface_url, body,
check_response_status=False)
self.assertEqual(400, resp.status)
# Check in the case that fixed_ips is specified,
        # but net_id is not specified.
body = create_request_body()
del body['interfaceAttachment']['port_id']
del body['interfaceAttachment']['net_id']
resp = self.api.api_post(os_interface_url, body,
check_response_status=False)
self.assertEqual(400, resp.status)
class InterfaceFullstackWithNeutron(test_servers.ServersTestBase):
"""Tests for port interfaces command.
Functional Test Scope:
This test uses Neutron.
os-interface API specifies a port ID created by Neutron.
"""
api_major_version = 'v2.1'
USE_NEUTRON = True
def test_detach_interface_negative_invalid_state(self):
# Create server with network
image = self.api.get_images()[0]['id']
post = {"server": {"name": "test", "flavorRef": "1",
"imageRef": image,
"networks": [{"uuid": "3cb9bc59-5699-4588-a4b1-b87f96708bc6"}]}}
created_server = self.api.post_server(post)
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
post = {
'interfaceAttachment': {
'net_id': "3cb9bc59-5699-4588-a4b1-b87f96708bc6"
}
}
self.api.attach_interface(created_server_id, post)
response = self.api.get_port_interfaces(created_server_id)[0]
port_id = response['port_id']
# Change status from ACTIVE to SUSPENDED for negative test
post = {'suspend': {}}
self.api.post_server_action(created_server_id, post)
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SUSPENDED', found_server['status'])
# Detach port interface in SUSPENDED (not ACTIVE, etc.)
ex = self.assertRaises(client.OpenStackApiException,
self.api.detach_interface,
created_server_id, port_id)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('SUSPENDED', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
|
the-stack_106_24813
|
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
def test_elapsed_time():
from pycompss.functions.elapsed_time import timeit
@timeit
def increment(value):
import time
time.sleep(0.1)
return value + 1
result = increment(1)
assert len(result) == 2, "ERROR: Time it does not retrieve two elements."
assert result[0] == 2, "ERROR: Got unexpected result."
assert isinstance(result[1], float), "ERROR: Time is in incorrect format."
assert result[1] > 0, "ERROR: Time can not be 0 or negative."
|
the-stack_106_24816
|
# Given an n x n matrix mat[n][n] of integers,
# find the maximum value of mat(c, d) – mat(a, b)
# over all choices of indexes such that both c > a and d > b.
import sys
# TC: O(CxR) | SC: O(CxR)
def find_max_diff(matrix):
# max difference
max_value = -sys.maxsize-1
# print(max_value)
    # temp matrix; seed it with the last-row, last-column value of the input
temp_m = [[0 for x in matrix[0]]
for y in matrix]
temp_m[-1][-1] = matrix[-1][-1]
    # initialize the last row and column
for i in range(len(temp_m)-2, -1, -1):
        # initialize the last column
if matrix[i][-1] > temp_m[i+1][-1]:
temp_m[i][-1] = matrix[i][-1]
else:
temp_m[i][-1] = temp_m[i+1][-1]
for i in range(len(temp_m[0])-2, -1, -1):
        # initialize the last row
if matrix[-1][i] > temp_m[-1][i+1]:
temp_m[-1][i] = matrix[-1][i]
else:
temp_m[-1][i] = temp_m[-1][i+1]
    # compute the remaining matrix
for i in range(len(matrix)-2, -1, -1):
for j in range(len(matrix[0])-2, -1, -1):
# update the max_value
if (temp_m[i+1][j+1]-matrix[i][j]) > max_value:
max_value = temp_m[i+1][j+1]-matrix[i][j]
# update temp matrix
temp_m[i][j] = max(matrix[i][j], max(
temp_m[i][j+1], temp_m[i+1][j]))
return max_value
if __name__ == '__main__':
# 18
matrix1 = [[1, 2, -1, -4, -20],
[-8, -3, 4, 2, 1],
[3, 8, 6, 1, 3],
[-4, -1, 1, 7, -6],
[0, -4, 10, -5, 1]]
# 8
matrix2 = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
print(find_max_diff(matrix1))
print(find_max_diff(matrix2))
|
the-stack_106_24820
|
# encoding: UTF-8
# api: streamtuner2
# title: RadioBrowser
# description: Community collection of stations; votes, clicks, homepage links.
# version: 0.3
# type: channel
# url: http://www.radio-browser.info/
# category: radio
# priority: optional
# config:
# { type=select, name=radiobrowser_cat, value=tags, select="tags|countries|languages", description=Which category types to list. }
# documentation: http://www.radio-browser.info/#ui-tabs-7
# png:
# iVBORw0KGgoAAAANSUhEUgAAABAAAAAMCAMAAABcOc2zAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAACQ1BMVEWNYNOIWNFyOsZtNcFx
# N8hxN8hxN8hxN8hxN8hxN8hxN8dtNcFuNcJ+Ss2NX9N6T7uNX9NxPL9jMLBtNcBkMbFqNLuCT89wRq6MXtOATc17Rsp8SMl6Rcl6RctrQqmpht1qQ6PUxex6WqnXye18XarYyu3QyNzp5u739/jh3Ojd
# 2OX4+Pl7XKrYy+3i3eh8Y6Dg2+i2q8ecjrGqm8Krm8LTzN+ikbunl8D5+fl7W6rZy+7z8fTk4Or29fjAuM3Dv8rx7vTs6vHy8PTh3Ojy8PX5+fl6Wqraze75+fn5+vn6+vn6+vn6+vl6WqrMuOl1U6iR
# bMmNbb2NbryOb72PcL6Qcb+Rcr+SdMCTdcGUdsGVd8KWeMOXesSZfMWMa71cNpSLW9JxN8hxN8hxN8hxN8hxN8hrNL2NX9OMXdJ+Ss1/S85/S85/S85+Ss18SMqHV9GMXdK/p+W/p+W+peW+peS9pOS9
# o+S8ouS7oeO6oOO5nuO4neK3m+K3m+Kqidv5+fn5+vn5+fn5+fn5+fn5+fn5+fn4+fn4+Pn4+Pn4+Pn4+Pn5+fnl3vD5+fn5+fn7+/r6+vn5+fn5+vn5+vn5+vn5+fn6+/r6+vr5+fn6+/rp4/H6+vn0
# 8/X08vbz8vX08/b29vf6+/ro4vH7+/r6+/ro4vH6+vn6+vrn4fH6+/n6+vr6+/r6+vn6+/r6+vn6+vn7+/ro4fHt6PXu6fXu6vXv6vXv6/Xw6/bw7Pbw7fbx7fbx7vby7vby7/fz8ffd0+7///+qD5Mw
# AAAAYHRSTlPJ4/Hz8/Lx7+3s6ufi08N9/fve8/bo//T8/vb6/fr67eL02vbc9/Tt//3v/N34/5aO/MWeoM7Rbene+f7E0PykaWqx3K333/v//Pv7/eD34Z/m7O3v8fL09vf5+vv8/9Pw7ECfAAAAAWJL
# R0TAE2Hf+AAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB98EARcyBebz0PQAAADXSURBVAjXAcwAM/8AAAECAwQFBgcICQoLDA0ODwAQYBESE2FiY2RlZhQVFmcXABhoGRobaWprbG1uHB1vcB4A
# H3Fyc3R1dnd4eXp7fH1+IAAhf4CBgoOEhYaHiImKi4wiACONjo+QkZKTlJWWl5iZmiQAJZucJiconZ6foCkqK6GiLAAtoy4vMDEyMzQ1Njc4pKU5ADqmOzw9Pj9AQUJDREWnqEYAR6mqq6xISUpLTK2u
# r7CxTQBOsrO0tba3uLm6u7y9vr9PAFBRUlNUVVZXWFlaW1xdXl9emUehk/NThwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxNS0wNC0wMlQwMTo0OTozOSswMjowMH98i/gAAAAldEVYdGRhdGU6bW9kaWZ5
# ADIwMTUtMDQtMDJUMDE6NDk6MTcrMDI6MDAcO09kAAAAAElFTkSuQmCC
# x-icon-src: openclipart:tape.png
# x-service-by: segler_alex
# extraction-method: json
#
#
# Radio-Browser is a community-collected list of internet radios.
# Currently lists ≥4000 streaming stations, and tracks favourited
# entries. Furthermore includes station homepage links!
#
# If you change the categories between tags/countries/languages,
# please apply [Channel]→[Reload Category Tree] afterwards.
#
# Also has a neat JSON API and excellent documentation, thus
# is quite easy to support. It's also used by Rhythmbox / VLC /
# Clementine / Kodi / RadioDroid / etc.
import re
import json
from config import *
from channels import *
from uikit import uikit
import ahttp
# API endpoints:
# http://www.radio-browser.info/webservice/json/countries
# http://www.radio-browser.info/webservice/json/languages
# http://www.radio-browser.info/webservice/json/tags
# http://www.radio-browser.info/webservice/json/stations/topclick
# http://www.radio-browser.info/webservice/json/stations/topvote
# http://www.radio-browser.info/webservice/json/stations
# http://www.radio-browser.info/webservice/json/stations/searchterm
# http://www.radio-browser.info/webservice/json/stations/bytag/searchterm
#
# ENTRY sets:
# {"id":63,"name": "Energy Sachsen", "url":"http://www.energyradio.de/sachsen",
# "homepage":"http://www.energy.de", "favicon":"http://www.energy.de/favicon.ico",
# "tags":"Pop Dance RnB Techno","country":"Germany","subcountry":"","language":"German",
# "votes":4,"negativevotes":10},
#
class radiobrowser (ChannelPlugin):
# control flags
has_search = True
listformat = "pls"
titles = dict(listeners="Votes+", bitrate="Votes-", playing="Country")
base = "http://www.radio-browser.info/webservice/json/"
categories = []
pricat = ("topvote", "topclick")
catmap = { "tags": "bytag", "countries": "bycountry", "languages": "bylanguage" }
# hook menu
def init2(self, parent):
if parent:
uikit.add_menu([parent.streammenu, parent.streamactions], "Share in Radio-Browser", self.submit, insert=5)
# votes, and tags, no countries or languages
def update_categories(self):
self.categories = list(self.pricat)
for sub in [conf.radiobrowser_cat]:
cats = []
for entry in self.api(sub):
if entry["value"] and len(entry["value"]) > 1:
cats.append(entry["value"])
self.categories.append(sub)
self.categories.append(cats)
# Direct mapping
def update_streams(self, cat, search=None):
if cat:
if cat in self.pricat:
data = self.api("stations/" + cat)
elif cat in ("tags", "countries", "languages"):
return [dict(genre="-", title="Placeholder category", url="offline:")]
else:
data = self.api("stations/" + self.catmap[conf.radiobrowser_cat] + "/" + cat)
elif search:
data = self.api("stations/" + search)
else:
return []
r = []
for e in data:
r.append(dict(
genre = e["tags"],
url = e["url"],
format = "audio/mpeg",
title = e["name"],
homepage = e["homepage"],
playing = e["country"],
listeners = int(e["votes"]),
bitrate = - int(e["negativevotes"]),
))
return r
# fetch multiple pages
def api(self, method, params={}, post=False):
j = ahttp.get(self.base + method, params, post=post)
try:
return json.loads(j, strict=False) # some entries contain invalid character encodings
except:
return []
# Add radio station to RBI
def submit(self, *w):
cn = self.parent.channel()
row = cn.row()
# convert row from channel
data = dict(
name = row["title"],
url = row["url"],
homepage = row["homepage"],
#favicon = self.parent.favicon.html_link_icon(row["url"]), # no longer available as module
tags = row["genre"].replace(" ", ","),
)
# map extra fields
for _from,_val,_to in [("playing","location","country")]:
#country Austria The name of the country where the radio station is located
#state Vienna The name of the part of the country where the station is located
#language English The main language which is used in spoken text parts of the radio station.
if _from in cn.titles and cn.titles[_from].lower() == _val:
data[_to] = _from
# API submit
j = self.api("add", data, post=1)
log.SUBMIT_RBI(j)
if j and "ok" in j and j["ok"] == "true" and "id" in j:
self.parent.status("Submitted successfully to Radio-Browser.info, new station id #%s." % j["id"], timeout=15)
|
the-stack_106_24824
|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
elif browser == "opera":
self.wd = webdriver.Opera()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.edit_group = GroupHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
|
the-stack_106_24825
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest, parameterized
import jax
from jax.config import config
import jax.dlpack
import jax.numpy as jnp
from jax import test_util as jtu
import numpy as np
config.parse_flags_with_absl()
try:
import torch
import torch.utils.dlpack
except ImportError:
torch = None
try:
import cupy
except ImportError:
cupy = None
try:
import tensorflow as tf
tf_version = tuple(
int(x) for x in tf.version.VERSION.split("-")[0].split("."))
except:
tf = None
dlpack_dtypes = jax.dlpack.SUPPORTED_DTYPES
torch_dtypes = [jnp.int8, jnp.int16, jnp.int32, jnp.int64,
jnp.uint8, jnp.float16, jnp.float32, jnp.float64]
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (2, 3, 4)]
empty_array_shapes = []
empty_array_shapes += [(0,), (0, 4), (3, 0),]
nonempty_nonscalar_array_shapes += [(3, 1), (1, 4), (2, 1, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
all_shapes = nonempty_array_shapes + empty_array_shapes
class DLPackTest(jtu.JaxTestCase):
def setUp(self):
super(DLPackTest, self).setUp()
if jtu.device_under_test() == "tpu":
self.skipTest("DLPack not supported on TPU")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_take_ownership={}".format(
jtu.format_shape_dtype_string(shape, dtype),
take_ownership),
"shape": shape, "dtype": dtype, "take_ownership": take_ownership}
for shape in all_shapes
for dtype in dlpack_dtypes
for take_ownership in [False, True]))
def testJaxRoundTrip(self, shape, dtype, take_ownership):
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = jnp.array(np)
dlpack = jax.dlpack.to_dlpack(x, take_ownership=take_ownership)
self.assertEqual(take_ownership, x.device_buffer.is_deleted())
y = jax.dlpack.from_dlpack(dlpack)
self.assertAllClose(np.astype(x.dtype), y)
self.assertRaisesRegex(RuntimeError,
"DLPack tensor may be consumed at most once",
lambda: jax.dlpack.from_dlpack(dlpack))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in dlpack_dtypes))
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testTensorFlowToJax(self, shape, dtype):
if not config.x64_enabled and dtype in [jnp.int64, jnp.uint64,
jnp.float64]:
raise self.skipTest("x64 types are disabled by jax_enable_x64")
if (jtu.device_under_test() == "gpu" and
not tf.config.list_physical_devices("GPU")):
raise self.skipTest("TensorFlow not configured with GPU support")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
with tf.device("/GPU:0" if jtu.device_under_test() == "gpu" else "/CPU:0"):
x = tf.constant(np)
dlpack = tf.experimental.dlpack.to_dlpack(x)
y = jax.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in dlpack_dtypes))
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testJaxToTensorFlow(self, shape, dtype):
if not config.x64_enabled and dtype in [jnp.int64, jnp.uint64,
jnp.float64]:
self.skipTest("x64 types are disabled by jax_enable_x64")
if (jtu.device_under_test() == "gpu" and
not tf.config.list_physical_devices("GPU")):
raise self.skipTest("TensorFlow not configured with GPU support")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = jnp.array(np)
# TODO(b/171320191): this line works around a missing context initialization
# bug in TensorFlow.
_ = tf.add(1, 1)
dlpack = jax.dlpack.to_dlpack(x)
y = tf.experimental.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y.numpy())
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in torch_dtypes))
@unittest.skipIf(not torch, "Test requires PyTorch")
def testTorchToJax(self, shape, dtype):
if not config.x64_enabled and dtype in [jnp.int64, jnp.float64]:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = torch.from_numpy(np)
x = x.cuda() if jtu.device_under_test() == "gpu" else x
dlpack = torch.utils.dlpack.to_dlpack(x)
y = jax.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in torch_dtypes))
@unittest.skipIf(not torch, "Test requires PyTorch")
def testJaxToTorch(self, shape, dtype):
if not config.x64_enabled and dtype in [jnp.int64, jnp.float64]:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = jnp.array(np)
dlpack = jax.dlpack.to_dlpack(x)
y = torch.utils.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y.cpu().numpy())
class CudaArrayInterfaceTest(jtu.JaxTestCase):
def setUp(self):
super(CudaArrayInterfaceTest, self).setUp()
if jtu.device_under_test() != "gpu":
self.skipTest("__cuda_array_interface__ is only supported on GPU")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in dlpack_dtypes))
@unittest.skipIf(not cupy, "Test requires CuPy")
def testJaxToCuPy(self, shape, dtype):
rng = jtu.rand_default(self.rng())
x = rng(shape, dtype)
y = jnp.array(x)
z = cupy.asarray(y)
self.assertEqual(y.__cuda_array_interface__["data"][0],
z.__cuda_array_interface__["data"][0])
self.assertAllClose(x, cupy.asnumpy(z))
class Bfloat16Test(jtu.JaxTestCase):
@unittest.skipIf((not tf or tf_version < (2, 5, 0)),
"Test requires TensorFlow 2.5.0 or newer")
def testJaxAndTfHaveTheSameBfloat16Type(self):
self.assertEqual(np.dtype(jnp.bfloat16).num,
np.dtype(tf.dtypes.bfloat16.as_numpy_dtype).num)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
the-stack_106_24828
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import uuid
from decimal import Decimal
import pytest
from iceberg.expressions import base
@pytest.mark.parametrize(
"operation,opposite_operation",
[
(base.Operation.TRUE, base.Operation.FALSE),
(base.Operation.FALSE, base.Operation.TRUE),
(base.Operation.IS_NULL, base.Operation.NOT_NULL),
(base.Operation.NOT_NULL, base.Operation.IS_NULL),
(base.Operation.IS_NAN, base.Operation.NOT_NAN),
(base.Operation.NOT_NAN, base.Operation.IS_NAN),
(base.Operation.LT, base.Operation.GT_EQ),
(base.Operation.LT_EQ, base.Operation.GT),
(base.Operation.GT, base.Operation.LT_EQ),
(base.Operation.GT_EQ, base.Operation.LT),
(base.Operation.EQ, base.Operation.NOT_EQ),
(base.Operation.NOT_EQ, base.Operation.EQ),
(base.Operation.IN, base.Operation.NOT_IN),
(base.Operation.NOT_IN, base.Operation.IN),
],
)
def test_negation_of_operations(operation, opposite_operation):
assert operation.negate() == opposite_operation
@pytest.mark.parametrize(
"operation",
[
base.Operation.NOT,
base.Operation.AND,
base.Operation.OR,
],
)
def test_raise_on_no_negation_for_operation(operation):
with pytest.raises(ValueError) as exc_info:
operation.negate()
assert str(exc_info.value) == f"No negation defined for operation {operation}"
def test_accessor_base_class(foo_struct):
"""Test retrieving a value at a position of a container using an accessor"""
uuid_value = uuid.uuid4()
foo_struct.set(0, "foo")
foo_struct.set(1, "bar")
foo_struct.set(2, "baz")
foo_struct.set(3, 1)
foo_struct.set(4, 2)
foo_struct.set(5, 3)
foo_struct.set(6, 1.234)
foo_struct.set(7, Decimal("1.234"))
foo_struct.set(8, uuid_value)
foo_struct.set(9, True)
foo_struct.set(10, False)
foo_struct.set(11, b"\x19\x04\x9e?")
assert base.Accessor(position=0).get(foo_struct) == "foo"
assert base.Accessor(position=1).get(foo_struct) == "bar"
assert base.Accessor(position=2).get(foo_struct) == "baz"
assert base.Accessor(position=3).get(foo_struct) == 1
assert base.Accessor(position=4).get(foo_struct) == 2
assert base.Accessor(position=5).get(foo_struct) == 3
assert base.Accessor(position=6).get(foo_struct) == 1.234
assert base.Accessor(position=7).get(foo_struct) == Decimal("1.234")
assert base.Accessor(position=8).get(foo_struct) == uuid_value
assert base.Accessor(position=9).get(foo_struct) == True
assert base.Accessor(position=10).get(foo_struct) == False
assert base.Accessor(position=11).get(foo_struct) == b"\x19\x04\x9e?"
|
the-stack_106_24829
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A copy of tf.test.benchmark_config() to be used until next stable release.
Copied with minor modifications from
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/platform/benchmark.py
"""
import tensorflow.compat.v1 as tf
def import_benchmark_config():
try:
tf.test.benchmark_config()
except AttributeError:
from tensorflow import core
def benchmark_config():
"""Returns a tf.ConfigProto for disabling the dependency optimizer.
Returns:
A TensorFlow ConfigProto object.
"""
config = core.protobuf.config_pb2.ConfigProto()
config.graph_options.rewrite_options.dependency_optimization = (
core.protobuf.rewriter_config_pb2.RewriterConfig.OFF)
return config
tf.test.benchmark_config = benchmark_config
|
the-stack_106_24830
|
#!/Users/apple/Desktop/PGalla/virtual/bin/python3.6
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
duration = im.info.get("duration", 100)
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
duration = im.info.get("duration", 100)
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
the-stack_106_24831
|
# pylint: disable=E1101
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
@pytest.fixture(scope="function")
def temp_site(tmp_path_factory):
return tmp_path_factory.mktemp("gerrit-index-test")
@pytest.fixture(scope="function")
def container_run_endless(request, docker_client, gerrit_init_image, temp_site):
container_run = docker_client.containers.run(
image=gerrit_init_image.id,
entrypoint="/bin/ash",
command=["-c", "tail -f /dev/null"],
volumes={str(temp_site): {"bind": "/var/gerrit", "mode": "rw"}},
user="gerrit",
detach=True,
auto_remove=True,
)
def stop_container():
container_run.stop(timeout=1)
request.addfinalizer(stop_container)
return container_run
@pytest.mark.incremental
class TestGerritReindex:
def _get_indices(self, container):
_, indices = container.exec_run(
"git config -f /var/gerrit/index/gerrit_index.config "
+ "--name-only "
+ "--get-regexp index"
)
indices = indices.decode().strip().splitlines()
return [index.split(".")[1] for index in indices]
def test_gerrit_init_skips_reindexing_on_fresh_site(
self, temp_site, container_run_endless
):
assert not os.path.exists(
os.path.join(temp_site, "index", "gerrit_index.config")
)
exit_code, _ = container_run_endless.exec_run(
(
"python3 /var/tools/gerrit-initializer "
"-s /var/gerrit -c /var/config/default.config.yaml init"
)
)
assert exit_code == 0
expected_files = ["gerrit_index.config"] + self._get_indices(
container_run_endless
)
for expected_file in expected_files:
assert os.path.exists(os.path.join(temp_site, "index", expected_file))
timestamp_index_dir = os.path.getctime(os.path.join(temp_site, "index"))
exit_code, _ = container_run_endless.exec_run(
(
"python3 /var/tools/gerrit-initializer "
"-s /var/gerrit -c /var/config/default.config.yaml reindex"
)
)
assert exit_code == 0
assert timestamp_index_dir == os.path.getctime(os.path.join(temp_site, "index"))
def test_gerrit_init_fixes_missing_index_config(
self, container_run_endless, temp_site
):
container_run_endless.exec_run(
(
"python3 /var/tools/gerrit-initializer "
"-s /var/gerrit -c /var/config/default.config.yaml init"
)
)
os.remove(os.path.join(temp_site, "index", "gerrit_index.config"))
exit_code, _ = container_run_endless.exec_run(
(
"python3 /var/tools/gerrit-initializer "
"-s /var/gerrit -c /var/config/default.config.yaml reindex"
)
)
assert exit_code == 0
exit_code, _ = container_run_endless.exec_run("/var/gerrit/bin/gerrit.sh start")
assert exit_code == 0
def test_gerrit_init_fixes_unready_indices(self, container_run_endless):
container_run_endless.exec_run(
(
"python3 /var/tools/gerrit-initializer "
"-s /var/gerrit -c /var/config/default.config.yaml init"
)
)
indices = self._get_indices(container_run_endless)
assert indices
container_run_endless.exec_run(
"git config -f /var/gerrit/index/gerrit_index.config %s false" % indices[0]
)
exit_code, _ = container_run_endless.exec_run(
(
"python3 /var/tools/gerrit-initializer "
"-s /var/gerrit -c /var/config/default.config.yaml reindex"
)
)
assert exit_code == 0
exit_code, _ = container_run_endless.exec_run("/var/gerrit/bin/gerrit.sh start")
assert exit_code == 0
def test_gerrit_init_fixes_outdated_indices(self, container_run_endless, temp_site):
container_run_endless.exec_run(
(
"python3 /var/tools/gerrit-initializer "
"-s /var/gerrit -c /var/config/default.config.yaml init"
)
)
index = self._get_indices(container_run_endless)[0]
(name, version) = index.split("_")
os.rename(
os.path.join(temp_site, "index", index),
os.path.join(
temp_site, "index", "{name}_{0:04d}".format(int(version) - 1, name=name)
),
)
exit_code, _ = container_run_endless.exec_run(
(
"python3 /var/tools/gerrit-initializer "
"-s /var/gerrit -c /var/config/default.config.yaml reindex"
)
)
assert exit_code == 0
exit_code, _ = container_run_endless.exec_run("/var/gerrit/bin/gerrit.sh start")
assert exit_code == 0
|
the-stack_106_24833
|
import os
from pathlib import Path
import pickle
import numpy as np
import jittor_utils
from jittor_utils import LOG
import sys
jittor_utils.try_import_jit_utils_core()
has_error = 0
def convert(data):
if isinstance(data, tuple):
return tuple( convert(v) for v in data )
if isinstance(data, list):
return [ convert(v) for v in data ]
if isinstance(data, np.ndarray):
return data
if isinstance(data, dict):
return {k:convert(data[k]) for k in data}
if hasattr(data, "numpy"):
if "Var" in data.__class__.__name__:
return data.numpy()
else:
return data.detach().cpu().numpy()
return data
rand_hooked = False
def hook_pt_rand(*shape):
import torch
if isinstance(shape, tuple) and len(shape)==1 and isinstance(shape[0], torch.Size):
shape = tuple(shape[0])
np.random.seed(0)
return torch.from_numpy(np.random.rand(*tuple(shape)).astype("float32"))
def hook_pt_normal(mean, std):
import torch
shape = tuple(mean.shape)
np.random.seed(0)
return torch.from_numpy(np.random.normal(size=shape).astype("float32")).to(std.device) * std + mean
def hook_jt_rand(shape, dtype="float32", rtype="uniform"):
import jittor
np.random.seed(0)
if rtype == "normal":
return jittor.array(np.random.normal(size=shape).astype(str(dtype)))
return jittor.array(np.random.rand(*shape).astype(str(dtype)))
def hook_rand():
global rand_hooked
if rand_hooked: return
rand_hooked = True
np.random.seed(0)
if "torch" in sys.modules:
LOG.i("Hook torch.rand")
torch = sys.modules["torch"]
torch.rand = hook_pt_rand
torch.normal = hook_pt_normal
torch.manual_seed(0)
if "jittor" in sys.modules:
jittor = sys.modules["jittor"]
LOG.i("Hook jittor.random")
jittor.random = hook_jt_rand
jittor.seed(0)
class Hook:
def __init__(self, base_name, rtol=5e-2, atol=1e-3):
if os.environ.get("use_auto_diff", '1') == '0':
return
hook_rand()
self.rid = 0
self.base_name = base_name
self.base_path = os.path.join(str(Path.home()), ".cache", "jittor", "auto_diff", base_name)
os.makedirs(self.base_path, exist_ok=True)
self.rtol = rtol
self.atol = atol
LOG.i("Use cache path:", self.base_path)
LOG.i(f"rtol:{rtol} atol:{atol}")
def check_array(self, name, a, b):
rtol = self.rtol
atol = self.atol
global has_error
err = np.abs(a-b)
tol = atol + rtol * np.abs(b)
is_error = np.logical_or( err > tol, (a>=-1e-5)!=(b>=-1e-5))
index = np.where(is_error)
assert len(index)>0
if len(index[0]) == 0:
return
has_error += 1
LOG.w(f"Ndarray <{name}> not match, shape:{a.shape}")
i = tuple( i[0] for i in index )
err_rate = is_error.mean()
LOG.w(f"error index at [{i}], a({a[i]}) b({b[i]}) err({err[i]}) > tol({tol[i]}), err_rate:{err_rate*100:.3f}% amean({a.mean()}) bmean({b.mean()}) astd({a.std()}) bstd({b.std()}) ")
if err_rate > 0.01:
LOG.e("!"*10+"Very HIGH err rate"+"!"*10)
def check(self, name, pre_data, data):
global has_error
if type(pre_data) != type(data):
LOG.e(f"type not match, {pre_data.__class__.__name__}!={data.__class__.__name__}, name: {name}")
has_error += 1
return
if isinstance(pre_data, (list, tuple)):
if len(pre_data) != len(data):
has_error += 1
LOG.e(f"Name <{name}> len not match, {len(pre_data)} != {len(data)}")
n = max(len(pre_data), len(data))
for i in range(n):
a = pre_data[i] if i<len(pre_data) else "None"
b = data[i] if i<len(data) else "None"
self.check(name+f".{i}", a, b)
elif isinstance(pre_data, np.ndarray):
if pre_data.shape != data.shape:
has_error += 1
LOG.e(f"Ndarray shape <{name}> not match")
return
self.check_array(name, pre_data, data)
elif isinstance(pre_data, dict):
if len(pre_data) != len(data):
has_error += 1
LOG.w(f"Dict Name <{name}> len not match, {len(pre_data)} != {len(data)}")
for k in pre_data:
pv = pre_data[k]
if k not in data:
has_error += 1
msg = f"Key <{k}> not in data, Name <{name}>"
if isinstance(pv, np.ndarray):
LOG.e(msg)
else:
LOG.w(msg)
continue
self.check(name+f".{k}", pre_data[k], data[k])
else:
if pre_data != data:
has_error += 1
LOG.e(f"Type: {type(pre_data).__name__} Name <{name}> not match {pre_data} != {data}")
def record(self, name, data, ex_name=""):
if os.environ.get("use_auto_diff", '1') == '0':
return
rid = self.rid
self.rid += 1
fpath = os.path.join(self.base_path, f"{rid}.pkl")
data = convert(data)
if os.path.isfile(fpath):
with open(fpath, 'rb') as f:
pre_name, pre_data = pickle.load(f)
if pre_name != name:
global has_error
has_error += 1
LOG.e(f"The {rid} result name not match, {pre_name} != {name}")
self.rid -= 1
return
LOG.i(f"check {rid}:<{ex_name}{name}> ...")
self.check(ex_name+name, pre_data, data)
else:
with open(fpath, 'wb') as f:
pickle.dump((name, data), f)
LOG.i(f"save {rid}:<{name}> ok")
def record_params(self, parameters_dict):
if os.environ.get("use_auto_diff", '1') == '0':
return
rid = self.rid
self.rid += 1
global has_error
pps = {}
for k, v in parameters_dict.items():
if k.endswith("num_batches_tracked"):
continue
pps[k] = v
ps = { name:convert(param) for name, param in pps.items() }
fpath = os.path.join(self.base_path, f"{rid}-params.pkl")
if os.path.isfile(fpath):
with open(fpath, 'rb') as f:
prev_ps = pickle.load(f)
if len(prev_ps) != len(ps):
has_error += 1
LOG.e(f"Params len not match {len(prev_ps)} != {len(ps)}")
for k in ps:
a = ps[k]
if k not in prev_ps:
has_error += 1
LOG.e(f"prev param <{k}> not found.")
continue
b = prev_ps[k]
if a.shape != b.shape:
has_error += 1
LOG.e(f"Params <{k}> shape not match {a.shape} != {b.shape}")
continue
std_a, mean_a = a.std(), a.mean()
std_b, mean_b = b.std(), b.mean()
n = a.size
                # law of large numbers: with n samples, the mean/std estimates fluctuate on the order of std/sqrt(n)
std_mean_a = (std_a+std_b)/2 / np.sqrt(n) + 1e-6
std_std_a = (std_a+std_b)/2 / np.sqrt((n-1)/2) + 1e-6
x = 4
if np.abs(mean_a - mean_b) > x * std_mean_a:
has_error += 1
LOG.e(f"param mean not match, mean_a:{mean_a}, mean_b:{mean_b}, acceptable range:({mean_a - x * std_mean_a}, {mean_a + x * std_mean_a}) name:{k} shape:{a.shape}")
elif np.abs(std_a - std_b) > x * std_std_a:
has_error += 1
LOG.e(f"param std not match, std_a:{std_a}, std_b:{std_b}, acceptable range:({std_a - x * std_std_a}, {std_a + x * std_std_a}) name:{k} shape:{a.shape}")
else:
LOG.i(f"check param ok: <{k}> shape:{a.shape}")
var = pps[k]
if hasattr(var, "copy_"):
import torch
var.data.copy_(torch.from_numpy(b))
else:
var.assign(b)
else:
with open(fpath, 'wb') as f:
pickle.dump(ps, f)
LOG.i(f"save params ok")
def hook_function(self, func):
name = func.__name__
def new_func(*args, **kw):
ret = func(*args, **kw)
self.record(name+".args", args)
self.record(name+".kw", kw)
self.record(name+".ret", ret)
return ret
return new_func
def hook_module(self, mod, mod_name=""):
if os.environ.get("use_auto_diff", '1') == '0':
return
if mod_name != "":
mod_name = "<" + mod_name + ">"
def forward_hook(self2, input, output, kw=None):
ex_name = '[' + self2.__class__.__name__ + ']'
if "relu" not in self2.__class__.__name__.lower():
                # skip ReLU-like layers, because their input may be modified in place
self.record(self2.__ad_mod_name__+".input", input, ex_name)
self.record(self2.__ad_mod_name__+".output", output, ex_name)
if kw is not None:
self.record(self2.__ad_mod_name__+".kw", kw, ex_name)
names = []
for name, module in mod.named_modules():
name = mod_name + name
module.__ad_mod_name__ = name
names.append(name)
module.register_forward_hook(forward_hook)
mod_class_name = module.__class__.__name__.lower()
            # put dropout layers in eval mode and record dropout.p
if "dropout" in mod_class_name:
self.record(name+'.p', module.p, "["+mod_class_name+"]")
module.eval()
ps = { mod_name+k:v for k, v in mod.state_dict().items() }
self.record_params(ps)
self.record("module names", names)
def hook_optimizer(self, opt, opt_name=""):
'''
net = Model()
opt = optim.SGD(net.parameters(), 0.1)
hook.hook_optimizer(opt)
'''
if os.environ.get("use_auto_diff", '1') == '0':
return
origin_step = opt.step
ex_name = '['+opt.__class__.__name__+']'
def step_hook(*args, **kw):
origin_step(*args, **kw)
self.record(opt_name+".default", opt.defaults, ex_name)
gid = 0
n_params = 0
for pg in opt.param_groups:
for p in pg["params"]:
if hasattr(p, "is_stop_grad"):
if p.is_stop_grad():
continue
n_params += 1
else:
n_params += 1
self.record(opt_name+".n_params", n_params, ex_name)
for pg in opt.param_groups:
for i, p in reversed(list(enumerate(pg["params"]))):
if hasattr(p, "is_stop_grad"):
if p.is_stop_grad():
continue
self.record(f"{opt_name}.grads[{gid}]", pg["grads"][i], "["+p.name()+"]")
self.record(f"{opt_name}.params[{gid}]", p, "["+p.name()+"]")
gid += 1
else:
self.record(f"{opt_name}.grads[{gid}]", p.grad)
self.record(f"{opt_name}.params[{gid}]", p)
gid += 1
opt.step = step_hook
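# A minimal usage sketch (hypothetical model name; shown only as comments so
# that importing this module stays side-effect free):
#
#   hook = Hook("resnet50")                 # the PyTorch and Jittor runs must share base_name
#   net = MyTorchModel()                    # hypothetical model
#   opt = torch.optim.SGD(net.parameters(), lr=0.1)
#   hook.hook_module(net)                   # records inputs/outputs/params of every sub-module
#   hook.hook_optimizer(opt)                # records grads/params after each optimizer step
#
# Running the Jittor version of the same script with the same base_name replays
# the cache under ~/.cache/jittor/auto_diff/<base_name> and logs any mismatches.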
|
the-stack_106_24834
|
###_________ Introduction to machine learning with scikit-learn __________###
# Lately you have probably heard about _machine learning_, _deep learning_,
# _reinforcement learning_, many more things containing the word _learning_
# and, of course, _Big Data_. With the advances in computing power of recent
# years and the popularization of high-level languages, we have fully entered
# the craze of making machines learn. In this lesson we will see how to use the
# Python package `scikit-learn` to build predictive models from our data in a
# quick and simple way.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(42)
# First let's try a very simple example: fitting a straight line to some data.
# This can hardly be called _machine learning_, but it will help us see how to
# work with `scikit-learn`, how models are trained and how predictions are
# computed.
# To begin with, we generate some data distributed along a line with a bit of
# noise:
def noisy_line(a=2.0, b=0.8, c=50):
x = np.random.randn(c)
y = a * x + b * np.random.randn(c)
return x, y
x, y = noisy_line()
plt.scatter(x,y)
plt.show()
# The workflow for using `scikit-learn` is the following:
# 1. Split the data into a feature matrix `features` and the variable to
# predict `y`
# 2. Select the model
# 3. Choose the hyperparameters
# 4. Fit or train the model (`model.fit`)
# 5. Predict on new data (`model.predict`)
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True)
# We have to do this `reshape` to turn our vector into a column matrix. We will
# rarely need to repeat this step, since in practice we will always have
# several variables.
features = x.reshape(-1, 1)
model.fit(features, y)
y_hat = model.predict(features)
# To measure the error, the `sklearn.metrics` module provides several useful functions:
from sklearn import metrics
abs_error = metrics.mean_absolute_error(y, y_hat)
# Predict with new data
x_new = np.linspace(x.min(), x.max(), 10)
y_pred = model.predict(x_new.reshape(-1, 1))
plt.scatter(x, y)
plt.plot(x_new, y_pred, 'k--')
plt.scatter(x_new, y_pred, marker='x', lw=3, zorder=10)
plt.fill_between(x_new, y_pred + abs_error, y_pred - abs_error, color="C0", alpha=0.3)
plt.show()
##______________ Quick Overview ______________##
# In machine learning there are two kinds of problems:
# * **Supervised learning**, when we have _labelled_ data, that is: we know the
# variable to predict for a certain number of observations. Given that
# information, the algorithm will be able to predict that variable when it
# receives new observations. Depending on the nature of the variable to
# predict, we have in turn:
# - **Regression**, if it is continuous (as in the previous case), or
# - **Classification**, if it is discrete or categorical (yes/no, eye colour, etc.)
# * **Unsupervised learning**, when we have no _labelled_ data and therefore no
# _a priori_ information. In this case we use the algorithms to discover
# patterns in the data and group them, but afterwards we will have to inspect
# the result manually and see what meaning we can give to those groups.
# Depending on the nature of our problem, `scikit-learn` provides a wide
# variety of algorithms to choose from.
##______________ Classification ______________##
from sklearn.datasets import load_digits
digits = load_digits()
print(digits["DESCR"])
# The data is already split into a feature matrix and a prediction vector.
# In this case we have 64 = 8x8 features (one numerical value per pixel of the
# image) and the variable to predict is the digit itself.
features, labels = digits.data, digits.target
# To visualize these images we will have to do a `.reshape`:
num_ = features[42]
label_ = labels[42]
num_.reshape(8, 8).astype(int)
plt.figure(figsize=(2, 2))
plt.imshow(num_.reshape(8, 8), cmap=plt.cm.gray_r)
plt.show()
# Keep in mind that we know which digit each image shows because we are humans
# and can read them. The computer knows it because the images are labelled, but
# what happens when a new image arrives? For that we will have to build a
# classification model.
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(features, labels)
labels_hat = model.predict(features)
accuracy = metrics.accuracy_score(labels, labels_hat)
# It looks like we got practically all of them right! We will come back to this
# success rate, which may well be misleading (see the hold-out check sketched
# right after the confusion matrix below). For now, let's plot another measure
# of success, the confusion matrix:
confusion_mat = metrics.confusion_matrix(labels, labels_hat)
plt.imshow(confusion_mat, cmap=plt.cm.Blues)
plt.show()
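# A quick hold-out check (a minimal sketch, assuming the standard
# train_test_split helper from sklearn.model_selection): accuracy measured on
# the same data used for fitting tends to be optimistic, so we keep a test set
# aside and measure the accuracy there.
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.25, random_state=0)
holdout_model = LogisticRegression()
holdout_model.fit(X_train, y_train)
print("accuracy on unseen data:", metrics.accuracy_score(y_test, holdout_model.predict(X_test)))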
##__________ Clustering __________##
# Now that we have seen the two kinds of supervised problems, let's see how
# unsupervised problems are handled. First we will create two point clouds
# using the `make_blobs` function:
from sklearn.datasets import make_blobs
features, labels = make_blobs(centers=[[6, 0], [2, -1]], random_state=0)
plt.scatter(features[:, 0], features[:, 1], c=labels, cmap = plt.cm.Spectral)
plt.show()
# We created two groups and some points overlap, but what would happen if we
# did not have this visual information? Let's use a clustering model to group
# the data:
from sklearn.cluster import KMeans
model = KMeans()
# by default we get 8 clusters. Let's see what happens:  # can be changed with n_clusters=2
model.fit(features)
centroids = model.cluster_centers_
labels_pred = model.predict(features)
xmin, xmax = features[:, 0].min(), features[:, 0].max()
ymin, ymax = features[:, 1].min(), features[:, 1].max()
# And now we prepare the code to plot all the regions:
xx, yy = np.meshgrid(
np.linspace(xmin, xmax),
np.linspace(ymin, ymax)
)
mesh = np.c_[xx.ravel(), yy.ravel()]
Z = model.predict(mesh)
plt.pcolormesh(xx, yy, Z.reshape(xx.shape))
plt.scatter(features[:, 0], features[:, 1], marker='x', color='k')#c=labels_pred)
plt.scatter(centroids[:, 0], centroids[:, 1], marker='+', color='r', lw=2)
plt.show()
##______________ Dimensionality Reduction ______________##
# Let's bring back our digits dataset and try to visualize it in two
# dimensions, which is known as _dimensionality reduction_.
from sklearn.manifold import Isomap
model = Isomap(n_components=2)
model.fit(digits.data)
# And now we project the data using .transform:
digits_proj = model.transform(digits.data)
plt.scatter(digits_proj[:, 0], digits_proj[:, 1],
c=digits.target, cmap=plt.cm.Spectral, alpha=0.5)
plt.colorbar()
plt.gca().set_aspect(1)
plt.show()
##______________ Exercise ______________##
# 1. Visualize the iris flower dataset (`load_iris`) using the functions given
# below. Is there any clear way to separate the three species of flowers?
# 2. Split the dataset into a feature matrix `features` and a label vector
# `labels`. Convert them to NumPy arrays using `.as_matrix()` (or
# `.to_numpy()` in recent pandas versions).
# 3. Reduce the dimensionality of the dataset to 2 using `sklearn.manifold.Isomap`
# or `sklearn.decomposition.PCA` and apply a clustering algorithm with 3 clusters.
# Do the clusters that appear resemble the original groups?
# 4. Predict the type of flower using a classification algorithm. Visualize the
# confusion matrix. What is the algorithm's success rate? Is it more accurate
# for any particular type of flower? Does this agree with what you thought in
# step 1? (A minimal solution sketch follows the helper code below.)
import pandas as pd
def load_iris_df():
from sklearn.datasets import load_iris
iris = load_iris()
features, labels = iris.data, iris.target
df = pd.DataFrame(features, columns=iris.feature_names)
df["species"] = pd.Categorical.from_codes(iris.target, categories=iris.target_names)
#df = df.replace({'species': {0: iris.target_names[0], 1: iris.target_names[1], 2: iris.target_names[2]}})
return df
iris_df = load_iris_df()
from pandas.plotting import scatter_matrix
_ = scatter_matrix(iris_df, c=iris_df["species"].cat.codes, figsize=(10, 10))
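# A minimal solution sketch for the exercise above (assumptions: PCA for the 2D
# projection, KMeans with 3 clusters and LogisticRegression as the classifier,
# both fitted on the full dataset for simplicity):
from sklearn.decomposition import PCA

iris_features = iris_df.drop(columns=["species"]).to_numpy()
iris_labels = iris_df["species"].cat.codes.to_numpy()

# steps 2-3: project to 2D and cluster into 3 groups
iris_2d = PCA(n_components=2).fit_transform(iris_features)
iris_clusters = KMeans(n_clusters=3, random_state=0).fit_predict(iris_features)
plt.scatter(iris_2d[:, 0], iris_2d[:, 1], c=iris_clusters)
plt.show()

# step 4: classify and inspect the confusion matrix
iris_clf = LogisticRegression(max_iter=200).fit(iris_features, iris_labels)
iris_pred = iris_clf.predict(iris_features)
print("accuracy:", metrics.accuracy_score(iris_labels, iris_pred))
print(metrics.confusion_matrix(iris_labels, iris_pred))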
|
the-stack_106_24835
|
import textwrap
from conan.tools.cmake.base import CMakeToolchainBase
class CMakeiOSToolchain(CMakeToolchainBase):
_toolchain_tpl = textwrap.dedent("""
{% extends 'base_toolchain' %}
{% block before_try_compile %}
{{ super() }}
# set cmake vars
set(CMAKE_SYSTEM_NAME {{ CMAKE_SYSTEM_NAME }})
set(CMAKE_SYSTEM_VERSION {{ CMAKE_SYSTEM_VERSION }})
set(DEPLOYMENT_TARGET ${CONAN_SETTINGS_HOST_MIN_OS_VERSION})
# Set the architectures for which to build.
set(CMAKE_OSX_ARCHITECTURES {{ CMAKE_OSX_ARCHITECTURES }})
        # Set the CMAKE_OSX_SYSROOT SDK: with the Xcode generator the SDK name is enough,
        # but other generators need the full path
set(CMAKE_OSX_SYSROOT {{ CMAKE_OSX_SYSROOT }})
if(NOT DEFINED CMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM)
set(CMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM "123456789A" CACHE INTERNAL "")
endif()
{% endblock %}
{% block main %}
{{ super() }}
{% if shared_libs -%}
message(STATUS "Conan toolchain: Setting BUILD_SHARED_LIBS= {{ shared_libs }}")
set(BUILD_SHARED_LIBS {{ shared_libs }})
{%- endif %}
{% if parallel -%}
set(CONAN_CXX_FLAGS "${CONAN_CXX_FLAGS} {{ parallel }}")
set(CONAN_C_FLAGS "${CONAN_C_FLAGS} {{ parallel }}")
{%- endif %}
{% if cppstd -%}
        message(STATUS "Conan C++ Standard {{ cppstd }} with extensions {{ cppstd_extensions }}")
set(CMAKE_CXX_STANDARD {{ cppstd }})
set(CMAKE_CXX_EXTENSIONS {{ cppstd_extensions }})
{%- endif %}
set(CMAKE_CXX_FLAGS_INIT "${CONAN_CXX_FLAGS}" CACHE STRING "" FORCE)
set(CMAKE_C_FLAGS_INIT "${CONAN_C_FLAGS}" CACHE STRING "" FORCE)
set(CMAKE_SHARED_LINKER_FLAGS_INIT "${CONAN_SHARED_LINKER_FLAGS}" CACHE STRING "" FORCE)
set(CMAKE_EXE_LINKER_FLAGS_INIT "${CONAN_EXE_LINKER_FLAGS}" CACHE STRING "" FORCE)
{% endblock %}
""")
def __init__(self, conanfile, build_type=None, **kwargs):
super(CMakeiOSToolchain, self).__init__(conanfile, build_type=build_type, **kwargs)
self.build_type = build_type or self._conanfile.settings.get_safe("build_type")
self.host_architecture = self._get_architecture()
self.host_os = self._conanfile.settings.get_safe("os")
self.host_os_version = self._conanfile.settings.get_safe("os.version")
self.host_sdk_name = self._apple_sdk_name()
# TODO: Discuss how to handle CMAKE_OSX_DEPLOYMENT_TARGET to set min-version
# add a setting? check an option and if not present set a default?
# default to os.version?
def _get_templates(self):
templates = super(CMakeiOSToolchain, self)._get_templates()
templates.update({
CMakeToolchainBase.filename: self._toolchain_tpl,
})
return templates
def _get_architecture(self):
# check valid combinations of architecture - os ?
# for iOS a FAT library valid for simulator and device
# can be generated if multiple archs are specified:
# "-DCMAKE_OSX_ARCHITECTURES=armv7;armv7s;arm64;i386;x86_64"
arch = self._conanfile.settings.get_safe("arch")
return {"x86": "i386",
"x86_64": "x86_64",
"armv8": "arm64",
"armv8_32": "arm64_32"}.get(arch, arch)
# TODO: refactor, comes from conans.client.tools.apple.py
def _apple_sdk_name(self):
"""returns proper SDK name suitable for OS and architecture
we're building for (considering simulators)"""
arch = self._conanfile.settings.get_safe('arch')
os_ = self._conanfile.settings.get_safe('os')
if str(arch).startswith('x86'):
return {'Macos': 'macosx',
'iOS': 'iphonesimulator',
'watchOS': 'watchsimulator',
'tvOS': 'appletvsimulator'}.get(str(os_))
else:
return {'Macos': 'macosx',
'iOS': 'iphoneos',
'watchOS': 'watchos',
'tvOS': 'appletvos'}.get(str(os_), None)
def _get_template_context_data(self):
ctxt_toolchain = super(CMakeiOSToolchain, self)._get_template_context_data()
ctxt_toolchain.update({
"CMAKE_OSX_ARCHITECTURES": self.host_architecture,
"CMAKE_SYSTEM_NAME": self.host_os,
"CMAKE_SYSTEM_VERSION": self.host_os_version,
"CMAKE_OSX_SYSROOT": self.host_sdk_name
})
return ctxt_toolchain
|
the-stack_106_24836
|
import io
import htic
CVT = "cvt{ \n 0 cvt0 \n 10 cvt1 \n 20 cvt2 \n}"
FLAGS = "flags{ x 1 \n m 11110 \n}"
FPGM = "fpgm{ FDEF 0 func0 val \n POP \n ENDF \n FDEF 1 func1 val \n POP \n ENDF \n}"
FPGMPARAMS = "fpgm{ FDEF 0 func0 val pt cvt func stor \n POP \n POP \n POP \n POP \n POP \n ENDF \n}"
def getData(code):
parser = htic.parser.Parser()
return parser.parse(io.BytesIO(code))
def toBytes(instructions, precode="", name="A"):
data = getData(precode + name + "{" + instructions + "\n}")
translator = htic.translator.BinaryTranslator()
if name == "prep":
return translator.translate(data.prep)
elif name == "fpgm":
return translator.translate(data.fpgm)
else:
return translator.translate(data.glyphs[name])
|
the-stack_106_24839
|
from cv2 import cv2
import time
import pyautogui
import numpy as np
import mss
from os import listdir
# from run import getBackgroundText
import torch
from random import randint
# example_captcha_img = cv2.imread('images/example.png')
model = torch.hub.load('./captcha', 'custom', "captcha/bomb_captcha.pt", source='local')
def getBackgroundText(img, percent_required):
boxes = []
if type(img) == np.ndarray and percent_required:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
results = model(img, size=416)
digits = []
if results.xyxy[0].shape[0] >= 1:
for box in results.xyxy[0]:
x1, _, _, _, percent, digit = box
if percent >= percent_required:
digits.append({'x':x1.item(), 'd':digit.item()})
def getX(e):
return e['x']
digits.sort(key=getX)
def getD(e):
return str(int(e['d']))
return ''.join(list(map(getD, digits)))
def remove_suffix(input_string, suffix):
if suffix and input_string.endswith(suffix):
return input_string[:-len(suffix)]
return input_string
#TODO remove duplicated code
def load_images():
dir_name = './captcha/images/'
file_names = listdir(dir_name)
targets = {}
for file in file_names:
path = dir_name + file
targets[remove_suffix(file, '.png')] = cv2.imread(path)
return targets
d = load_images()
#TODO remove duplicated code
def positions(target, threshold=0.85,img = None):
if img is None:
img = printSreen()
result = cv2.matchTemplate(img,target,cv2.TM_CCOEFF_NORMED)
w = target.shape[1]
h = target.shape[0]
yloc, xloc = np.where(result >= threshold)
rectangles = []
for (x, y) in zip(xloc, yloc):
rectangles.append([int(x), int(y), int(w), int(h)])
rectangles.append([int(x), int(y), int(w), int(h)])
rectangles, weights = cv2.groupRectangles(rectangles, 1, 0.2)
return rectangles
def getDigits(d,img):
digits = []
for i in range(10):
p = positions(d[str(i)],img=img,threshold=0.95)
if len (p) > 0:
digits.append({'digit':str(i),'x':p[0][0]})
def getX(e):
return e['x']
digits.sort(key=getX)
r = list(map(lambda x : x['digit'],digits))
return(''.join(r))
# getFirstDigits(first)
def printSreen():
with mss.mss() as sct:
monitor = sct.monitors[0]
sct_img = np.array(sct.grab(monitor))
# The screen part to capture
# monitor = {"top": 160, "left": 160, "width": 1000, "height": 135}
# Grab the data
return sct_img[:,:,:3]
def captchaImg(img, pos,w = 500, h = 180):
# path = "./captchas-saved/{}.png".format(str(time.time()))
rx, ry, _, _ = pos
x_offset = -10
y_offset = 89
y = ry + y_offset
x = rx + x_offset
cropped = img[ y : y + h , x: x + w]
return cropped
def position(target, threshold=0.85,img = None):
if img is None:
img = printSreen()
result = cv2.matchTemplate(img,target,cv2.TM_CCOEFF_NORMED)
w = target.shape[1]
h = target.shape[0]
yloc, xloc = np.where(result >= threshold)
rectangles = []
for (x, y) in zip(xloc, yloc):
rectangles.append([int(x), int(y), int(w), int(h)])
rectangles.append([int(x), int(y), int(w), int(h)])
rectangles, weights = cv2.groupRectangles(rectangles, 1, 0.2)
if len(rectangles) > 0:
x,y, w,h = rectangles[0]
return (x+(w/2),y+h/2)
def getSliderPositions(screenshot, popup_pos):
slider = position(d['slider'],img=screenshot,threshold=0.8)
    cont = 0
    while slider is None:
        if cont == 10:
            break
        print('no slider')
        time.sleep(5)
        screenshot = printSreen()  # retake the screen before retrying
        slider = position(d['slider'], img=screenshot, threshold=0.78)
        cont += 1
(start_x, start_y) = slider
pyautogui.moveTo(start_x,start_y+randint(0,10),1)
pyautogui.mouseDown()
pyautogui.moveTo(start_x+400,start_y+randint(0,10),1)
screenshot = printSreen()
end = position(d['slider'],img=screenshot,threshold = 0.8)
(end_x, end_y) = end
size = end_x-start_x
increment = size/4
positions = []
for i in range(5):
# pyautogui.moveTo(start_x+increment*pos ,start_y+randint(0,10),1)
positions.append((start_x+increment*i ,start_y+randint(0,10)))
# screenshot = printSreen()
# time.sleep(2)
# pyautogui.mouseUp()
return positions
def solveCaptcha():
screenshot = printSreen()
img = screenshot.copy()
popup_pos = positions(d['robot'],img=img)
print(popup_pos)
if len(popup_pos) == 0:
print('no captcha popup found!')
return
screenshot = printSreen()
img = screenshot.copy()
img = captchaImg(img, popup_pos[0])
digits = getDigits(d, img)
slider_positions = getSliderPositions(screenshot, popup_pos)
# moveSlider(screenshot,3,popup_pos)
for position in slider_positions:
x, y = position
pyautogui.moveTo(x,y,1)
screenshot = printSreen()
popup_pos = positions(d['robot'],img=screenshot)
captcha_img = captchaImg(screenshot, popup_pos[0])
# captcha_img = example_captcha_img
background_digits = getBackgroundText(captcha_img, 0.7)
print( 'dig: {}, background_digits: {}'.format(digits, background_digits))
if digits == background_digits:
print('FOUND!')
pyautogui.mouseUp()
return
else:
pyautogui.mouseUp()
            print('NOT FOUND!')
if __name__ == '__main__':
solveCaptcha()
#TODO move positions() into a separate file and import it in the other modules.
# Also take the digit-image loading out of here and pass it as an argument to the function.
|
the-stack_106_24840
|
from theano import gof
class TypedListType(gof.Type):
def __init__(self, ttype, depth=0):
"""
:Parameters:
        -'ttype' : the Type of the theano variables this list
         will contain; it can itself be another list type.
        -'depth' : optional parameter; any value above 0 will
         create a nested list of this depth (0-based).
"""
if depth < 0:
            raise ValueError('Please specify a depth greater than '
                             'or equal to 0')
if not isinstance(ttype, gof.Type):
raise TypeError('Expected a Theano Type')
if depth == 0:
self.ttype = ttype
else:
self.ttype = TypedListType(ttype, depth - 1)
def filter(self, x, strict=False, allow_downcast=None):
"""
:Parameters:
-'x' : value to filter
        -'strict' : if True, only a native python list will be accepted
        -'allow_downcast' : currently unused
"""
if strict:
if not isinstance(x, list):
raise TypeError('Expected a python list')
else:
x = [self.ttype.filter(y) for y in x]
if all(self.ttype.is_valid_value(y) for y in x):
return x
else:
raise TypeError('Expected all elements to'
' be %s' % str(self.ttype))
def __eq__(self, other):
"""
        Two lists are equal if they contain the same type.
"""
return type(self) == type(other) and self.ttype == other.ttype
def __hash__(self):
return gof.hashtype(self) ^ hash(self.ttype)
def __str__(self):
return 'TypedList <' + str(self.ttype) + '>'
def get_depth(self):
"""
        Utility function to get the 0-based
        nesting level of the list.
"""
if isinstance(self.ttype, TypedListType):
return self.ttype.get_depth() + 1
else:
return 0
def values_eq(self, a, b):
if not len(a) == len(b):
return False
for x in range(len(a)):
if not self.ttype.values_eq(a[x], b[x]):
return False
return True
def may_share_memory(self, a, b):
if a is b:
return True
        # As a list contains other elements, if a or b isn't a list we
        # still need to check whether that element shares memory with
        # the other list.
if not isinstance(a, list):
a = [a]
if not isinstance(b, list):
b = [b]
for idx1 in range(len(a)):
for idx2 in range(len(b)):
if self.ttype.may_share_memory(a[idx1], b[idx2]):
return True
def c_declare(self, name, sub, check_input=True):
return """
PyListObject* %(name)s;
""" % dict(name=name)
def c_init(self, name, sub):
return """
%(name)s = NULL;
""" % dict(name=name)
def c_extract(self, name, sub, check_input=True):
if check_input:
pre = """
if (!PyList_Check(py_%(name)s)) {
PyErr_SetString(PyExc_TypeError, "expected a list");
%(fail)s
}""" % dict(name=name, fail=sub['fail'])
else:
pre = ""
return pre + """
%(name)s = (PyListObject*) (py_%(name)s);
""" % dict(name=name, fail=sub['fail'])
def c_sync(self, name, sub):
return """
Py_XDECREF(py_%(name)s);
py_%(name)s = (PyObject*)(%(name)s);
Py_INCREF(py_%(name)s);
""" % dict(name=name)
def c_cleanup(self, name, sub):
return ""
def c_code_cache_version(self):
return (2,)
dtype = property(lambda self: self.ttype)
ndim = property(lambda self: self.ttype.ndim + 1)
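# A minimal usage sketch (assuming the standard TensorType from theano.tensor),
# shown as comments only:
#
#   import theano.tensor as T
#   vec_list = TypedListType(T.TensorType('float64', (False,)))            # list of float64 vectors
#   nested = TypedListType(T.TensorType('float64', (False,)), depth=1)     # list of such lists
#   vec_list.get_depth()   # -> 0
#   nested.get_depth()     # -> 1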
|
the-stack_106_24841
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Constant
__all__ = ["get_reg_loss"]
def sigmoid_focal_loss(logits, labels, weights, gamma=2.0, alpha=0.25):
sce_loss = fluid.layers.sigmoid_cross_entropy_with_logits(logits, labels)
prob = fluid.layers.sigmoid(logits)
p_t = labels * prob + (1.0 - labels) * (1.0 - prob)
modulating_factor = fluid.layers.pow(1.0 - p_t, gamma)
alpha_weight_factor = labels * alpha + (1.0 - labels) * (1.0 - alpha)
return modulating_factor * alpha_weight_factor * sce_loss * weights
def get_reg_loss(pred_reg, reg_label, fg_mask, point_num, loc_scope,
loc_bin_size, num_head_bin, anchor_size,
get_xz_fine=True, get_y_by_bin=False, loc_y_scope=0.5,
loc_y_bin_size=0.25, get_ry_fine=False):
"""
Bin-based 3D bounding boxes regression loss. See https://arxiv.org/abs/1812.04244 for more details.
:param pred_reg: (N, C)
:param reg_label: (N, 7) [dx, dy, dz, h, w, l, ry]
:param loc_scope: constant
:param loc_bin_size: constant
:param num_head_bin: constant
:param anchor_size: (N, 3) or (3)
:param get_xz_fine:
:param get_y_by_bin:
:param loc_y_scope:
:param loc_y_bin_size:
:param get_ry_fine:
:return:
"""
fg_num = fluid.layers.cast(fluid.layers.reduce_sum(fg_mask), dtype=pred_reg.dtype)
fg_num = fluid.layers.clip(fg_num, min=1.0, max=point_num)
fg_scale = float(point_num) / fg_num
per_loc_bin_num = int(loc_scope / loc_bin_size) * 2
loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2
reg_loss_dict = {}
# xz localization loss
x_offset_label, y_offset_label, z_offset_label = reg_label[:, 0:1], reg_label[:, 1:2], reg_label[:, 2:3]
x_shift = fluid.layers.clip(x_offset_label + loc_scope, 0., loc_scope * 2 - 1e-3)
z_shift = fluid.layers.clip(z_offset_label + loc_scope, 0., loc_scope * 2 - 1e-3)
x_bin_label = fluid.layers.cast(x_shift / loc_bin_size, dtype='int64')
z_bin_label = fluid.layers.cast(z_shift / loc_bin_size, dtype='int64')
x_bin_l, x_bin_r = 0, per_loc_bin_num
z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2
start_offset = z_bin_r
loss_x_bin = fluid.layers.softmax_with_cross_entropy(pred_reg[:, x_bin_l: x_bin_r], x_bin_label)
loss_x_bin = fluid.layers.reduce_mean(loss_x_bin * fg_mask) * fg_scale
loss_z_bin = fluid.layers.softmax_with_cross_entropy(pred_reg[:, z_bin_l: z_bin_r], z_bin_label)
loss_z_bin = fluid.layers.reduce_mean(loss_z_bin * fg_mask) * fg_scale
reg_loss_dict['loss_x_bin'] = loss_x_bin
reg_loss_dict['loss_z_bin'] = loss_z_bin
loc_loss = loss_x_bin + loss_z_bin
if get_xz_fine:
x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3
z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4
start_offset = z_res_r
x_res_label = x_shift - (fluid.layers.cast(x_bin_label, dtype=x_shift.dtype) * loc_bin_size + loc_bin_size / 2.)
z_res_label = z_shift - (fluid.layers.cast(z_bin_label, dtype=z_shift.dtype) * loc_bin_size + loc_bin_size / 2.)
x_res_norm_label = x_res_label / loc_bin_size
z_res_norm_label = z_res_label / loc_bin_size
x_bin_onehot = fluid.layers.one_hot(x_bin_label, depth=per_loc_bin_num)
z_bin_onehot = fluid.layers.one_hot(z_bin_label, depth=per_loc_bin_num)
loss_x_res = fluid.layers.smooth_l1(fluid.layers.reduce_sum(pred_reg[:, x_res_l: x_res_r] * x_bin_onehot, dim=1, keep_dim=True), x_res_norm_label)
loss_x_res = fluid.layers.reduce_mean(loss_x_res * fg_mask) * fg_scale
loss_z_res = fluid.layers.smooth_l1(fluid.layers.reduce_sum(pred_reg[:, z_res_l: z_res_r] * z_bin_onehot, dim=1, keep_dim=True), z_res_norm_label)
loss_z_res = fluid.layers.reduce_mean(loss_z_res * fg_mask) * fg_scale
reg_loss_dict['loss_x_res'] = loss_x_res
reg_loss_dict['loss_z_res'] = loss_z_res
loc_loss += loss_x_res + loss_z_res
# y localization loss
if get_y_by_bin:
y_bin_l, y_bin_r = start_offset, start_offset + loc_y_bin_num
y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num
start_offset = y_res_r
y_shift = fluid.layers.clip(y_offset_label + loc_y_scope, 0., loc_y_scope * 2 - 1e-3)
y_bin_label = fluid.layers.cast(y_shift / loc_y_bin_size, dtype='int64')
y_res_label = y_shift - (fluid.layers.cast(y_bin_label, dtype=y_shift.dtype) * loc_y_bin_size + loc_y_bin_size / 2.)
y_res_norm_label = y_res_label / loc_y_bin_size
y_bin_onehot = fluid.layers.one_hot(y_bin_label, depth=per_loc_bin_num)
loss_y_bin = fluid.layers.cross_entropy(pred_reg[:, y_bin_l: y_bin_r], y_bin_label)
loss_y_bin = fluid.layers.reduce_mean(loss_y_bin * fg_mask) * fg_scale
loss_y_res = fluid.layers.smooth_l1(fluid.layers.reduce_sum(pred_reg[:, y_res_l: y_res_r] * y_bin_onehot, dim=1, keep_dim=True), y_res_norm_label)
loss_y_res = fluid.layers.reduce_mean(loss_y_res * fg_mask) * fg_scale
reg_loss_dict['loss_y_bin'] = loss_y_bin
reg_loss_dict['loss_y_res'] = loss_y_res
loc_loss += loss_y_bin + loss_y_res
else:
y_offset_l, y_offset_r = start_offset, start_offset + 1
start_offset = y_offset_r
loss_y_offset = fluid.layers.smooth_l1(fluid.layers.reduce_sum(pred_reg[:, y_offset_l: y_offset_r], dim=1, keep_dim=True), y_offset_label)
loss_y_offset = fluid.layers.reduce_mean(loss_y_offset * fg_mask) * fg_scale
reg_loss_dict['loss_y_offset'] = loss_y_offset
loc_loss += loss_y_offset
# angle loss
ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin
ry_label = reg_label[:, 6:7]
if get_ry_fine:
# divide pi/2 into several bins
angle_per_class = (np.pi / 2) / num_head_bin
ry_label = ry_label % (2 * np.pi) # 0 ~ 2pi
opposite_flag = fluid.layers.logical_and(ry_label > np.pi * 0.5, ry_label < np.pi * 1.5)
opposite_flag = fluid.layers.cast(opposite_flag, dtype=ry_label.dtype)
shift_angle = (ry_label + opposite_flag * np.pi + np.pi * 0.5) % (2 * np.pi) # (0 ~ pi)
shift_angle.stop_gradient = True
shift_angle = fluid.layers.clip(shift_angle - np.pi * 0.25, min=1e-3, max=np.pi * 0.5 - 1e-3) # (0, pi/2)
# bin center is (5, 10, 15, ..., 85)
ry_bin_label = fluid.layers.cast(shift_angle / angle_per_class, dtype='int64')
ry_res_label = shift_angle - (fluid.layers.cast(ry_bin_label, dtype=shift_angle.dtype) * angle_per_class + angle_per_class / 2)
ry_res_norm_label = ry_res_label / (angle_per_class / 2)
else:
# divide 2pi into several bins
angle_per_class = (2 * np.pi) / num_head_bin
heading_angle = ry_label % (2 * np.pi) # 0 ~ 2pi
shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)
shift_angle.stop_gradient = True
ry_bin_label = fluid.layers.cast(shift_angle / angle_per_class, dtype='int64')
ry_res_label = shift_angle - (fluid.layers.cast(ry_bin_label, dtype=shift_angle.dtype) * angle_per_class + angle_per_class / 2)
ry_res_norm_label = ry_res_label / (angle_per_class / 2)
ry_bin_onehot = fluid.layers.one_hot(ry_bin_label, depth=num_head_bin)
loss_ry_bin = fluid.layers.softmax_with_cross_entropy(pred_reg[:, ry_bin_l:ry_bin_r], ry_bin_label)
loss_ry_bin = fluid.layers.reduce_mean(loss_ry_bin * fg_mask) * fg_scale
loss_ry_res = fluid.layers.smooth_l1(fluid.layers.reduce_sum(pred_reg[:, ry_res_l: ry_res_r] * ry_bin_onehot, dim=1, keep_dim=True), ry_res_norm_label)
loss_ry_res = fluid.layers.reduce_mean(loss_ry_res * fg_mask) * fg_scale
reg_loss_dict['loss_ry_bin'] = loss_ry_bin
reg_loss_dict['loss_ry_res'] = loss_ry_res
angle_loss = loss_ry_bin + loss_ry_res
# size loss
size_res_l, size_res_r = ry_res_r, ry_res_r + 3
assert pred_reg.shape[1] == size_res_r, '%d vs %d' % (pred_reg.shape[1], size_res_r)
anchor_size_var = fluid.layers.zeros(shape=[3], dtype=reg_label.dtype)
fluid.layers.assign(np.array(anchor_size).astype('float32'), anchor_size_var)
size_res_norm_label = (reg_label[:, 3:6] - anchor_size_var) / anchor_size_var
size_res_norm_label = fluid.layers.reshape(size_res_norm_label, shape=[-1, 1], inplace=True)
size_res_norm = pred_reg[:, size_res_l:size_res_r]
size_res_norm = fluid.layers.reshape(size_res_norm, shape=[-1, 1], inplace=True)
size_loss = fluid.layers.smooth_l1(size_res_norm, size_res_norm_label)
size_loss = fluid.layers.reduce_mean(fluid.layers.reshape(size_loss, [-1, 3]) * fg_mask) * fg_scale
# Total regression loss
reg_loss_dict['loss_loc'] = loc_loss
reg_loss_dict['loss_angle'] = angle_loss
reg_loss_dict['loss_size'] = size_loss
return loc_loss, angle_loss, size_loss, reg_loss_dict
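# A minimal sketch of the channel layout that pred_reg is expected to follow,
# using assumed PointRCNN-style hyper-parameters (illustration only, not part
# of the training pipeline):
if __name__ == '__main__':
    loc_scope, loc_bin_size, num_head_bin = 3.0, 0.5, 12
    per_loc_bin_num = int(loc_scope / loc_bin_size) * 2   # 12 bins each for x and z
    reg_channels = (
        per_loc_bin_num * 4   # x_bin, z_bin, x_res, z_res (get_xz_fine=True)
        + 1                   # y_offset (get_y_by_bin=False)
        + num_head_bin * 2    # ry_bin, ry_res
        + 3                   # size residuals (h, w, l)
    )
    print(reg_channels)       # -> 76 channels for this configuration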
|
the-stack_106_24842
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import inspect
import sys
def get_locale(name):
'''Returns an appropriate :class:`Locale <locale.Locale>` corresponding
    to an input locale name.
:param name: the name of the locale.
'''
locale_cls = _locales.get(name.lower())
if locale_cls is None:
raise ValueError('Unsupported locale \'{0}\''.format(name))
return locale_cls()
# base locale type.
class Locale(object):
''' Represents locale-specific data and functionality. '''
names = []
timeframes = {
'now': '',
'seconds': '',
'minute': '',
'minutes': '',
'hour': '',
'hours': '',
'day': '',
'days': '',
'month': '',
'months': '',
'year': '',
'years': '',
}
meridians = {
'am': '',
'pm': '',
'AM': '',
'PM': '',
}
past = None
future = None
month_names = []
month_abbreviations = []
day_names = []
day_abbreviations = []
ordinal_day_re = r'(\d+)'
def __init__(self):
self._month_name_to_ordinal = None
def describe(self, timeframe, delta=0, only_distance=False):
''' Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
        :param only_distance: return only the distance, e.g. "11 seconds", without the "in"/"ago" wording
'''
humanized = self._format_timeframe(timeframe, delta)
if not only_distance:
humanized = self._format_relative(humanized, timeframe, delta)
return humanized
def day_name(self, day):
''' Returns the day name for a specified day of the week.
:param day: the ``int`` day of the week (1-7).
'''
return self.day_names[day]
def day_abbreviation(self, day):
''' Returns the day abbreviation for a specified day of the week.
:param day: the ``int`` day of the week (1-7).
'''
return self.day_abbreviations[day]
def month_name(self, month):
''' Returns the month name for a specified month of the year.
:param month: the ``int`` month of the year (1-12).
'''
return self.month_names[month]
def month_abbreviation(self, month):
''' Returns the month abbreviation for a specified month of the year.
:param month: the ``int`` month of the year (1-12).
'''
return self.month_abbreviations[month]
def month_number(self, name):
''' Returns the month number for a month specified by name or abbreviation.
:param name: the month name or abbreviation.
'''
if self._month_name_to_ordinal is None:
self._month_name_to_ordinal = self._name_to_ordinal(self.month_names)
self._month_name_to_ordinal.update(self._name_to_ordinal(self.month_abbreviations))
return self._month_name_to_ordinal.get(name)
def year_full(self, year):
        ''' Returns the full year formatted for the specific locale, if available.
        :param year: the ``int`` year (4-digit)
'''
return '{0:04d}'.format(year)
def year_abbreviation(self, year):
        ''' Returns the abbreviated (2-digit) year for the specific locale, if available.
        :param year: the ``int`` year (4-digit)
'''
return '{0:04d}'.format(year)[2:]
def meridian(self, hour, token):
''' Returns the meridian indicator for a specified hour and format token.
:param hour: the ``int`` hour of the day.
:param token: the format token.
'''
if token == 'a':
return self.meridians['am'] if hour < 12 else self.meridians['pm']
if token == 'A':
return self.meridians['AM'] if hour < 12 else self.meridians['PM']
def ordinal_number(self, n):
''' Returns the ordinal format of a given integer
:param n: an integer
'''
return self._ordinal_number(n)
def _ordinal_number(self, n):
return '{0}'.format(n)
def _name_to_ordinal(self, lst):
return dict(map(lambda i: (i[1].lower(), i[0] + 1), enumerate(lst[1:])))
def _format_timeframe(self, timeframe, delta):
return self.timeframes[timeframe].format(abs(delta))
def _format_relative(self, humanized, timeframe, delta):
if timeframe == 'now':
return humanized
direction = self.past if delta < 0 else self.future
return direction.format(humanized)
# base locale type implementations.
class EnglishLocale(Locale):
names = ['en', 'en_us', 'en_gb', 'en_au', 'en_be', 'en_jp', 'en_za', 'en_ca']
past = '{0} ago'
future = 'in {0}'
timeframes = {
'now': 'just now',
'seconds': 'seconds',
'minute': 'a minute',
'minutes': '{0} minutes',
'hour': 'an hour',
'hours': '{0} hours',
'day': 'a day',
'days': '{0} days',
'month': 'a month',
'months': '{0} months',
'year': 'a year',
'years': '{0} years',
}
meridians = {
'am': 'am',
'pm': 'pm',
'AM': 'AM',
'PM': 'PM',
}
month_names = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
month_abbreviations = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
day_names = ['', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
day_abbreviations = ['', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
ordinal_day_re = r'((?P<value>[2-3]?1(?=st)|[2-3]?2(?=nd)|[2-3]?3(?=rd)|[1-3]?[04-9](?=th)|1[1-3](?=th))(st|nd|rd|th))'
def _ordinal_number(self, n):
if n % 100 not in (11, 12, 13):
remainder = abs(n) % 10
if remainder == 1:
return '{0}st'.format(n)
elif remainder == 2:
return '{0}nd'.format(n)
elif remainder == 3:
return '{0}rd'.format(n)
return '{0}th'.format(n)
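        # For illustration: 1 -> '1st', 2 -> '2nd', 3 -> '3rd', 4 -> '4th',
        # 11 -> '11th', 21 -> '21st', 112 -> '112th'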
class ItalianLocale(Locale):
names = ['it', 'it_it']
past = '{0} fa'
future = 'tra {0}'
timeframes = {
'now': 'adesso',
'seconds': 'qualche secondo',
'minute': 'un minuto',
'minutes': '{0} minuti',
'hour': 'un\'ora',
'hours': '{0} ore',
'day': 'un giorno',
'days': '{0} giorni',
'month': 'un mese',
'months': '{0} mesi',
'year': 'un anno',
'years': '{0} anni',
}
month_names = ['', 'gennaio', 'febbraio', 'marzo', 'aprile', 'maggio', 'giugno', 'luglio',
'agosto', 'settembre', 'ottobre', 'novembre', 'dicembre']
month_abbreviations = ['', 'gen', 'feb', 'mar', 'apr', 'mag', 'giu', 'lug', 'ago',
'set', 'ott', 'nov', 'dic']
day_names = ['', 'lunedì', 'martedì', 'mercoledì', 'giovedì', 'venerdì', 'sabato', 'domenica']
day_abbreviations = ['', 'lun', 'mar', 'mer', 'gio', 'ven', 'sab', 'dom']
ordinal_day_re = r'((?P<value>[1-3]?[0-9](?=°))°)'
def _ordinal_number(self, n):
return '{0}°'.format(n)
class SpanishLocale(Locale):
names = ['es', 'es_es']
past = 'hace {0}'
future = 'en {0}'
timeframes = {
'now': 'ahora',
'seconds': 'segundos',
'minute': 'un minuto',
'minutes': '{0} minutos',
'hour': 'una hora',
'hours': '{0} horas',
'day': 'un día',
'days': '{0} días',
'month': 'un mes',
'months': '{0} meses',
'year': 'un año',
'years': '{0} años',
}
month_names = ['', 'enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio',
'agosto', 'septiembre', 'octubre', 'noviembre', 'diciembre']
month_abbreviations = ['', 'ene', 'feb', 'mar', 'abr', 'may', 'jun', 'jul', 'ago',
'sep', 'oct', 'nov', 'dic']
day_names = ['', 'lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado', 'domingo']
day_abbreviations = ['', 'lun', 'mar', 'mie', 'jue', 'vie', 'sab', 'dom']
ordinal_day_re = r'((?P<value>[1-3]?[0-9](?=°))°)'
def _ordinal_number(self, n):
return '{0}°'.format(n)
class FrenchLocale(Locale):
names = ['fr', 'fr_fr']
past = 'il y a {0}'
future = 'dans {0}'
timeframes = {
'now': 'maintenant',
'seconds': 'quelques secondes',
'minute': 'une minute',
'minutes': '{0} minutes',
'hour': 'une heure',
'hours': '{0} heures',
'day': 'un jour',
'days': '{0} jours',
'month': 'un mois',
'months': '{0} mois',
'year': 'un an',
'years': '{0} ans',
}
month_names = ['', 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet',
'août', 'septembre', 'octobre', 'novembre', 'décembre']
month_abbreviations = ['', 'janv', 'févr', 'mars', 'avr', 'mai', 'juin', 'juil', 'août',
'sept', 'oct', 'nov', 'déc']
day_names = ['', 'lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi', 'samedi', 'dimanche']
day_abbreviations = ['', 'lun', 'mar', 'mer', 'jeu', 'ven', 'sam', 'dim']
ordinal_day_re = r'((?P<value>\b1(?=er\b)|[1-3]?[02-9](?=e\b)|[1-3]1(?=e\b))(er|e)\b)'
def _ordinal_number(self, n):
if abs(n) == 1:
return '{0}er'.format(n)
return '{0}e'.format(n)
class GreekLocale(Locale):
names = ['el', 'el_gr']
past = '{0} πριν'
future = 'σε {0}'
timeframes = {
'now': 'τώρα',
'seconds': 'δευτερόλεπτα',
'minute': 'ένα λεπτό',
'minutes': '{0} λεπτά',
'hour': 'μια ώρα',
'hours': '{0} ώρες',
'day': 'μια μέρα',
'days': '{0} μέρες',
'month': 'ένα μήνα',
'months': '{0} μήνες',
'year': 'ένα χρόνο',
'years': '{0} χρόνια',
}
month_names = ['', 'Ιανουαρίου', 'Φεβρουαρίου', 'Μαρτίου', 'Απριλίου', 'Μαΐου', 'Ιουνίου',
'Ιουλίου', 'Αυγούστου', 'Σεπτεμβρίου', 'Οκτωβρίου', 'Νοεμβρίου', 'Δεκεμβρίου']
month_abbreviations = ['', 'Ιαν', 'Φεβ', 'Μαρ', 'Απρ', 'Μαϊ', 'Ιον', 'Ιολ', 'Αυγ',
'Σεπ', 'Οκτ', 'Νοε', 'Δεκ']
day_names = ['', 'Δευτέρα', 'Τρίτη', 'Τετάρτη', 'Πέμπτη', 'Παρασκευή', 'Σάββατο', 'Κυριακή']
day_abbreviations = ['', 'Δευ', 'Τρι', 'Τετ', 'Πεμ', 'Παρ', 'Σαβ', 'Κυρ']
class JapaneseLocale(Locale):
names = ['ja', 'ja_jp']
past = '{0}前'
future = '{0}後'
timeframes = {
'now': '現在',
'seconds': '数秒',
'minute': '1分',
'minutes': '{0}分',
'hour': '1時間',
'hours': '{0}時間',
'day': '1日',
'days': '{0}日',
'month': '1ヶ月',
'months': '{0}ヶ月',
'year': '1年',
'years': '{0}年',
}
month_names = ['', '1月', '2月', '3月', '4月', '5月', '6月', '7月', '8月',
'9月', '10月', '11月', '12月']
month_abbreviations = ['', ' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8',
' 9', '10', '11', '12']
day_names = ['', '月曜日', '火曜日', '水曜日', '木曜日', '金曜日', '土曜日', '日曜日']
day_abbreviations = ['', '月', '火', '水', '木', '金', '土', '日']
class SwedishLocale(Locale):
names = ['sv', 'sv_se']
past = 'för {0} sen'
future = 'om {0}'
timeframes = {
'now': 'just nu',
'seconds': 'några sekunder',
'minute': 'en minut',
'minutes': '{0} minuter',
'hour': 'en timme',
'hours': '{0} timmar',
'day': 'en dag',
'days': '{0} dagar',
'month': 'en månad',
'months': '{0} månader',
'year': 'ett år',
'years': '{0} år',
}
month_names = ['', 'januari', 'februari', 'mars', 'april', 'maj', 'juni', 'juli',
'augusti', 'september', 'oktober', 'november', 'december']
month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'maj', 'jun', 'jul',
'aug', 'sep', 'okt', 'nov', 'dec']
day_names = ['', 'måndag', 'tisdag', 'onsdag', 'torsdag', 'fredag', 'lördag', 'söndag']
day_abbreviations = ['', 'mån', 'tis', 'ons', 'tor', 'fre', 'lör', 'sön']
class FinnishLocale(Locale):
names = ['fi', 'fi_fi']
    # Finnish grammar is very complex, and it is hard to convert it
    # 1-to-1 to something like English.
past = '{0} sitten'
future = '{0} kuluttua'
timeframes = {
'now': ['juuri nyt', 'juuri nyt'],
'seconds': ['muutama sekunti', 'muutaman sekunnin'],
'minute': ['minuutti', 'minuutin'],
'minutes': ['{0} minuuttia', '{0} minuutin'],
'hour': ['tunti', 'tunnin'],
'hours': ['{0} tuntia', '{0} tunnin'],
'day': ['päivä', 'päivä'],
'days': ['{0} päivää', '{0} päivän'],
'month': ['kuukausi', 'kuukauden'],
'months': ['{0} kuukautta', '{0} kuukauden'],
'year': ['vuosi', 'vuoden'],
'years': ['{0} vuotta', '{0} vuoden'],
}
# Months and days are lowercase in Finnish
month_names = ['', 'tammikuu', 'helmikuu', 'maaliskuu', 'huhtikuu',
'toukokuu', 'kesäkuu', 'heinäkuu', 'elokuu',
'syyskuu', 'lokakuu', 'marraskuu', 'joulukuu']
month_abbreviations = ['', 'tammi', 'helmi', 'maalis', 'huhti',
'touko', 'kesä', 'heinä', 'elo',
'syys', 'loka', 'marras', 'joulu']
day_names = ['', 'maanantai', 'tiistai', 'keskiviikko', 'torstai',
'perjantai', 'lauantai', 'sunnuntai']
day_abbreviations = ['', 'ma', 'ti', 'ke', 'to', 'pe', 'la', 'su']
def _format_timeframe(self, timeframe, delta):
return (self.timeframes[timeframe][0].format(abs(delta)),
self.timeframes[timeframe][1].format(abs(delta)))
def _format_relative(self, humanized, timeframe, delta):
if timeframe == 'now':
return humanized[0]
direction = self.past if delta < 0 else self.future
which = 0 if delta < 0 else 1
return direction.format(humanized[which])
def _ordinal_number(self, n):
return '{0}.'.format(n)
class ChineseCNLocale(Locale):
names = ['zh', 'zh_cn']
past = '{0}前'
future = '{0}后'
timeframes = {
'now': '刚才',
'seconds': '几秒',
'minute': '1分钟',
'minutes': '{0}分钟',
'hour': '1小时',
'hours': '{0}小时',
'day': '1天',
'days': '{0}天',
'month': '1个月',
'months': '{0}个月',
'year': '1年',
'years': '{0}年',
}
month_names = ['', '一月', '二月', '三月', '四月', '五月', '六月', '七月',
'八月', '九月', '十月', '十一月', '十二月']
month_abbreviations = ['', ' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8',
' 9', '10', '11', '12']
day_names = ['', '星期一', '星期二', '星期三', '星期四', '星期五', '星期六', '星期日']
day_abbreviations = ['', '一', '二', '三', '四', '五', '六', '日']
class ChineseTWLocale(Locale):
names = ['zh_tw']
past = '{0}前'
future = '{0}後'
timeframes = {
'now': '剛才',
'seconds': '幾秒',
'minute': '1分鐘',
'minutes': '{0}分鐘',
'hour': '1小時',
'hours': '{0}小時',
'day': '1天',
'days': '{0}天',
'month': '1個月',
'months': '{0}個月',
'year': '1年',
'years': '{0}年',
}
month_names = ['', '1月', '2月', '3月', '4月', '5月', '6月', '7月', '8月',
'9月', '10月', '11月', '12月']
month_abbreviations = ['', ' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8',
' 9', '10', '11', '12']
day_names = ['', '周一', '周二', '周三', '周四', '周五', '周六', '周日']
day_abbreviations = ['', '一', '二', '三', '四', '五', '六', '日']
class KoreanLocale(Locale):
names = ['ko', 'ko_kr']
past = '{0} 전'
future = '{0} 후'
timeframes = {
'now': '지금',
'seconds': '몇초',
'minute': '일 분',
'minutes': '{0}분',
'hour': '1시간',
'hours': '{0}시간',
'day': '1일',
'days': '{0}일',
'month': '1개월',
'months': '{0}개월',
'year': '1년',
'years': '{0}년',
}
month_names = ['', '1월', '2월', '3월', '4월', '5월', '6월', '7월', '8월',
'9월', '10월', '11월', '12월']
month_abbreviations = ['', ' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8',
' 9', '10', '11', '12']
day_names = ['', '월요일', '화요일', '수요일', '목요일', '금요일', '토요일', '일요일']
day_abbreviations = ['', '월', '화', '수', '목', '금', '토', '일']
# derived locale types & implementations.
class DutchLocale(Locale):
names = ['nl', 'nl_nl']
past = '{0} geleden'
future = 'over {0}'
timeframes = {
'now': 'nu',
'seconds': 'seconden',
'minute': 'een minuut',
'minutes': '{0} minuten',
'hour': 'een uur',
'hours': '{0} uur',
'day': 'een dag',
'days': '{0} dagen',
'month': 'een maand',
'months': '{0} maanden',
'year': 'een jaar',
'years': '{0} jaar',
}
    # In Dutch, the names of months and days do not start with a capital
    # letter, unlike in English.
month_names = ['', 'januari', 'februari', 'maart', 'april', 'mei', 'juni', 'juli',
'augustus', 'september', 'oktober', 'november', 'december']
month_abbreviations = ['', 'jan', 'feb', 'mrt', 'apr', 'mei', 'jun', 'jul', 'aug',
'sep', 'okt', 'nov', 'dec']
day_names = ['', 'maandag', 'dinsdag', 'woensdag', 'donderdag', 'vrijdag', 'zaterdag', 'zondag']
day_abbreviations = ['', 'ma', 'di', 'wo', 'do', 'vr', 'za', 'zo']
class SlavicBaseLocale(Locale):
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, list):
if delta % 10 == 1 and delta % 100 != 11:
form = form[0]
elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[1]
else:
form = form[2]
return form.format(delta)
class BelarusianLocale(SlavicBaseLocale):
names = ['be', 'be_by']
past = '{0} таму'
future = 'праз {0}'
timeframes = {
'now': 'зараз',
'seconds': 'некалькі секунд',
'minute': 'хвіліну',
'minutes': ['{0} хвіліну', '{0} хвіліны', '{0} хвілін'],
'hour': 'гадзіну',
'hours': ['{0} гадзіну', '{0} гадзіны', '{0} гадзін'],
'day': 'дзень',
'days': ['{0} дзень', '{0} дні', '{0} дзён'],
'month': 'месяц',
'months': ['{0} месяц', '{0} месяцы', '{0} месяцаў'],
'year': 'год',
'years': ['{0} год', '{0} гады', '{0} гадоў'],
}
month_names = ['', 'студзеня', 'лютага', 'сакавіка', 'красавіка', 'траўня', 'чэрвеня',
'ліпеня', 'жніўня', 'верасня', 'кастрычніка', 'лістапада', 'снежня']
month_abbreviations = ['', 'студ', 'лют', 'сак', 'крас', 'трав', 'чэрв', 'ліп', 'жнів',
'вер', 'каст', 'ліст', 'снеж']
day_names = ['', 'панядзелак', 'аўторак', 'серада', 'чацвер', 'пятніца', 'субота', 'нядзеля']
day_abbreviations = ['', 'пн', 'ат', 'ср', 'чц', 'пт', 'сб', 'нд']
class PolishLocale(SlavicBaseLocale):
names = ['pl', 'pl_pl']
past = '{0} temu'
future = 'za {0}'
timeframes = {
'now': 'teraz',
'seconds': 'kilka sekund',
'minute': 'minutę',
'minutes': ['{0} minut', '{0} minuty', '{0} minut'],
'hour': 'godzina',
'hours': ['{0} godzin', '{0} godziny', '{0} godzin'],
'day': 'dzień',
'days': ['{0} dzień', '{0} dni', '{0} dni'],
'month': 'miesiąc',
'months': ['{0} miesiąc', '{0} miesiące', '{0} miesięcy'],
'year': 'rok',
'years': ['{0} rok', '{0} lata', '{0} lat'],
}
month_names = ['', 'styczeń', 'luty', 'marzec', 'kwiecień', 'maj',
'czerwiec', 'lipiec', 'sierpień', 'wrzesień', 'październik',
'listopad', 'grudzień']
month_abbreviations = ['', 'sty', 'lut', 'mar', 'kwi', 'maj', 'cze', 'lip',
'sie', 'wrz', 'paź', 'lis', 'gru']
day_names = ['', 'poniedziałek', 'wtorek', 'środa', 'czwartek', 'piątek',
'sobota', 'niedziela']
day_abbreviations = ['', 'Pn', 'Wt', 'Śr', 'Czw', 'Pt', 'So', 'Nd']
class RussianLocale(SlavicBaseLocale):
names = ['ru', 'ru_ru']
past = '{0} назад'
future = 'через {0}'
timeframes = {
'now': 'сейчас',
'seconds': 'несколько секунд',
'minute': 'минуту',
'minutes': ['{0} минуту', '{0} минуты', '{0} минут'],
'hour': 'час',
'hours': ['{0} час', '{0} часа', '{0} часов'],
'day': 'день',
'days': ['{0} день', '{0} дня', '{0} дней'],
'month': 'месяц',
'months': ['{0} месяц', '{0} месяца', '{0} месяцев'],
'year': 'год',
'years': ['{0} год', '{0} года', '{0} лет'],
}
month_names = ['', 'января', 'февраля', 'марта', 'апреля', 'мая', 'июня',
'июля', 'августа', 'сентября', 'октября', 'ноября', 'декабря']
month_abbreviations = ['', 'янв', 'фев', 'мар', 'апр', 'май', 'июн', 'июл',
'авг', 'сен', 'окт', 'ноя', 'дек']
day_names = ['', 'понедельник', 'вторник', 'среда', 'четверг', 'пятница',
'суббота', 'воскресенье']
day_abbreviations = ['', 'пн', 'вт', 'ср', 'чт', 'пт', 'сб', 'вс']
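# A quick illustration of the Slavic plural rules implemented above (values
# produced by SlavicBaseLocale._format_timeframe; shown as comments only):
#
#   RussianLocale()._format_timeframe('minutes', 1)    # -> '1 минуту'
#   RussianLocale()._format_timeframe('minutes', 3)    # -> '3 минуты'
#   RussianLocale()._format_timeframe('minutes', 5)    # -> '5 минут'
#   RussianLocale()._format_timeframe('minutes', 11)   # -> '11 минут'
#   RussianLocale()._format_timeframe('minutes', 21)   # -> '21 минуту'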
class BulgarianLocale(SlavicBaseLocale):
names = ['bg', 'bg_BG']
past = '{0} назад'
future = 'напред {0}'
timeframes = {
'now': 'сега',
'seconds': 'няколко секунди',
'minute': 'минута',
'minutes': ['{0} минута', '{0} минути', '{0} минути'],
'hour': 'час',
'hours': ['{0} час', '{0} часа', '{0} часа'],
'day': 'ден',
'days': ['{0} ден', '{0} дни', '{0} дни'],
'month': 'месец',
'months': ['{0} месец', '{0} месеца', '{0} месеца'],
'year': 'година',
'years': ['{0} година', '{0} години', '{0} години'],
}
month_names = ['', 'януари', 'февруари', 'март', 'април', 'май', 'юни',
'юли', 'август', 'септември', 'октомври', 'ноември', 'декември']
month_abbreviations = ['', 'ян', 'февр', 'март', 'апр', 'май', 'юни', 'юли',
'авг', 'септ', 'окт', 'ноем', 'дек']
day_names = ['', 'понеделник', 'вторник', 'сряда', 'четвъртък', 'петък',
'събота', 'неделя']
day_abbreviations = ['', 'пон', 'вт', 'ср', 'четв', 'пет', 'съб', 'нед']
class UkrainianLocale(SlavicBaseLocale):
names = ['ua', 'uk_ua']
past = '{0} тому'
future = 'за {0}'
timeframes = {
'now': 'зараз',
'seconds': 'кілька секунд',
'minute': 'хвилину',
'minutes': ['{0} хвилину', '{0} хвилини', '{0} хвилин'],
'hour': 'годину',
'hours': ['{0} годину', '{0} години', '{0} годин'],
'day': 'день',
'days': ['{0} день', '{0} дні', '{0} днів'],
'month': 'місяць',
'months': ['{0} місяць', '{0} місяці', '{0} місяців'],
'year': 'рік',
'years': ['{0} рік', '{0} роки', '{0} років'],
}
month_names = ['', 'січня', 'лютого', 'березня', 'квітня', 'травня', 'червня',
'липня', 'серпня', 'вересня', 'жовтня', 'листопада', 'грудня']
month_abbreviations = ['', 'січ', 'лют', 'бер', 'квіт', 'трав', 'черв', 'лип', 'серп',
'вер', 'жовт', 'лист', 'груд']
day_names = ['', 'понеділок', 'вівторок', 'середа', 'четвер', 'п’ятниця', 'субота', 'неділя']
day_abbreviations = ['', 'пн', 'вт', 'ср', 'чт', 'пт', 'сб', 'нд']
class _DeutschLocaleCommonMixin(object):
past = 'vor {0}'
future = 'in {0}'
timeframes = {
'now': 'gerade eben',
'seconds': 'Sekunden',
'minute': 'einer Minute',
'minutes': '{0} Minuten',
'hour': 'einer Stunde',
'hours': '{0} Stunden',
'day': 'einem Tag',
'days': '{0} Tagen',
'month': 'einem Monat',
'months': '{0} Monaten',
'year': 'einem Jahr',
'years': '{0} Jahren',
}
month_names = [
'', 'Januar', 'Februar', 'März', 'April', 'Mai', 'Juni', 'Juli',
'August', 'September', 'Oktober', 'November', 'Dezember'
]
month_abbreviations = [
'', 'Jan', 'Feb', 'Mär', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep',
'Okt', 'Nov', 'Dez'
]
day_names = [
'', 'Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',
'Samstag', 'Sonntag'
]
day_abbreviations = [
'', 'Mo', 'Di', 'Mi', 'Do', 'Fr', 'Sa', 'So'
]
def _ordinal_number(self, n):
return '{0}.'.format(n)
class GermanLocale(_DeutschLocaleCommonMixin, Locale):
names = ['de', 'de_de']
timeframes = _DeutschLocaleCommonMixin.timeframes.copy()
timeframes['days'] = '{0} Tagen'
class AustriaLocale(_DeutschLocaleCommonMixin, Locale):
names = ['de', 'de_at']
timeframes = _DeutschLocaleCommonMixin.timeframes.copy()
timeframes['days'] = '{0} Tage'
class NorwegianLocale(Locale):
names = ['nb', 'nb_no']
past = 'for {0} siden'
future = 'om {0}'
timeframes = {
'now': 'nå nettopp',
'seconds': 'noen sekunder',
'minute': 'ett minutt',
'minutes': '{0} minutter',
'hour': 'en time',
'hours': '{0} timer',
'day': 'en dag',
'days': '{0} dager',
'month': 'en måned',
'months': '{0} måneder',
'year': 'ett år',
'years': '{0} år',
}
month_names = ['', 'januar', 'februar', 'mars', 'april', 'mai', 'juni',
'juli', 'august', 'september', 'oktober', 'november',
'desember']
month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'mai', 'jun', 'jul',
'aug', 'sep', 'okt', 'nov', 'des']
day_names = ['', 'mandag', 'tirsdag', 'onsdag', 'torsdag', 'fredag',
'lørdag', 'søndag']
day_abbreviations = ['', 'ma', 'ti', 'on', 'to', 'fr', 'lø', 'sø']
class NewNorwegianLocale(Locale):
names = ['nn', 'nn_no']
past = 'for {0} sidan'
future = 'om {0}'
timeframes = {
'now': 'no nettopp',
'seconds': 'nokre sekund',
'minute': 'ett minutt',
'minutes': '{0} minutt',
'hour': 'ein time',
'hours': '{0} timar',
'day': 'ein dag',
'days': '{0} dagar',
        'month': 'ein månad',
'months': '{0} månader',
'year': 'eit år',
'years': '{0} år',
}
month_names = ['', 'januar', 'februar', 'mars', 'april', 'mai', 'juni',
'juli', 'august', 'september', 'oktober', 'november',
'desember']
month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'mai', 'jun', 'jul',
'aug', 'sep', 'okt', 'nov', 'des']
day_names = ['', 'måndag', 'tysdag', 'onsdag', 'torsdag', 'fredag',
'laurdag', 'sundag']
day_abbreviations = ['', 'må', 'ty', 'on', 'to', 'fr', 'la', 'su']
class PortugueseLocale(Locale):
names = ['pt', 'pt_pt']
past = 'há {0}'
future = 'em {0}'
timeframes = {
'now': 'agora',
'seconds': 'segundos',
'minute': 'um minuto',
'minutes': '{0} minutos',
'hour': 'uma hora',
'hours': '{0} horas',
'day': 'um dia',
'days': '{0} dias',
'month': 'um mês',
'months': '{0} meses',
'year': 'um ano',
'years': '{0} anos',
}
month_names = ['', 'janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho', 'julho',
'agosto', 'setembro', 'outubro', 'novembro', 'dezembro']
month_abbreviations = ['', 'jan', 'fev', 'mar', 'abr', 'maio', 'jun', 'jul', 'ago',
'set', 'out', 'nov', 'dez']
day_names = ['', 'segunda-feira', 'terça-feira', 'quarta-feira', 'quinta-feira', 'sexta-feira',
'sábado', 'domingo']
day_abbreviations = ['', 'seg', 'ter', 'qua', 'qui', 'sex', 'sab', 'dom']
class BrazilianPortugueseLocale(PortugueseLocale):
names = ['pt_br']
past = 'fazem {0}'
class TagalogLocale(Locale):
names = ['tl']
past = 'nakaraang {0}'
future = '{0} mula ngayon'
timeframes = {
'now': 'ngayon lang',
'seconds': 'segundo',
'minute': 'isang minuto',
'minutes': '{0} minuto',
'hour': 'isang oras',
'hours': '{0} oras',
'day': 'isang araw',
'days': '{0} araw',
'month': 'isang buwan',
'months': '{0} buwan',
'year': 'isang taon',
'years': '{0} taon',
}
month_names = ['', 'Enero', 'Pebrero', 'Marso', 'Abril', 'Mayo', 'Hunyo', 'Hulyo',
'Agosto', 'Setyembre', 'Oktubre', 'Nobyembre', 'Disyembre']
month_abbreviations = ['', 'Ene', 'Peb', 'Mar', 'Abr', 'May', 'Hun', 'Hul', 'Ago',
'Set', 'Okt', 'Nob', 'Dis']
day_names = ['', 'Lunes', 'Martes', 'Miyerkules', 'Huwebes', 'Biyernes', 'Sabado', 'Linggo']
day_abbreviations = ['', 'Lun', 'Mar', 'Miy', 'Huw', 'Biy', 'Sab', 'Lin']
class VietnameseLocale(Locale):
names = ['vi', 'vi_vn']
past = '{0} trước'
future = '{0} nữa'
timeframes = {
'now': 'hiện tại',
'seconds': 'giây',
'minute': 'một phút',
'minutes': '{0} phút',
'hour': 'một giờ',
'hours': '{0} giờ',
'day': 'một ngày',
'days': '{0} ngày',
'month': 'một tháng',
'months': '{0} tháng',
'year': 'một năm',
'years': '{0} năm',
}
month_names = ['', 'Tháng Một', 'Tháng Hai', 'Tháng Ba', 'Tháng Tư', 'Tháng Năm', 'Tháng Sáu', 'Tháng Bảy',
'Tháng Tám', 'Tháng Chín', 'Tháng Mười', 'Tháng Mười Một', 'Tháng Mười Hai']
month_abbreviations = ['', 'Tháng 1', 'Tháng 2', 'Tháng 3', 'Tháng 4', 'Tháng 5', 'Tháng 6', 'Tháng 7', 'Tháng 8',
'Tháng 9', 'Tháng 10', 'Tháng 11', 'Tháng 12']
day_names = ['', 'Thứ Hai', 'Thứ Ba', 'Thứ Tư', 'Thứ Năm', 'Thứ Sáu', 'Thứ Bảy', 'Chủ Nhật']
day_abbreviations = ['', 'Thứ 2', 'Thứ 3', 'Thứ 4', 'Thứ 5', 'Thứ 6', 'Thứ 7', 'CN']
class TurkishLocale(Locale):
names = ['tr', 'tr_tr']
past = '{0} önce'
future = '{0} sonra'
timeframes = {
'now': 'şimdi',
'seconds': 'saniye',
'minute': 'bir dakika',
'minutes': '{0} dakika',
'hour': 'bir saat',
'hours': '{0} saat',
'day': 'bir gün',
'days': '{0} gün',
'month': 'bir ay',
'months': '{0} ay',
        'year': 'bir yıl',
'years': '{0} yıl',
}
month_names = ['', 'Ocak', 'Şubat', 'Mart', 'Nisan', 'Mayıs', 'Haziran', 'Temmuz',
'Ağustos', 'Eylül', 'Ekim', 'Kasım', 'Aralık']
month_abbreviations = ['', 'Oca', 'Şub', 'Mar', 'Nis', 'May', 'Haz', 'Tem', 'Ağu',
'Eyl', 'Eki', 'Kas', 'Ara']
day_names = ['', 'Pazartesi', 'Salı', 'Çarşamba', 'Perşembe', 'Cuma', 'Cumartesi', 'Pazar']
day_abbreviations = ['', 'Pzt', 'Sal', 'Çar', 'Per', 'Cum', 'Cmt', 'Paz']
class ArabicLocale(Locale):
names = ['ar', 'ar_eg']
past = 'منذ {0}'
future = 'خلال {0}'
timeframes = {
'now': 'الآن',
'seconds': 'ثوان',
'minute': 'دقيقة',
'minutes': '{0} دقائق',
'hour': 'ساعة',
'hours': '{0} ساعات',
'day': 'يوم',
'days': '{0} أيام',
'month': 'شهر',
'months': '{0} شهور',
'year': 'سنة',
'years': '{0} سنوات',
}
month_names = ['', 'يناير', 'فبراير', 'مارس', 'أبريل', 'مايو', 'يونيو', 'يوليو',
'أغسطس', 'سبتمبر', 'أكتوبر', 'نوفمبر', 'ديسمبر']
month_abbreviations = ['', 'يناير', 'فبراير', 'مارس', 'أبريل', 'مايو', 'يونيو', 'يوليو',
'أغسطس', 'سبتمبر', 'أكتوبر', 'نوفمبر', 'ديسمبر']
day_names = ['', 'الاثنين', 'الثلاثاء', 'الأربعاء', 'الخميس', 'الجمعة', 'السبت', 'الأحد']
day_abbreviations = ['', 'اثنين', 'ثلاثاء', 'أربعاء', 'خميس', 'جمعة', 'سبت', 'أحد']
class IcelandicLocale(Locale):
def _format_timeframe(self, timeframe, delta):
timeframe = self.timeframes[timeframe]
if delta < 0:
timeframe = timeframe[0]
elif delta > 0:
timeframe = timeframe[1]
return timeframe.format(abs(delta))
names = ['is', 'is_is']
past = 'fyrir {0} síðan'
future = 'eftir {0}'
timeframes = {
'now': 'rétt í þessu',
'seconds': ('nokkrum sekúndum', 'nokkrar sekúndur'),
'minute': ('einni mínútu', 'eina mínútu'),
'minutes': ('{0} mínútum', '{0} mínútur'),
'hour': ('einum tíma', 'einn tíma'),
'hours': ('{0} tímum', '{0} tíma'),
'day': ('einum degi', 'einn dag'),
'days': ('{0} dögum', '{0} daga'),
'month': ('einum mánuði', 'einn mánuð'),
'months': ('{0} mánuðum', '{0} mánuði'),
'year': ('einu ári', 'eitt ár'),
'years': ('{0} árum', '{0} ár'),
}
meridians = {
'am': 'f.h.',
'pm': 'e.h.',
'AM': 'f.h.',
'PM': 'e.h.',
}
month_names = ['', 'janúar', 'febrúar', 'mars', 'apríl', 'maí', 'júní',
'júlí', 'ágúst', 'september', 'október', 'nóvember', 'desember']
month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'maí', 'jún',
'júl', 'ágú', 'sep', 'okt', 'nóv', 'des']
day_names = ['', 'mánudagur', 'þriðjudagur', 'miðvikudagur', 'fimmtudagur',
'föstudagur', 'laugardagur', 'sunnudagur']
day_abbreviations = ['', 'mán', 'þri', 'mið', 'fim', 'fös', 'lau', 'sun']
class DanishLocale(Locale):
names = ['da', 'da_dk']
past = 'for {0} siden'
future = 'efter {0}'
timeframes = {
'now': 'lige nu',
'seconds': 'et par sekunder',
'minute': 'et minut',
'minutes': '{0} minutter',
'hour': 'en time',
'hours': '{0} timer',
'day': 'en dag',
'days': '{0} dage',
'month': 'en måned',
'months': '{0} måneder',
'year': 'et år',
'years': '{0} år',
}
month_names = ['', 'januar', 'februar', 'marts', 'april', 'maj', 'juni',
'juli', 'august', 'september', 'oktober', 'november', 'december']
month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'maj', 'jun',
'jul', 'aug', 'sep', 'okt', 'nov', 'dec']
day_names = ['', 'mandag', 'tirsdag', 'onsdag', 'torsdag', 'fredag',
'lørdag', 'søndag']
day_abbreviations = ['', 'man', 'tir', 'ons', 'tor', 'fre', 'lør', 'søn']
class MalayalamLocale(Locale):
names = ['ml']
past = '{0} മുമ്പ്'
future = '{0} ശേഷം'
timeframes = {
'now': 'ഇപ്പോൾ',
'seconds': 'സെക്കന്റ്',
'minute': 'ഒരു മിനിറ്റ്',
'minutes': '{0} മിനിറ്റ്',
'hour': 'ഒരു മണിക്കൂർ',
'hours': '{0} മണിക്കൂർ',
'day': 'ഒരു ദിവസം ',
'days': '{0} ദിവസം ',
'month': 'ഒരു മാസം ',
'months': '{0} മാസം ',
'year': 'ഒരു വർഷം ',
'years': '{0} വർഷം ',
}
meridians = {
'am': 'രാവിലെ',
'pm': 'ഉച്ചക്ക് ശേഷം',
'AM': 'രാവിലെ',
'PM': 'ഉച്ചക്ക് ശേഷം',
}
month_names = ['', 'ജനുവരി', 'ഫെബ്രുവരി', 'മാർച്ച്', 'ഏപ്രിൽ ', 'മെയ് ', 'ജൂണ്', 'ജൂലൈ',
'ഓഗസ്റ്റ്', 'സെപ്റ്റംബർ', 'ഒക്ടോബർ', 'നവംബർ', 'ഡിസംബർ']
month_abbreviations = ['', 'ജനു', 'ഫെബ് ', 'മാർ', 'ഏപ്രിൽ', 'മേയ്', 'ജൂണ്', 'ജൂലൈ', 'ഓഗസ്റ',
'സെപ്റ്റ', 'ഒക്ടോ', 'നവം', 'ഡിസം']
day_names = ['', 'തിങ്കള്', 'ചൊവ്വ', 'ബുധന്', 'വ്യാഴം', 'വെള്ളി', 'ശനി', 'ഞായര്']
day_abbreviations = ['', 'തിങ്കള്', 'ചൊവ്വ', 'ബുധന്', 'വ്യാഴം', 'വെള്ളി', 'ശനി', 'ഞായര്']
class HindiLocale(Locale):
names = ['hi']
past = '{0} पहले'
future = '{0} बाद'
timeframes = {
'now': 'अभि',
'seconds': 'सेकंड्',
'minute': 'एक मिनट ',
'minutes': '{0} मिनट ',
'hour': 'एक घंट',
'hours': '{0} घंटे',
'day': 'एक दिन',
'days': '{0} दिन',
'month': 'एक माह ',
'months': '{0} महीने ',
'year': 'एक वर्ष ',
'years': '{0} साल ',
}
meridians = {
'am': 'सुबह',
'pm': 'शाम',
'AM': 'सुबह',
'PM': 'शाम',
}
month_names = ['', 'जनवरी', 'फ़रवरी', 'मार्च', 'अप्रैल ', 'मई', 'जून', 'जुलाई',
'आगस्त', 'सितम्बर', 'अकतूबर', 'नवेम्बर', 'दिसम्बर']
month_abbreviations = ['', 'जन', 'फ़र', 'मार्च', 'अप्रै', 'मई', 'जून', 'जुलाई', 'आग',
'सित', 'अकत', 'नवे', 'दिस']
day_names = ['', 'सोमवार', 'मंगलवार', 'बुधवार', 'गुरुवार', 'शुक्रवार', 'शनिवार', 'रविवार']
day_abbreviations = ['', 'सोम', 'मंगल', 'बुध', 'गुरुवार', 'शुक्र', 'शनि', 'रवि']
class CzechLocale(Locale):
names = ['cs', 'cs_cz']
timeframes = {
'now': 'Teď',
'seconds': {
'past': '{0} sekundami',
'future': ['{0} sekundy', '{0} sekund']
},
'minute': {'past': 'minutou', 'future': 'minutu', 'zero': '{0} minut'},
'minutes': {
'past': '{0} minutami',
'future': ['{0} minuty', '{0} minut']
},
'hour': {'past': 'hodinou', 'future': 'hodinu', 'zero': '{0} hodin'},
'hours': {
'past': '{0} hodinami',
'future': ['{0} hodiny', '{0} hodin']
},
'day': {'past': 'dnem', 'future': 'den', 'zero': '{0} dnů'},
'days': {
'past': '{0} dny',
'future': ['{0} dny', '{0} dnů']
},
'month': {'past': 'měsícem', 'future': 'měsíc', 'zero': '{0} měsíců'},
'months': {
'past': '{0} měsíci',
'future': ['{0} měsíce', '{0} měsíců']
},
'year': {'past': 'rokem', 'future': 'rok', 'zero': '{0} let'},
'years': {
'past': '{0} lety',
'future': ['{0} roky', '{0} let']
}
}
past = 'Před {0}'
future = 'Za {0}'
month_names = ['', 'leden', 'únor', 'březen', 'duben', 'květen', 'červen',
'červenec', 'srpen', 'září', 'říjen', 'listopad', 'prosinec']
month_abbreviations = ['', 'led', 'úno', 'bře', 'dub', 'kvě', 'čvn', 'čvc',
'srp', 'zář', 'říj', 'lis', 'pro']
day_names = ['', 'pondělí', 'úterý', 'středa', 'čtvrtek', 'pátek',
'sobota', 'neděle']
day_abbreviations = ['', 'po', 'út', 'st', 'čt', 'pá', 'so', 'ne']
def _format_timeframe(self, timeframe, delta):
'''Czech aware time frame format function, takes into account the differences between past and future forms.'''
form = self.timeframes[timeframe]
if isinstance(form, dict):
if delta == 0:
form = form['zero'] # And *never* use 0 in the singular!
elif delta > 0:
form = form['future']
else:
form = form['past']
delta = abs(delta)
if isinstance(form, list):
if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[0]
else:
form = form[1]
return form.format(delta)
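# Illustrative sketch (not part of the original source): the Czech formatter
# above first distinguishes past, future and zero forms, then applies the
# usual 2-4 vs. 5+ plural split to the future list. Roughly:
#
#     cs = CzechLocale()
#     cs._format_timeframe('hours', -2)  # past   -> '2 hodinami'
#     cs._format_timeframe('hours', 2)   # future -> '2 hodiny'
#     cs._format_timeframe('hours', 5)   # future -> '5 hodin'
#     cs._format_timeframe('hour', 0)    # zero   -> '0 hodin'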
class FarsiLocale(Locale):
names = ['fa', 'fa_ir']
past = '{0} قبل'
future = 'در {0}'
timeframes = {
'now': 'اکنون',
'seconds': 'ثانیه',
'minute': 'یک دقیقه',
'minutes': '{0} دقیقه',
'hour': 'یک ساعت',
'hours': '{0} ساعت',
'day': 'یک روز',
'days': '{0} روز',
'month': 'یک ماه',
'months': '{0} ماه',
'year': 'یک سال',
'years': '{0} سال',
}
meridians = {
'am': 'قبل از ظهر',
'pm': 'بعد از ظهر',
'AM': 'قبل از ظهر',
'PM': 'بعد از ظهر',
}
month_names = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
month_abbreviations = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
day_names = ['', 'دو شنبه', 'سه شنبه', 'چهارشنبه', 'پنجشنبه', 'جمعه', 'شنبه', 'یکشنبه']
day_abbreviations = ['', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
class MacedonianLocale(Locale):
names = ['mk', 'mk_mk']
past = 'пред {0}'
future = 'за {0}'
timeframes = {
'now': 'сега',
'seconds': 'секунди',
'minute': 'една минута',
'minutes': '{0} минути',
'hour': 'еден саат',
'hours': '{0} саати',
'day': 'еден ден',
'days': '{0} дена',
'month': 'еден месец',
'months': '{0} месеци',
'year': 'една година',
'years': '{0} години',
}
meridians = {
'am': 'дп',
'pm': 'пп',
'AM': 'претпладне',
'PM': 'попладне',
}
month_names = ['', 'Јануари', 'Февруари', 'Март', 'Април', 'Мај', 'Јуни', 'Јули', 'Август', 'Септември', 'Октомври',
'Ноември', 'Декември']
month_abbreviations = ['', 'Јан.', ' Фев.', ' Мар.', ' Апр.', ' Мај', ' Јун.', ' Јул.', ' Авг.', ' Септ.', ' Окт.',
' Ноем.', ' Декем.']
day_names = ['', 'Понеделник', ' Вторник', ' Среда', ' Четврток', ' Петок', ' Сабота', ' Недела']
day_abbreviations = ['', 'Пон.', ' Вт.', ' Сре.', ' Чет.', ' Пет.', ' Саб.', ' Нед.']
class HebrewLocale(Locale):
names = ['he', 'he_IL']
past = 'לפני {0}'
future = 'בעוד {0}'
timeframes = {
'now': 'הרגע',
'seconds': 'שניות',
'minute': 'דקה',
'minutes': '{0} דקות',
'hour': 'שעה',
'hours': '{0} שעות',
'2-hours': 'שעתיים',
'day': 'יום',
'days': '{0} ימים',
'2-days': 'יומיים',
'month': 'חודש',
'months': '{0} חודשים',
'2-months': 'חודשיים',
'year': 'שנה',
'years': '{0} שנים',
'2-years': 'שנתיים',
}
meridians = {
'am': 'לפנ"צ',
'pm': 'אחר"צ',
'AM': 'לפני הצהריים',
'PM': 'אחרי הצהריים',
}
month_names = ['', 'ינואר', 'פברואר', 'מרץ', 'אפריל', 'מאי', 'יוני', 'יולי',
'אוגוסט', 'ספטמבר', 'אוקטובר', 'נובמבר', 'דצמבר']
month_abbreviations = ['', 'ינו׳', 'פבר׳', 'מרץ', 'אפר׳', 'מאי', 'יוני', 'יולי', 'אוג׳',
'ספט׳', 'אוק׳', 'נוב׳', 'דצמ׳']
day_names = ['', 'שני', 'שלישי', 'רביעי', 'חמישי', 'שישי', 'שבת', 'ראשון']
day_abbreviations = ['', 'ב׳', 'ג׳', 'ד׳', 'ה׳', 'ו׳', 'ש׳', 'א׳']
def _format_timeframe(self, timeframe, delta):
        '''Handle the Hebrew dual ("couple of <timeframe>") form when the delta is exactly 2.'''
couple = '2-{0}'.format(timeframe)
if abs(delta) == 2 and couple in self.timeframes:
return self.timeframes[couple].format(abs(delta))
else:
return self.timeframes[timeframe].format(abs(delta))
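# Illustrative sketch (not part of the original source): the special '2-...'
# keys above implement the Hebrew dual form, so a delta of exactly 2 picks the
# dedicated word instead of the generic plural:
#
#     he = HebrewLocale()
#     he._format_timeframe('days', 2)   # -> 'יומיים'
#     he._format_timeframe('days', 3)   # -> '3 ימים'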
class MarathiLocale(Locale):
names = ['mr']
past = '{0} आधी'
future = '{0} नंतर'
timeframes = {
'now': 'सद्य',
'seconds': 'सेकंद',
'minute': 'एक मिनिट ',
'minutes': '{0} मिनिट ',
'hour': 'एक तास',
'hours': '{0} तास',
'day': 'एक दिवस',
'days': '{0} दिवस',
'month': 'एक महिना ',
'months': '{0} महिने ',
'year': 'एक वर्ष ',
'years': '{0} वर्ष ',
}
meridians = {
'am': 'सकाळ',
'pm': 'संध्याकाळ',
'AM': 'सकाळ',
'PM': 'संध्याकाळ',
}
month_names = ['', 'जानेवारी', 'फेब्रुवारी', 'मार्च', 'एप्रिल', 'मे', 'जून', 'जुलै',
'अॉगस्ट', 'सप्टेंबर', 'अॉक्टोबर', 'नोव्हेंबर', 'डिसेंबर']
month_abbreviations = ['', 'जान', 'फेब्रु', 'मार्च', 'एप्रि', 'मे', 'जून', 'जुलै', 'अॉग',
'सप्टें', 'अॉक्टो', 'नोव्हें', 'डिसें']
day_names = ['', 'सोमवार', 'मंगळवार', 'बुधवार', 'गुरुवार', 'शुक्रवार', 'शनिवार', 'रविवार']
day_abbreviations = ['', 'सोम', 'मंगळ', 'बुध', 'गुरु', 'शुक्र', 'शनि', 'रवि']
def _map_locales():
locales = {}
for cls_name, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if issubclass(cls, Locale):
for name in cls.names:
locales[name.lower()] = cls
return locales
class CatalanLocale(Locale):
names = ['ca', 'ca_es', 'ca_ad', 'ca_fr', 'ca_it']
past = 'Fa {0}'
future = 'En {0}'
timeframes = {
'now': 'Ara mateix',
'seconds': 'segons',
'minute': '1 minut',
'minutes': '{0} minuts',
'hour': 'una hora',
'hours': '{0} hores',
'day': 'un dia',
'days': '{0} dies',
'month': 'un mes',
'months': '{0} mesos',
'year': 'un any',
'years': '{0} anys',
}
month_names = ['', 'Gener', 'Febrer', 'Març', 'Abril', 'Maig', 'Juny', 'Juliol', 'Agost', 'Setembre', 'Octubre', 'Novembre', 'Desembre']
month_abbreviations = ['', 'Gener', 'Febrer', 'Març', 'Abril', 'Maig', 'Juny', 'Juliol', 'Agost', 'Setembre', 'Octubre', 'Novembre', 'Desembre']
day_names = ['', 'Dilluns', 'Dimarts', 'Dimecres', 'Dijous', 'Divendres', 'Dissabte', 'Diumenge']
day_abbreviations = ['', 'Dilluns', 'Dimarts', 'Dimecres', 'Dijous', 'Divendres', 'Dissabte', 'Diumenge']
class BasqueLocale(Locale):
names = ['eu', 'eu_eu']
past = 'duela {0}'
future = '{0}' # I don't know what's the right phrase in Basque for the future.
timeframes = {
'now': 'Orain',
'seconds': 'segundu',
'minute': 'minutu bat',
'minutes': '{0} minutu',
'hour': 'ordu bat',
'hours': '{0} ordu',
'day': 'egun bat',
'days': '{0} egun',
'month': 'hilabete bat',
        'months': '{0} hilabete',
'year': 'urte bat',
'years': '{0} urte',
}
month_names = ['', 'urtarrilak', 'otsailak', 'martxoak', 'apirilak', 'maiatzak', 'ekainak', 'uztailak', 'abuztuak', 'irailak', 'urriak', 'azaroak', 'abenduak']
month_abbreviations = ['', 'urt', 'ots', 'mar', 'api', 'mai', 'eka', 'uzt', 'abu', 'ira', 'urr', 'aza', 'abe']
    day_names = ['', 'astelehena', 'asteartea', 'asteazkena', 'osteguna', 'ostirala', 'larunbata', 'igandea']
day_abbreviations = ['', 'al', 'ar', 'az', 'og', 'ol', 'lr', 'ig']
class HungarianLocale(Locale):
names = ['hu', 'hu_hu']
past = '{0} ezelőtt'
future = '{0} múlva'
timeframes = {
'now': 'éppen most',
'seconds': {
'past': 'másodpercekkel',
'future': 'pár másodperc'
},
'minute': {'past': 'egy perccel', 'future': 'egy perc'},
'minutes': {'past': '{0} perccel', 'future': '{0} perc'},
'hour': {'past': 'egy órával', 'future': 'egy óra'},
'hours': {'past': '{0} órával', 'future': '{0} óra'},
'day': {
'past': 'egy nappal',
'future': 'egy nap'
},
'days': {
'past': '{0} nappal',
'future': '{0} nap'
},
'month': {'past': 'egy hónappal', 'future': 'egy hónap'},
'months': {'past': '{0} hónappal', 'future': '{0} hónap'},
'year': {'past': 'egy évvel', 'future': 'egy év'},
'years': {'past': '{0} évvel', 'future': '{0} év'},
}
month_names = ['', 'január', 'február', 'március', 'április', 'május',
'június', 'július', 'augusztus', 'szeptember',
'október', 'november', 'december']
month_abbreviations = ['', 'jan', 'febr', 'márc', 'ápr', 'máj', 'jún',
'júl', 'aug', 'szept', 'okt', 'nov', 'dec']
day_names = ['', 'hétfő', 'kedd', 'szerda', 'csütörtök', 'péntek',
'szombat', 'vasárnap']
day_abbreviations = ['', 'hét', 'kedd', 'szer', 'csüt', 'pént',
'szom', 'vas']
meridians = {
'am': 'de',
'pm': 'du',
'AM': 'DE',
'PM': 'DU',
}
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
if isinstance(form, dict):
if delta > 0:
form = form['future']
else:
form = form['past']
return form.format(abs(delta))
class EsperantoLocale(Locale):
names = ['eo', 'eo_xx']
past = 'antaŭ {0}'
future = 'post {0}'
timeframes = {
'now': 'nun',
'seconds': 'kelkaj sekundoj',
'minute': 'unu minuto',
'minutes': '{0} minutoj',
        'hour': 'unu horo',
'hours': '{0} horoj',
'day': 'unu tago',
'days': '{0} tagoj',
'month': 'unu monato',
'months': '{0} monatoj',
'year': 'unu jaro',
'years': '{0} jaroj',
}
month_names = ['', 'januaro', 'februaro', 'marto', 'aprilo', 'majo',
'junio', 'julio', 'aŭgusto', 'septembro', 'oktobro',
'novembro', 'decembro']
month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'maj', 'jun',
'jul', 'aŭg', 'sep', 'okt', 'nov', 'dec']
day_names = ['', 'lundo', 'mardo', 'merkredo', 'ĵaŭdo', 'vendredo',
'sabato', 'dimanĉo']
day_abbreviations = ['', 'lun', 'mar', 'mer', 'ĵaŭ', 'ven',
'sab', 'dim']
meridians = {
'am': 'atm',
'pm': 'ptm',
'AM': 'ATM',
'PM': 'PTM',
}
ordinal_day_re = r'((?P<value>[1-3]?[0-9](?=a))a)'
def _ordinal_number(self, n):
return '{0}a'.format(n)
class ThaiLocale(Locale):
names = ['th', 'th_th']
past = '{0}{1}ที่ผ่านมา'
future = 'ในอีก{1}{0}'
timeframes = {
'now': 'ขณะนี้',
'seconds': 'ไม่กี่วินาที',
'minute': '1 นาที',
'minutes': '{0} นาที',
'hour': '1 ชั่วโมง',
'hours': '{0} ชั่วโมง',
'day': '1 วัน',
'days': '{0} วัน',
'month': '1 เดือน',
'months': '{0} เดือน',
'year': '1 ปี',
'years': '{0} ปี',
}
month_names = ['', 'มกราคม', 'กุมภาพันธ์', 'มีนาคม', 'เมษายน',
'พฤษภาคม', 'มิถุนายน', 'กรกฏาคม', 'สิงหาคม',
'กันยายน', 'ตุลาคม', 'พฤศจิกายน', 'ธันวาคม']
month_abbreviations = ['', 'ม.ค.', 'ก.พ.', 'มี.ค.', 'เม.ย.', 'พ.ค.',
'มิ.ย.', 'ก.ค.', 'ส.ค.', 'ก.ย.', 'ต.ค.',
'พ.ย.', 'ธ.ค.']
day_names = ['', 'จันทร์', 'อังคาร', 'พุธ', 'พฤหัสบดี', 'ศุกร์',
'เสาร์', 'อาทิตย์']
day_abbreviations = ['', 'จ', 'อ', 'พ', 'พฤ', 'ศ', 'ส', 'อา']
meridians = {
'am': 'am',
'pm': 'pm',
'AM': 'AM',
'PM': 'PM',
}
BE_OFFSET = 543
def year_full(self, year):
        '''Thai always uses the Buddhist Era (BE), which is CE + 543'''
year += self.BE_OFFSET
return '{0:04d}'.format(year)
def year_abbreviation(self, year):
        '''Thai always uses the Buddhist Era (BE), which is CE + 543'''
year += self.BE_OFFSET
return '{0:04d}'.format(year)[2:]
def _format_relative(self, humanized, timeframe, delta):
'''Thai normally doesn't have any space between words'''
if timeframe == 'now':
return humanized
space = '' if timeframe == 'seconds' else ' '
direction = self.past if delta < 0 else self.future
return direction.format(humanized, space)
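# Illustrative sketch (not part of the original source): Thai years are
# reported in the Buddhist Era and relative phrases are joined with no space
# for the 'seconds' timeframe, so roughly:
#
#     th = ThaiLocale()
#     th.year_full(2017)                                 # -> '2560'
#     th._format_relative('1 วัน', 'day', 1)              # -> 'ในอีก 1 วัน'
#     th._format_relative('ไม่กี่วินาที', 'seconds', 1)      # -> 'ในอีกไม่กี่วินาที'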
class BengaliLocale(Locale):
names = ['bn', 'bn_bd', 'bn_in']
past = '{0} আগে'
future = '{0} পরে'
timeframes = {
'now': 'এখন',
'seconds': 'সেকেন্ড',
'minute': 'এক মিনিট',
'minutes': '{0} মিনিট',
'hour': 'এক ঘণ্টা',
'hours': '{0} ঘণ্টা',
'day': 'এক দিন',
'days': '{0} দিন',
'month': 'এক মাস',
'months': '{0} মাস ',
'year': 'এক বছর',
'years': '{0} বছর',
}
meridians = {
'am': 'সকাল',
'pm': 'বিকাল',
'AM': 'সকাল',
'PM': 'বিকাল',
}
month_names = ['', 'জানুয়ারি', 'ফেব্রুয়ারি', 'মার্চ', 'এপ্রিল', 'মে', 'জুন', 'জুলাই',
'আগস্ট', 'সেপ্টেম্বর', 'অক্টোবর', 'নভেম্বর', 'ডিসেম্বর']
month_abbreviations = ['', 'জানু', 'ফেব', 'মার্চ', 'এপ্রি', 'মে', 'জুন', 'জুল',
'অগা','সেপ্ট', 'অক্টো', 'নভে', 'ডিসে']
day_names = ['', 'সোমবার', 'মঙ্গলবার', 'বুধবার', 'বৃহস্পতিবার', 'শুক্রবার', 'শনিবার', 'রবিবার']
day_abbreviations = ['', 'সোম', 'মঙ্গল', 'বুধ', 'বৃহঃ', 'শুক্র', 'শনি', 'রবি']
def _ordinal_number(self, n):
if n > 10 or n == 0:
return '{0}তম'.format(n)
if n in [1, 5, 7, 8, 9, 10]:
return '{0}ম'.format(n)
if n in [2, 3]:
return '{0}য়'.format(n)
if n == 4:
return '{0}র্থ'.format(n)
if n == 6:
return '{0}ষ্ঠ'.format(n)
class RomanshLocale(Locale):
names = ['rm', 'rm_ch']
past = 'avant {0}'
future = 'en {0}'
timeframes = {
'now': 'en quest mument',
'seconds': 'secundas',
'minute': 'ina minuta',
'minutes': '{0} minutas',
'hour': 'in\'ura',
'hours': '{0} ura',
'day': 'in di',
'days': '{0} dis',
'month': 'in mais',
'months': '{0} mais',
'year': 'in onn',
'years': '{0} onns',
}
month_names = [
'', 'schaner', 'favrer', 'mars', 'avrigl', 'matg', 'zercladur',
'fanadur', 'avust', 'settember', 'october', 'november', 'december'
]
month_abbreviations = [
'', 'schan', 'fav', 'mars', 'avr', 'matg', 'zer', 'fan', 'avu',
'set', 'oct', 'nov', 'dec'
]
day_names = [
'', 'glindesdi', 'mardi', 'mesemna', 'gievgia', 'venderdi',
'sonda', 'dumengia'
]
day_abbreviations = [
'', 'gli', 'ma', 'me', 'gie', 've', 'so', 'du'
]
_locales = _map_locales()
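# Illustrative sketch (not part of the original source): _map_locales() walks
# every Locale subclass defined in this module and registers it under each of
# its lowercased names, so resolving a locale is a plain dict lookup. A helper
# along these lines (hypothetical, not shown in this file) would be enough:
#
#     def get_locale(name):
#         locale_cls = _locales.get(name.lower())
#         if locale_cls is None:
#             raise ValueError('Unsupported locale {!r}'.format(name))
#         return locale_cls()
#
#     get_locale('pt_br')   # -> BrazilianPortugueseLocale instance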
|
the-stack_106_24845
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <[email protected]>
# MIT License
#
"""
Calculate the mean values of the cube.
"""
import argparse
import numpy as np
def main():
parser = argparse.ArgumentParser(
description="Calculate the mean value of the data cube")
parser.add_argument("-d", "--dtype", default="float32",
help="NumPy dtype of data cubes (default: float32)")
parser.add_argument("infiles", nargs="+", help="input data cubes")
args = parser.parse_args()
print("# filename:\t\t mean\t Nside(cubic)")
for f in args.infiles:
cube = np.fromfile(open(f, "rb"), dtype=args.dtype)
sidelen = round(cube.shape[0] ** (1.0/3))
mean = cube.mean()
print("%s:\t%g\t\t%d" % (f, mean, sidelen))
if __name__ == "__main__":
main()
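# Example invocation (illustrative; the script and cube file names are
# placeholders, not taken from the original repository):
#
#     $ python calc_cube_mean.py -d float32 cube_z10.0.dat cube_z9.5.dat
#
# Each input is assumed to be a raw binary dump of a cubic box, so the side
# length is recovered as round(N ** (1/3)) from the flat array length N.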
|
the-stack_106_24849
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Storage to BigQuery operator.
"""
import json
from typing import Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
# pylint: disable=too-many-instance-attributes
class GCSToBigQueryOperator(BaseOperator):
"""
Loads files from Google Cloud Storage into BigQuery.
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google Cloud Storage object name. The object in
Google Cloud Storage must be a JSON file with the schema fields in it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToBigQueryOperator`
:param bucket: The bucket to load from. (templated)
:type bucket: str
:param source_objects: List of Google Cloud Storage URIs to load from. (templated)
If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
:type source_objects: list[str]
:param destination_project_dataset_table: The dotted
``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to load data into.
If ``<project>`` is not included, project will be the project defined in
the connection json. (templated)
:type destination_project_dataset_table: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Should not be set when source_format is 'DATASTORE_BACKUP'.
Parameter must be defined if 'schema_object' is null and autodetect is False.
:type schema_fields: list
:param schema_object: If set, a GCS object path pointing to a .json file that
contains the schema for the table. (templated)
Parameter must be defined if 'schema_fields' is null and autodetect is False.
:type schema_object: str
:param source_format: File format to export.
:type source_format: str
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result. Only applicable to CSV, ignored
for other formats.
:type allow_jagged_rows: bool
:param encoding: The character encoding of the data. See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).csvOptions.encoding
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:param max_id_key: If set, the name of a column in the BigQuery table
that's to be loaded. This will be used to select the MAX value from
BigQuery after the load occurs. The results will be returned by the
execute() command, which in turn gets stored in XCom for future
operators to use. This can be helpful with incremental loads--during
future executions, you can pick up from the max ID.
:type max_id_key: str
:param bigquery_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the BigQuery service.
:type bigquery_conn_id: str
:param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
and interact with the Google Cloud Storage service.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: list
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param external_table: Flag to specify if the destination table should be
a BigQuery external table. Default Value is False.
:type external_table: bool
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
Note that 'field' is not available in concurrency with
dataset.table$partition.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
Not applicable for external tables.
:type cluster_fields: list[str]
:param autodetect: [Optional] Indicates if we should automatically infer the
options and schema for CSV and JSON sources. (Default: ``True``).
        Parameter must be set to True if 'schema_fields' and 'schema_object' are undefined.
        It is suggested to set this to True if the table is created outside of Airflow.
:type autodetect: bool
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
:param location: [Optional] The geographic location of the job. Required except for US and EU.
See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
'bucket',
'source_objects',
'schema_object',
'destination_project_dataset_table',
'impersonation_chain',
)
template_ext = ('.sql',)
ui_color = '#f0eee4'
# pylint: disable=too-many-locals,too-many-arguments
@apply_defaults
def __init__(
self,
*,
bucket,
source_objects,
destination_project_dataset_table,
schema_fields=None,
schema_object=None,
source_format='CSV',
compression='NONE',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
encoding="UTF-8",
max_id_key=None,
bigquery_conn_id='google_cloud_default',
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
schema_update_options=(),
src_fmt_configs=None,
external_table=False,
time_partitioning=None,
cluster_fields=None,
autodetect=True,
encryption_configuration=None,
location=None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
):
super().__init__(**kwargs)
# GCS config
if src_fmt_configs is None:
src_fmt_configs = {}
if time_partitioning is None:
time_partitioning = {}
self.bucket = bucket
self.source_objects = source_objects
self.schema_object = schema_object
# BQ config
self.destination_project_dataset_table = destination_project_dataset_table
self.schema_fields = schema_fields
self.source_format = source_format
self.compression = compression
self.create_disposition = create_disposition
self.skip_leading_rows = skip_leading_rows
self.write_disposition = write_disposition
self.field_delimiter = field_delimiter
self.max_bad_records = max_bad_records
self.quote_character = quote_character
self.ignore_unknown_values = ignore_unknown_values
self.allow_quoted_newlines = allow_quoted_newlines
self.allow_jagged_rows = allow_jagged_rows
self.external_table = external_table
self.encoding = encoding
self.max_id_key = max_id_key
self.bigquery_conn_id = bigquery_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
self.schema_update_options = schema_update_options
self.src_fmt_configs = src_fmt_configs
self.time_partitioning = time_partitioning
self.cluster_fields = cluster_fields
self.autodetect = autodetect
self.encryption_configuration = encryption_configuration
self.location = location
self.impersonation_chain = impersonation_chain
def execute(self, context):
bq_hook = BigQueryHook(
bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
if not self.schema_fields:
if self.schema_object and self.source_format != 'DATASTORE_BACKUP':
gcs_hook = GCSHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
schema_fields = json.loads(gcs_hook.download(self.bucket, self.schema_object).decode("utf-8"))
elif self.schema_object is None and self.autodetect is False:
raise AirflowException(
'At least one of `schema_fields`, ' '`schema_object`, or `autodetect` must be passed.'
)
else:
schema_fields = None
else:
schema_fields = self.schema_fields
source_uris = [
'gs://{}/{}'.format(self.bucket, source_object) for source_object in self.source_objects
]
conn = bq_hook.get_conn()
cursor = conn.cursor()
if self.external_table:
cursor.create_external_table(
external_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
compression=self.compression,
skip_leading_rows=self.skip_leading_rows,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
ignore_unknown_values=self.ignore_unknown_values,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
encoding=self.encoding,
src_fmt_configs=self.src_fmt_configs,
encryption_configuration=self.encryption_configuration,
)
else:
cursor.run_load(
destination_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
autodetect=self.autodetect,
create_disposition=self.create_disposition,
skip_leading_rows=self.skip_leading_rows,
write_disposition=self.write_disposition,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
ignore_unknown_values=self.ignore_unknown_values,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
encoding=self.encoding,
schema_update_options=self.schema_update_options,
src_fmt_configs=self.src_fmt_configs,
time_partitioning=self.time_partitioning,
cluster_fields=self.cluster_fields,
encryption_configuration=self.encryption_configuration,
)
if cursor.use_legacy_sql:
escaped_table_name = f'[{self.destination_project_dataset_table}]'
else:
escaped_table_name = f'`{self.destination_project_dataset_table}`'
if self.max_id_key:
cursor.execute('SELECT MAX({}) FROM {}'.format(self.max_id_key, escaped_table_name))
row = cursor.fetchone()
max_id = row[0] if row[0] else 0
self.log.info(
'Loaded BQ data with max %s.%s=%s',
self.destination_project_dataset_table,
self.max_id_key,
max_id,
)
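# Illustrative usage sketch (not part of the original module; the bucket,
# object pattern and table names below are placeholders):
#
#     load_csv = GCSToBigQueryOperator(
#         task_id='gcs_to_bq_example',
#         bucket='my-bucket',
#         source_objects=['data/sales-*.csv'],
#         destination_project_dataset_table='my_project.my_dataset.sales',
#         source_format='CSV',
#         skip_leading_rows=1,
#         write_disposition='WRITE_TRUNCATE',
#         autodetect=True,
#     )
#
# With autodetect=True neither schema_fields nor schema_object is required;
# otherwise one of them must be supplied (see the check in execute()).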
|
the-stack_106_24850
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="stream", parent_name="ohlc", **kwargs):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Stream"),
data_docs=kwargs.pop(
"data_docs",
"""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
""",
),
**kwargs
)
|
the-stack_106_24851
|
"""
LED.py
Code by Sammy Haq
https://github.com/sammyhaq
Child class of OutputComponent that adds LED-specific functions.
"""
from OutputComponent import OutputComponent
import time
class LED(OutputComponent):
def __init__(self, pin):
OutputComponent.__init__(self, pin)
def breathe(self, duration=5, delay=0.05):
self.pulse.start(0)
timerEnd = time.time() + duration
while (time.time() < timerEnd):
for dutyCycle in range(0, 101, 5):
self.pulse.ChangeDutyCycle(dutyCycle)
time.sleep(delay)
for dutyCycle in range(100, -1, -5):
self.pulse.ChangeDutyCycle(dutyCycle)
time.sleep(delay)
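# Illustrative usage sketch (not part of the original module; the pin number
# is a placeholder and OutputComponent is assumed to handle GPIO setup and
# expose the `pulse` PWM object used above):
#
#     led = LED(18)
#     led.breathe(duration=5, delay=0.05)  # fade up and down for ~5 seconds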
|
the-stack_106_24852
|
"""
=========
SMOTE SVM
=========
An illustration of the SMOTE SVM over-sampling method.
"""
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from unbalanced_dataset.over_sampling import SMOTE
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply SMOTE SVM over-sampling
sm = SMOTE(kind='svm')
X_resampled, y_resampled = sm.fit_transform(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
label="Class #0", alpha=.5, edgecolor=almost_black,
facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
label="Class #1", alpha=.5, edgecolor=almost_black,
facecolor=palette[2], linewidth=0.15)
ax2.set_title('SMOTE svm')
plt.show()
|
the-stack_106_24853
|
# -----------------------------------------------------------
# Stacked Cross Attention Network implementation based on
# https://arxiv.org/abs/1803.08024.
# "Stacked Cross Attention for Image-Text Matching"
# Kuang-Huei Lee, Xi Chen, Gang Hua, Houdong Hu, Xiaodong He
#
# Writen by Kuang-Huei Lee, 2018
# ---------------------------------------------------------------
"""Evaluation"""
from __future__ import print_function
import os
import sys
from data import get_test_loader
import time
import numpy as np
from vocab import Vocabulary, deserialize_vocab # NOQA
import torch
from model import SCAN, xattn_score_t2i, xattn_score_i2t, get_score_attn_for_one_pair
from collections import OrderedDict
import time
import pdb
# from torch.autograd import Variable
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=0):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / (.0001 + self.count)
def __str__(self):
"""String representation for logging
"""
# for values that should be recorded exactly e.g. iteration number
if self.count == 0:
return str(self.val)
# for stats
return '%.4f (%.4f)' % (self.val, self.avg)
class LogCollector(object):
"""A collection of logging objects that can change from train to val"""
def __init__(self):
# to keep the order of logged variables deterministic
self.meters = OrderedDict()
def update(self, k, v, n=0):
# create a new meter if previously not recorded
if k not in self.meters:
self.meters[k] = AverageMeter()
self.meters[k].update(v, n)
def __str__(self):
"""Concatenate the meters in one log line
"""
s = ''
# for i, (k, v) in enumerate(self.meters.iteritems()):
for i, (k, v) in enumerate(self.meters.items()):
if i > 0:
s += ' '
s += k + ' ' + str(v)
return s
def tb_log(self, tb_logger, prefix='', step=None):
"""Log using tensorboard
"""
# for k, v in self.meters.iteritems():
for k, v in self.meters.items():
tb_logger.log_value(prefix + k, v.val, step=step)
def encode_data(model, data_loader, log_step=10, logging=print):
"""Encode all images and captions loadable by `data_loader`
"""
batch_time = AverageMeter()
val_logger = LogCollector()
# switch to evaluate mode
model.val_start()
end = time.time()
# np array to keep all the embeddings
img_embs = None
cap_embs = None
cap_lens = None
cap_inds=[]
img_inds=[]
tag_masks=[]
tmp_tag_masks=[]
max_n_word = 0
for i, (images, captions, lengths, ids, img_ids, tag) in enumerate(data_loader):
max_n_word = max(max_n_word, max(lengths))
cap_inds.extend(ids)
img_inds.extend(img_ids)
#========================
tmp_tag_masks.extend(tag)
#========================
#============================
for i in range(len(cap_inds)):
tag_masks.append(tmp_tag_masks[cap_inds.index(i)])
#============================
for i, (images, captions, lengths, ids, img_ids, _) in enumerate(data_loader):
# make sure val logger is used
with torch.no_grad():
model.logger = val_logger
# compute the embeddings
img_emb, cap_emb, cap_len = model.forward_emb(images, captions, lengths, volatile=True)
#print(img_emb)
if img_embs is None:
if img_emb.dim() == 3:
img_embs = np.zeros((len(data_loader.dataset), img_emb.size(1), img_emb.size(2)))
else:
img_embs = np.zeros((len(data_loader.dataset), img_emb.size(1)))
cap_embs = np.zeros((len(data_loader.dataset), max_n_word, cap_emb.size(2)))
cap_lens = [0] * len(data_loader.dataset)
# cache embeddings
img_embs[ids] = img_emb.data.cpu().numpy().copy()
cap_embs[ids,:max(lengths),:] = cap_emb.data.cpu().numpy().copy()
for j, nid in enumerate(ids):
cap_lens[nid] = cap_len[j]
# measure accuracy and record loss
# model.forward_loss(img_emb, cap_emb, cap_len)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % log_step == 0:
logging('Test: [{0}/{1}]\t'
'{e_log}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
.format(
i, len(data_loader), batch_time=batch_time,
e_log=str(model.logger)))
del images, captions
assert tag_masks[0].size(0) == cap_lens[0]
return img_embs, cap_embs, cap_lens, cap_inds, img_inds, tag_masks
def evalrank(model_path, data_path=None, split='dev', fold5=False):
"""
Evaluate a trained model on either dev or test. If `fold5=True`, 5 fold
cross-validation is done (only for MSCOCO). Otherwise, the full data is
used for evaluation.
"""
# load model and options
with torch.no_grad():
checkpoint = torch.load(model_path)
opt = checkpoint['opt']
print(opt)
if data_path is not None:
opt.data_path = data_path
#=========================================
if 'pos' not in opt:
opt.pos = False
#=========================================
# load vocabulary used by the model
vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
opt.vocab_size = len(vocab)
# construct model
model = SCAN(opt)
# load model state
model.load_state_dict(checkpoint['model'])
print('Loading dataset')
data_loader = get_test_loader(split, opt.data_name, vocab,
opt.batch_size, opt.workers, opt)
print('Computing results...')
img_embs, cap_embs, cap_lens, cap_inds, img_inds, tag_masks = encode_data(model, data_loader)
print('Images: %d, Captions: %d' %
(img_embs.shape[0] / 5, cap_embs.shape[0]))
if not fold5:
# no cross-validation, full evaluation
img_embs = np.array([img_embs[i] for i in range(0, len(img_embs), 5)])
start = time.time()
if opt.cross_attn == 't2i':
sims = shard_xattn_t2i(img_embs, cap_embs, cap_lens, opt, tag_masks, shard_size=128)
# np.savez_compressed('test_sim_mat_f30k_t2i_AVG_glove.npz',sim=sims)
elif opt.cross_attn == 'i2t':
sims = shard_xattn_i2t(img_embs, cap_embs, cap_lens, opt, tag_masks, shard_size=128)
else:
raise NotImplementedError
end = time.time()
print("calculate similarity time:", end-start)
r, rt = i2t(img_embs, cap_embs, cap_lens, sims, return_ranks=True)
ri, rti = t2i(img_embs, cap_embs, cap_lens, sims, return_ranks=True)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
print("rsum: %.1f" % rsum)
print("Average i2t Recall: %.1f" % ar)
print("Image to text: %.1f %.1f %.1f %.1f %.1f" % r)
print("Average t2i Recall: %.1f" % ari)
print("Text to image: %.1f %.1f %.1f %.1f %.1f" % ri)
else:
# 5fold cross-validation, only for MSCOCO
results = []
for i in range(5):
img_embs_shard = img_embs[i * 5000:(i + 1) * 5000:5]
cap_embs_shard = cap_embs[i * 5000:(i + 1) * 5000]
cap_lens_shard = cap_lens[i * 5000:(i + 1) * 5000]
start = time.time()
if opt.cross_attn == 't2i':
sims = shard_xattn_t2i(img_embs_shard, cap_embs_shard, cap_lens_shard, opt, tag_masks, shard_size=128)
elif opt.cross_attn == 'i2t':
sims = shard_xattn_i2t(img_embs_shard, cap_embs_shard, cap_lens_shard, opt, tag_masks, shard_size=128)
else:
raise NotImplementedError
end = time.time()
print("calculate similarity time:", end-start)
r, rt0 = i2t(img_embs_shard, cap_embs_shard, cap_lens_shard, sims, return_ranks=True)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" % r)
ri, rti0 = t2i(img_embs_shard, cap_embs_shard, cap_lens_shard, sims, return_ranks=True)
print("Text to image: %.1f, %.1f, %.1f, %.1f, %.1f" % ri)
if i == 0:
rt, rti = rt0, rti0
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
print("rsum: %.1f ar: %.1f ari: %.1f" % (rsum, ar, ari))
results += [list(r) + list(ri) + [ar, ari, rsum]]
print("-----------------------------------")
print("Mean metrics: ")
mean_metrics = tuple(np.array(results).mean(axis=0).flatten())
print("rsum: %.1f" % (mean_metrics[10] * 6))
print("Average i2t Recall: %.1f" % mean_metrics[11])
print("Image to text: %.1f %.1f %.1f %.1f %.1f" %
mean_metrics[:5])
print("Average t2i Recall: %.1f" % mean_metrics[12])
print("Text to image: %.1f %.1f %.1f %.1f %.1f" %
mean_metrics[5:10])
torch.save({'rt': rt, 'rti': rti}, 'ranks.pth.tar')
def softmax(X, axis):
"""
Compute the softmax of each element along an axis of X.
"""
y = np.atleast_2d(X)
# subtract the max for numerical stability
y = y - np.expand_dims(np.max(y, axis = axis), axis)
# exponentiate y
y = np.exp(y)
# take the sum along the specified axis
ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)
# finally: divide elementwise
p = y / ax_sum
return p
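# Quick sanity check (illustrative, not part of the original source): a
# uniform input maps to a uniform distribution along the chosen axis, e.g.
#
#     softmax(np.array([0.0, 0.0]), axis=1)   # -> array([[0.5, 0.5]])
#
# (the input is promoted to 2D by np.atleast_2d, hence the extra dimension).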
def shard_xattn_t2i(images, captions, caplens, opt, tag_masks, shard_size=128):
"""
    Compute pairwise t2i image-caption distance with locality sharding
"""
# n_im_shard = (len(images)-1)/shard_size + 1
# n_cap_shard = (len(captions)-1)/shard_size + 1
n_im_shard = (len(images)-1)//shard_size + 1
n_cap_shard = (len(captions)-1)//shard_size + 1
# pdb.set_trace()
d = np.zeros((len(images), len(captions)))
for i in range(n_im_shard):
im_start, im_end = shard_size*i, min(shard_size*(i+1), len(images))
for j in range(n_cap_shard):
sys.stdout.write('\r>> shard_xattn_t2i batch (%d,%d)' % (i,j))
cap_start, cap_end = shard_size*j, min(shard_size*(j+1), len(captions))
# im = Variable(torch.from_numpy(images[im_start:im_end]), volatile=True).cuda()
# s = Variable(torch.from_numpy(captions[cap_start:cap_end]), volatile=True).cuda()
im = torch.from_numpy(images[im_start:im_end]).cuda()
s = torch.from_numpy(captions[cap_start:cap_end]).cuda()
l = caplens[cap_start:cap_end]
#======================================================
batch_tag_masks = tag_masks[cap_start:cap_end]
#======================================================
if opt.pos:
sim = xattn_score_t2i(im, s, l, opt,batch_tag_masks)
else:
sim = xattn_score_t2i(im, s, l, opt)
d[im_start:im_end, cap_start:cap_end] = sim.data.cpu().numpy()
sys.stdout.write('\n')
return d
def shard_xattn_i2t(images, captions, caplens, opt, tag_masks, shard_size=128):
"""
    Compute pairwise i2t image-caption distance with locality sharding
"""
n_im_shard = (len(images)-1)//shard_size + 1
n_cap_shard = (len(captions)-1)//shard_size + 1
d = np.zeros((len(images), len(captions)))
for i in range(n_im_shard):
im_start, im_end = shard_size*i, min(shard_size*(i+1), len(images))
for j in range(n_cap_shard):
sys.stdout.write('\r>> shard_xattn_i2t batch (%d,%d)' % (i,j))
cap_start, cap_end = shard_size*j, min(shard_size*(j+1), len(captions))
# im = Variable(torch.from_numpy(images[im_start:im_end]), volatile=True).cuda()
# s = Variable(torch.from_numpy(captions[cap_start:cap_end]), volatile=True).cuda()
im = torch.from_numpy(images[im_start:im_end]).cuda()
s = torch.from_numpy(captions[cap_start:cap_end]).cuda()
l = caplens[cap_start:cap_end]
#======================================================
batch_tag_masks = tag_masks[cap_start:cap_end]
#======================================================
if opt.pos:
sim = xattn_score_i2t(im, s, l, opt, batch_tag_masks)
else:
sim = xattn_score_i2t(im, s, l, opt)
d[im_start:im_end, cap_start:cap_end] = sim.data.cpu().numpy()
sys.stdout.write('\n')
return d
def i2t(images, captions, caplens, sims, npts=None, return_ranks=False):
"""
Images->Text (Image Annotation)
Images: (N, n_region, d) matrix of images
Captions: (5N, max_n_word, d) matrix of captions
CapLens: (5N) array of caption lengths
sims: (N, 5N) matrix of similarity im-cap
"""
npts = images.shape[0]
ranks = np.zeros(npts)
top1 = np.zeros(npts)
for index in range(npts):
inds = np.argsort(sims[index])[::-1]
# Score
rank = 1e20
for i in range(5 * index, 5 * index + 5, 1):
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
def t2i(images, captions, caplens, sims, npts=None, return_ranks=False):
"""
Text->Images (Image Search)
Images: (N, n_region, d) matrix of images
Captions: (5N, max_n_word, d) matrix of captions
CapLens: (5N) array of caption lengths
sims: (N, 5N) matrix of similarity im-cap
"""
npts = images.shape[0]
ranks = np.zeros(5 * npts)
top1 = np.zeros(5 * npts)
# --> (5N(caption), N(image))
sims = sims.T
for index in range(npts):
for i in range(5):
inds = np.argsort(sims[5 * index + i])[::-1]
ranks[5 * index + i] = np.where(inds == index)[0][0]
top1[5 * index + i] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
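# Note (illustrative, not part of the original source): both i2t and t2i
# report Recall@K -- the percentage of queries whose ground-truth match
# appears in the top K of the ranked list -- plus the median and mean rank.
# For example, if 3 out of 4 text queries rank their ground-truth image
# first, then R@1 = 75.0.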
|
the-stack_106_24855
|
import functools
import math
from collections import defaultdict
from ..core import (Add, Basic, Dummy, E, Integer, Mul, Pow, Rational, cacheit,
count_ops, expand_log, expand_mul, factor_terms)
from ..core.mul import _keep_coeff
from ..core.rules import Transform
from ..core.sympify import sympify
from ..functions import exp, exp_polar, log, polarify, root, unpolarify
from ..logic import true
from ..ntheory import multiplicity
from ..polys import gcd, lcm
from ..utilities import default_sort_key, ordered
@cacheit
def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops):
"""
Reduces expression by combining powers with similar bases and exponents.
Notes
=====
If deep is True then powsimp() will also simplify arguments of
functions. By default deep is set to False.
If force is True then bases will be combined without checking for
assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true
if x and y are both negative.
You can make powsimp() only combine bases or only combine exponents by
changing combine='base' or combine='exp'. By default, combine='all',
which does both. combine='base' will only combine::
a a a 2x x
x * y => (x*y) as well as things like 2 => 4
and combine='exp' will only combine
::
a b (a + b)
x * x => x
combine='exp' will strictly only combine exponents in the way that used
to be automatic. Also use deep=True if you need the old behavior.
When combine='all', 'exp' is evaluated first. Consider the first
example below for when there could be an ambiguity relating to this.
This is done so things like the second example can be completely
combined. If you want 'base' combined first, do something like
powsimp(powsimp(expr, combine='base'), combine='exp').
Examples
========
>>> powsimp(x**y*x**z*y**z, combine='all')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='exp')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='base', force=True)
x**y*(x*y)**z
>>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True)
(n*x)**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='exp')
n**(y + z)*x**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True)
(n*x)**y*(n*x)**z
>>> x, y = symbols('x y', positive=True)
>>> powsimp(log(exp(x)*exp(y)))
log(E**x*E**y)
>>> powsimp(log(exp(x)*exp(y)), deep=True)
x + y
Radicals with Mul bases will be combined if combine='exp'
>>> x, y = symbols('x y')
Two radicals are automatically joined through Mul:
>>> a = sqrt(x*sqrt(y))
>>> a*a**3 == a**4
True
But if an integer power of that radical has been
autoexpanded then Mul does not join the resulting factors:
>>> a**4 # auto expands to a Mul, no longer a Pow
x**2*y
>>> _*a # so Mul doesn't combine them
x**2*y*sqrt(x*sqrt(y))
>>> powsimp(_) # but powsimp will
(x*sqrt(y))**(5/2)
>>> powsimp(x*y*a) # but won't when doing so would violate assumptions
x*y*sqrt(x*sqrt(y))
"""
from ..matrices import MatrixSymbol
def recurse(arg, **kwargs):
_deep = kwargs.get('deep', deep)
_combine = kwargs.get('combine', combine)
_force = kwargs.get('force', force)
_measure = kwargs.get('measure', measure)
return powsimp(arg, _deep, _combine, _force, _measure)
expr = sympify(expr)
if (not isinstance(expr, Basic) or isinstance(expr, MatrixSymbol) or (
expr.is_Atom or expr in (exp_polar(0), exp_polar(1)))):
return expr
if deep or expr.is_Add or expr.is_Mul and _y not in expr.args:
expr = expr.func(*[recurse(w) for w in expr.args])
if expr.is_Pow:
return recurse(expr*_y, deep=False)/_y
if not expr.is_Mul:
return expr
# handle the Mul
if combine in ('exp', 'all'):
# Collect base/exp data, while maintaining order in the
# non-commutative parts of the product
c_powers = defaultdict(list)
nc_part = []
newexpr = []
coeff = Integer(1)
for term in expr.args:
if term.is_Rational:
coeff *= term
continue
if term.is_Pow:
term = _denest_pow(term)
if term.is_commutative:
b, e = term.as_base_exp()
if deep:
b, e = [recurse(i) for i in [b, e]]
if b.is_Pow:
# don't let smthg like sqrt(x**a) split into x**a, 1/2
# or else it will be joined as x**(a/2) later
b, e = b**e, Integer(1)
c_powers[b].append(e)
else:
# This is the logic that combines exponents for equal,
# but non-commutative bases: A**x*A**y == A**(x+y).
if nc_part:
b1, e1 = nc_part[-1].as_base_exp()
b2, e2 = term.as_base_exp()
if (b1 == b2 and
e1.is_commutative and e2.is_commutative):
nc_part[-1] = Pow(b1, Add(e1, e2))
continue
nc_part.append(term)
# add up exponents of common bases
for b, e in ordered(c_powers.items()):
# allow 2**x/4 -> 2**(x - 2); don't do this when b and e are
# Numbers since autoevaluation will undo it, e.g.
# 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4
if (b and b.is_Number and not all(ei.is_Number for ei in e) and
coeff != 1 and b not in (1, -1)):
m = multiplicity(abs(b), abs(coeff))
if m:
e.append(m)
coeff /= b**m
c_powers[b] = Add(*e)
if coeff != 1:
if coeff in c_powers:
c_powers[coeff] += Integer(1)
else:
c_powers[coeff] = Integer(1)
# convert to plain dictionary
c_powers = dict(c_powers)
# check for base and inverted base pairs
be = list(c_powers.items())
skip = set() # skip if we already saw them
for b, e in be:
if b in skip:
continue
bpos = b.is_positive or b.is_polar
if bpos:
binv = 1/b
if b != binv and binv in c_powers:
if b.as_numer_denom()[0] == 1:
c_powers.pop(b)
c_powers[binv] -= e
else:
skip.add(binv)
e = c_powers.pop(binv)
c_powers[b] -= e
# check for base and negated base pairs
be = list(c_powers.items())
_n = Integer(-1)
for i, (b, e) in enumerate(be):
if ((-b).is_Symbol or b.is_Add) and -b in c_powers:
if (b.is_positive in (0, 1) or e.is_integer):
c_powers[-b] += c_powers.pop(b)
if _n in c_powers:
c_powers[_n] += e
else:
c_powers[_n] = e
# filter c_powers and convert to a list
c_powers = [(b, e) for b, e in c_powers.items() if e]
# ==============================================================
# check for Mul bases of Rational powers that can be combined with
# separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) ->
# (x*sqrt(x*y))**(3/2)
# ---------------- helper functions
def ratq(x):
"""Return Rational part of x's exponent as it appears in the bkey."""
return bkey(x)[0][1]
def bkey(b, e=None):
"""Return (b**s, c.denominator), c.numerator where e -> c*s. If e is not given then
it will be taken by using as_base_exp() on the input b.
e.g.
x**3/2 -> (x, 2), 3
x**y -> (x**y, 1), 1
x**(2*y/3) -> (x**y, 3), 2
exp(x/2) -> (exp(a), 2), 1
"""
if e is not None: # coming from c_powers or from below
if e.is_Integer:
return (b, Integer(1)), e
elif e.is_Rational:
return (b, Integer(e.denominator)), Integer(e.numerator)
else:
c, m = e.as_coeff_Mul(rational=True)
if c != 1 and b.is_positive:
return (b**m, Integer(c.denominator)), Integer(c.numerator)
else:
return (b**e, Integer(1)), Integer(1)
else:
return bkey(*b.as_base_exp())
def update(b):
"""Decide what to do with base, b. If its exponent is now an
integer multiple of the Rational denominator, then remove it
and put the factors of its base in the common_b dictionary or
update the existing bases if necessary. If it has been zeroed
out, simply remove the base.
"""
newe, r = divmod(common_b[b], b[1])
if not r:
common_b.pop(b)
if newe:
for m in Mul.make_args(b[0]**newe):
b, e = bkey(m)
if b not in common_b:
common_b[b] = 0
common_b[b] += e
if b[1] != 1:
bases.append(b)
# ---------------- end of helper functions
# assemble a dictionary of the factors having a Rational power
common_b = {}
done = []
bases = []
for b, e in c_powers:
b, e = bkey(b, e)
if b in common_b:
common_b[b] = common_b[b] + e
else:
common_b[b] = e
if b[1] != 1 and b[0].is_Mul:
bases.append(b)
c_powers = [(b, e) for b, e in common_b.items() if e]
bases.sort(key=default_sort_key) # this makes tie-breaking canonical
bases.sort(key=measure, reverse=True) # handle longest first
for base in bases:
if base not in common_b: # it may have been removed already
continue
b, exponent = base
last = False # True when no factor of base is a radical
qlcm = 1 # the lcm of the radical denominators
while True:
bstart = b
qstart = qlcm
bb = [] # list of factors
                ee = [] # (factor's expo. and its current value in common_b)
for bi in Mul.make_args(b):
bib, bie = bkey(bi)
if bib not in common_b or common_b[bib] < bie:
ee = bb = [] # failed
break
ee.append([bie, common_b[bib]])
bb.append(bib)
if ee:
# find the number of extractions possible
# e.g. [(1, 2), (2, 2)] -> min(2/1, 2/2) -> 1
min1 = ee[0][1]/ee[0][0]
for eei in ee:
rat = eei[1]/eei[0]
if rat < 1:
break
min1 = min(min1, rat)
else:
# update base factor counts
# e.g. if ee = [(2, 5), (3, 6)] then min1 = 2
# and the new base counts will be 5-2*2 and 6-2*3
for i, bbi in enumerate(bb):
common_b[bbi] -= min1*ee[i][0]
update(bb[i])
# update the count of the base
# e.g. x**2*y*sqrt(x*sqrt(y)) the count of x*sqrt(y)
# will increase by 4 to give bkey (x*sqrt(y), 2, 5)
common_b[base] += min1*qstart*exponent
if (last # no more radicals in base
or len(common_b) == 1 # nothing left to join with
or all(k[1] == 1 for k in common_b)): # no rad's in common_b
break
# see what we can exponentiate base by to remove any radicals
# so we know what to search for
# e.g. if base were x**(1/2)*y**(1/3) then we should
# exponentiate by 6 and look for powers of x and y in the ratio
# of 2 to 3
qlcm = functools.reduce(lcm, [ratq(bi) for bi in Mul.make_args(bstart)])
if qlcm == 1:
break # we are done
b = bstart**qlcm
qlcm *= qstart
if all(ratq(bi) == 1 for bi in Mul.make_args(b)):
last = True # we are going to be done after this next pass
# this base no longer can find anything to join with and
# since it was longer than any other we are done with it
b, q = base
done.append((b, common_b.pop(base)*Rational(1, q)))
# update c_powers and get ready to continue with powsimp
c_powers = done
# there may be terms still in common_b that were bases that were
# identified as needing processing, so remove those, too
for (b, q), e in common_b.items():
if b.is_Pow and q != 1 and not b.exp.is_Rational:
b, be = b.as_base_exp()
b = b**(be/q)
else:
b = root(b, q)
c_powers.append((b, e))
check = len(c_powers)
c_powers = dict(c_powers)
assert len(c_powers) == check # there should have been no duplicates
# ==============================================================
# rebuild the expression
newexpr = expr.func(*(newexpr + [Pow(b, e) for b, e in c_powers.items()]))
if combine == 'exp':
return expr.func(newexpr, expr.func(*nc_part))
else:
return recurse(expr.func(*nc_part), combine='base') * \
recurse(newexpr, combine='base')
elif combine == 'base':
# Build c_powers and nc_part. These must both be lists not
# dicts because exp's are not combined.
c_powers = []
nc_part = []
for term in expr.args:
if term.is_commutative:
c_powers.append(list(term.as_base_exp()))
else:
nc_part.append(term)
# Pull out numerical coefficients from exponent if assumptions allow
# e.g., 2**(2*x) => 4**x
for i, cpi in enumerate(c_powers):
b, e = cpi
if not (all(x.is_nonnegative for x in b.as_numer_denom()) or e.is_integer or force or b.is_polar):
continue
exp_c, exp_t = e.as_coeff_Mul(rational=True)
if exp_c != 1 and exp_t != 1:
c_powers[i] = [Pow(b, exp_c), exp_t]
# Combine bases whenever they have the same exponent and
# assumptions allow
# first gather the potential bases under the common exponent
c_exp = defaultdict(list)
for b, e in c_powers:
if deep:
e = recurse(e)
c_exp[e].append(b)
del c_powers
# Merge back in the results of the above to form a new product
c_powers = defaultdict(list)
for e in c_exp:
bases = c_exp[e]
# calculate the new base for e
if len(bases) == 1:
new_base = bases[0]
elif e.is_integer or force:
new_base = expr.func(*bases)
else:
# see which ones can be joined
unk = []
nonneg = []
neg = []
for bi in bases:
if bi.is_negative:
neg.append(bi)
elif bi.is_nonnegative:
nonneg.append(bi)
elif bi.is_polar:
nonneg.append(
bi) # polar can be treated like non-negative
else:
unk.append(bi)
if len(unk) == 1 and not neg or len(neg) == 1 and not unk:
# a single neg or a single unk can join the rest
nonneg.extend(unk + neg)
unk = neg = []
elif neg:
# their negative signs cancel in groups of 2*q if we know
# that e = p/q else we have to treat them as unknown
israt = False
if e.is_Rational:
israt = True
else:
p, d = e.as_numer_denom()
if p.is_integer and d.is_integer:
israt = True
if israt:
neg = [-w for w in neg]
unk.extend([Integer(-1)]*len(neg))
else:
unk.extend(neg)
neg = []
del israt
# these shouldn't be joined
for b in unk:
c_powers[b].append(e)
# here is a new joined base
new_base = expr.func(*(nonneg + neg))
# if there are positive parts they will just get separated
# again unless some change is made
def _terms(e):
# return the number of terms of this expression
# when multiplied out -- assuming no joining of terms
if e.is_Add:
return sum(_terms(ai) for ai in e.args)
if e.is_Mul:
return math.prod(_terms(mi) for mi in e.args)
return 1
xnew_base = expand_mul(new_base, deep=False)
if len(Add.make_args(xnew_base)) < _terms(new_base):
new_base = factor_terms(xnew_base)
c_powers[new_base].append(e)
# break out the powers from c_powers now
c_part = [Pow(b, ei) for b, e in c_powers.items() for ei in e]
# we're done
return expr.func(*(c_part + nc_part))
else:
raise ValueError("combine must be one of ('all', 'exp', 'base').")
def powdenest(eq, force=False, polar=False):
r"""
Collect exponents on powers as assumptions allow.
Given ``(bb**be)**e``, this can be simplified as follows:
* if ``bb`` is positive, or
* ``e`` is an integer, or
* ``|be| < 1`` then this simplifies to ``bb**(be*e)``
Given a product of powers raised to a power, ``(bb1**be1 *
bb2**be2...)**e``, simplification can be done as follows:
- if e is positive, the gcd of all bei can be joined with e;
- all non-negative bb can be separated from those that are negative
and their gcd can be joined with e; autosimplification already
handles this separation.
- integer factors from powers that have integers in the denominator
of the exponent can be removed from any term and the gcd of such
integers can be joined with e
Setting ``force`` to True will make symbols that are not explicitly
negative behave as though they are positive, resulting in more
denesting.
Setting ``polar`` to True will do simplifications on the Riemann surface of
the logarithm, also resulting in more denestings.
When there are sums of logs in exp() then a product of powers may be
obtained e.g. ``exp(3*(log(a) + 2*log(b)))`` - > ``a**3*b**6``.
Examples
========
>>> powdenest((x**(2*a/3))**(3*x))
(x**(2*a/3))**(3*x)
>>> powdenest(exp(3*x*log(2)))
2**(3*x)
Assumptions may prevent expansion:
>>> powdenest(sqrt(x**2))
sqrt(x**2)
>>> p = symbols('p', positive=True)
>>> powdenest(sqrt(p**2))
p
No other expansion is done.
>>> i, j = symbols('i j', integer=True)
>>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j
x**(x*(i + j))
But exp() will be denested by moving all non-log terms outside of
the function; this may result in the collapsing of the exp to a power
with a different base:
>>> powdenest(exp(3*y*log(x)))
x**(3*y)
>>> powdenest(exp(y*(log(a) + log(b))))
(a*b)**y
>>> powdenest(exp(3*(log(a) + log(b))))
a**3*b**3
If assumptions allow, symbols can also be moved to the outermost exponent:
>>> i = Symbol('i', integer=True)
>>> powdenest(((x**(2*i))**(3*y))**x)
((x**(2*i))**(3*y))**x
>>> powdenest(((x**(2*i))**(3*y))**x, force=True)
x**(6*i*x*y)
>>> powdenest(((x**(2*a/3))**(3*y/i))**x)
((x**(2*a/3))**(3*y/i))**x
>>> powdenest((x**(2*i)*y**(4*i))**z, force=True)
(x*y**2)**(2*i*z)
>>> n = Symbol('n', negative=True)
>>> powdenest((x**i)**y, force=True)
x**(i*y)
>>> powdenest((n**i)**x, force=True)
(n**i)**x
"""
from .simplify import posify
if force:
eq, rep = posify(eq)
return powdenest(eq, force=False).xreplace(rep)
if polar:
eq, rep = polarify(eq)
return unpolarify(powdenest(unpolarify(eq, exponents_only=True)), rep)
new = powsimp(sympify(eq))
return new.xreplace(Transform(_denest_pow, filter=lambda m: m.is_Pow))
_y = Dummy('y')
def _denest_pow(eq):
"""
Denest powers.
This is a helper function for powdenest that performs the actual
transformation.
"""
from .simplify import logcombine
b, e = eq.as_base_exp()
if b.is_Pow and e != 1:
new = b._eval_power(e)
if new is not None:
eq = new
b, e = new.as_base_exp()
# denest exp with log terms in exponent
if b is E and e.is_Mul:
logs = []
other = []
for ei in e.args:
if any(isinstance(ai, log) for ai in Add.make_args(ei)):
logs.append(ei)
else:
other.append(ei)
logs = logcombine(Mul(*logs))
return Pow(exp(logs), Mul(*other))
_, be = b.as_base_exp()
if be == 1 and not (b.is_Mul or
b.is_Rational and b.denominator != 1 or
b.is_positive):
return eq
# denest eq which is either pos**e or Pow**e or Mul**e or
# Mul(b1**e1, b2**e2)
# handle polar numbers specially
polars, nonpolars = [], []
for bb in Mul.make_args(b):
if bb.is_polar:
polars.append(bb.as_base_exp())
else:
nonpolars.append(bb)
if len(polars) == 1 and not polars[0][0].is_Mul:
return Pow(polars[0][0], polars[0][1]*e)*powdenest(Mul(*nonpolars)**e)
elif polars:
return Mul(*[powdenest(bb**(ee*e)) for (bb, ee) in polars]) \
* powdenest(Mul(*nonpolars)**e)
if b.is_Integer:
# use log to see if there is a power here
logb = expand_log(log(b))
if logb.is_Mul:
c, logb = logb.args
e *= c
base = logb.args[0]
return Pow(base, e)
# if b is not a Mul or any factor is an atom then there is nothing to do
if not b.is_Mul or any(s.is_Atom for s in Mul.make_args(b)):
return eq
# let log handle the case of the base of the argument being a Mul, e.g.
# sqrt(x**(2*i)*y**(6*i)) -> x**i*y**(3**i) if x and y are positive; we
# will take the log, expand it, and then factor out the common powers that
# now appear as coefficient. We do this manually since terms_gcd pulls out
# fractions, terms_gcd(x+x*y/2) -> x*(y + 2)/2 and we don't want the 1/2;
# gcd won't pull out numerators from a fraction: gcd(3*x, 9*x/2) -> x but
# we want 3*x. Neither work with noncommutatives.
def nc_gcd(aa, bb):
a, b = [i.as_coeff_Mul() for i in [aa, bb]]
c = gcd(a[0], b[0]).as_numer_denom()[0]
g = Mul(*(a[1].args_cnc(cset=True)[0] & b[1].args_cnc(cset=True)[0]))
return _keep_coeff(c, g)
glogb = expand_log(log(b))
if glogb.is_Add:
args = glogb.args
g = functools.reduce(nc_gcd, args)
if g != 1:
cg, rg = g.as_coeff_Mul()
glogb = _keep_coeff(cg, rg*Add(*[a/g for a in args]))
# now put the log back together again
if isinstance(glogb, log) or not glogb.is_Mul:
if glogb.args[0].is_Pow:
glogb = _denest_pow(glogb.args[0])
if (abs(glogb.exp) < 1) == true:
return Pow(glogb.base, glogb.exp*e)
return eq
# the log(b) was a Mul so join any adds with logcombine
add = []
other = []
for a in glogb.args:
if a.is_Add:
add.append(a)
else:
other.append(a)
return Pow(exp(logcombine(Mul(*add))), e*Mul(*other))
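# Illustrative usage sketch, taken directly from the doctest examples in the
# powsimp docstring above (assumes the usual diofant-style `symbols` and `sqrt`
# helpers are imported):
#
#     >>> x, y, z = symbols('x y z')
#     >>> a = sqrt(x*sqrt(y))
#     >>> powsimp(a**4 * a)       # rejoin the radical that a**4 auto-expanded
#     (x*sqrt(y))**(5/2)
#     >>> powsimp(x**y*x**z*y**z, combine='base', force=True)
#     x**y*(x*y)**z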
|
the-stack_106_24856
|
from lxml import etree
import requests
if __name__ == "__main__":
url = "https://blog.csdn.net/yhl_jxy"
html = requests.get(url).text
parser = etree.HTML(html)
# title = parser.xpath("//*[@id='archive']/div[1]/div[2]/p[1]/a[1]/text()")
# titles = parser.xpath("//a[@class='archive-title']/@href")
# print(titles)
articles = parser.xpath("//div[@class='post floated-thumb']")
for article in articles:
t = article.xpath("div[@class='post-meta']//a[@class='archive-title']")
if t:
href = t[0].attrib.get("href")
title = t[0].text
print(href, title)
img = article.xpath("div[@class='post-thumb']//img//@src")
if img:
img = img[0]
print(img)
|
the-stack_106_24857
|
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import signal
import netaddr
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_consts
from neutron_lib.utils import runtime
from oslo_log import log as logging
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.common import constants as const
from neutron.common import utils as common_utils
from neutron.extensions import revisions
from neutron.extensions import timestamp
LOG = logging.getLogger(__name__)
HA_DEV_PREFIX = 'ha-'
IP_MONITOR_PROCESS_SERVICE = 'ip_monitor'
SIGTERM_TIMEOUT = 10
# The multiplier is used to compensate execution time of function sending
# SIGHUP to keepalived process. The constant multiplies ha_vrrp_advert_int
# config option and the result is the throttle delay.
THROTTLER_MULTIPLIER = 1.5
class HaRouterNamespace(namespaces.RouterNamespace):
"""Namespace for HA router.
This namespace sets the ip_nonlocal_bind to 0 for HA router namespaces.
It does so to prevent sending gratuitous ARPs for interfaces that got VIP
removed in the middle of processing.
It also disables ipv6 forwarding by default. Forwarding will be
enabled during router configuration processing only for the master node.
It has to be disabled on all other nodes to avoid sending MLD packets
which cause lost connectivity to Floating IPs.
"""
def create(self):
super(HaRouterNamespace, self).create(ipv6_forwarding=False)
# HA router namespaces should not have ip_nonlocal_bind enabled
ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 0)
class HaRouter(router.RouterInfo):
def __init__(self, state_change_callback, *args, **kwargs):
super(HaRouter, self).__init__(*args, **kwargs)
self.ha_port = None
self.keepalived_manager = None
self.state_change_callback = state_change_callback
def create_router_namespace_object(
self, router_id, agent_conf, iface_driver, use_ipv6):
return HaRouterNamespace(
router_id, agent_conf, iface_driver, use_ipv6)
@property
def ha_priority(self):
return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY)
@property
def ha_vr_id(self):
return self.router.get('ha_vr_id')
@property
def ha_state(self):
state = None
ha_state_path = self.keepalived_manager.get_full_config_file_path(
'state')
try:
with open(ha_state_path, 'r') as f:
state = f.read()
except (OSError, IOError):
LOG.debug('Error while reading HA state for %s', self.router_id)
return state or 'unknown'
@ha_state.setter
def ha_state(self, new_state):
ha_state_path = self.keepalived_manager.get_full_config_file_path(
'state')
try:
with open(ha_state_path, 'w') as f:
f.write(new_state)
except (OSError, IOError):
LOG.error('Error while writing HA state for %s',
self.router_id)
@property
def ha_namespace(self):
return self.ns_name
def is_router_master(self):
"""this method is normally called before the ha_router object is fully
initialized
"""
if self.router.get('_ha_state') == 'active':
return True
else:
return False
def initialize(self, process_monitor):
ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
if not ha_port:
msg = ("Unable to process HA router %s without HA port" %
self.router_id)
LOG.exception(msg)
raise Exception(msg)
super(HaRouter, self).initialize(process_monitor)
self.ha_port = ha_port
self._init_keepalived_manager(process_monitor)
self.ha_network_added()
self.update_initial_state(self.state_change_callback)
self.spawn_state_change_monitor(process_monitor)
def _init_keepalived_manager(self, process_monitor):
self.keepalived_manager = keepalived.KeepalivedManager(
self.router['id'],
keepalived.KeepalivedConf(),
process_monitor,
conf_path=self.agent_conf.ha_confs_path,
namespace=self.ha_namespace,
throttle_restart_value=(
self.agent_conf.ha_vrrp_advert_int * THROTTLER_MULTIPLIER))
config = self.keepalived_manager.config
interface_name = self.get_ha_device_name()
subnets = self.ha_port.get('subnets', [])
ha_port_cidrs = [subnet['cidr'] for subnet in subnets]
instance = keepalived.KeepalivedInstance(
'BACKUP',
interface_name,
self.ha_vr_id,
ha_port_cidrs,
nopreempt=True,
advert_int=self.agent_conf.ha_vrrp_advert_int,
priority=self.ha_priority,
vrrp_health_check_interval=(
self.agent_conf.ha_vrrp_health_check_interval),
ha_conf_dir=self.keepalived_manager.get_conf_dir())
instance.track_interfaces.append(interface_name)
if self.agent_conf.ha_vrrp_auth_password:
# TODO(safchain): use oslo.config types when it will be available
# in order to check the validity of ha_vrrp_auth_type
instance.set_authentication(self.agent_conf.ha_vrrp_auth_type,
self.agent_conf.ha_vrrp_auth_password)
config.add_instance(instance)
def enable_keepalived(self):
self.keepalived_manager.spawn()
def disable_keepalived(self):
if not self.keepalived_manager:
LOG.debug('Error while disabling keepalived for %s - no manager',
self.router_id)
return
self.keepalived_manager.disable()
conf_dir = self.keepalived_manager.get_conf_dir()
shutil.rmtree(conf_dir)
def _get_keepalived_instance(self):
return self.keepalived_manager.config.get_instance(self.ha_vr_id)
def _get_primary_vip(self):
return self._get_keepalived_instance().get_primary_vip()
def get_ha_device_name(self):
return (HA_DEV_PREFIX + self.ha_port['id'])[:self.driver.DEV_NAME_LEN]
def ha_network_added(self):
interface_name = self.get_ha_device_name()
self.driver.plug(self.ha_port['network_id'],
self.ha_port['id'],
interface_name,
self.ha_port['mac_address'],
namespace=self.ha_namespace,
prefix=HA_DEV_PREFIX,
mtu=self.ha_port.get('mtu'))
ip_cidrs = common_utils.fixed_ip_cidrs(self.ha_port['fixed_ips'])
self.driver.init_l3(interface_name, ip_cidrs,
namespace=self.ha_namespace,
preserve_ips=[self._get_primary_vip()])
def ha_network_removed(self):
if not self.ha_port:
LOG.debug('Error while removing HA network for %s - no port',
self.router_id)
return
self.driver.unplug(self.get_ha_device_name(),
namespace=self.ha_namespace,
prefix=HA_DEV_PREFIX)
self.ha_port = None
def _add_vips(self, port, interface_name):
for ip_cidr in common_utils.fixed_ip_cidrs(port['fixed_ips']):
self._add_vip(ip_cidr, interface_name)
def _add_vip(self, ip_cidr, interface, scope=None):
instance = self._get_keepalived_instance()
instance.add_vip(ip_cidr, interface, scope)
def _remove_vip(self, ip_cidr):
instance = self._get_keepalived_instance()
instance.remove_vip_by_ip_address(ip_cidr)
def _clear_vips(self, interface):
instance = self._get_keepalived_instance()
instance.remove_vips_vroutes_by_interface(interface)
def _get_cidrs_from_keepalived(self, interface_name):
instance = self._get_keepalived_instance()
return instance.get_existing_vip_ip_addresses(interface_name)
def get_router_cidrs(self, device):
return set(self._get_cidrs_from_keepalived(device.name))
def routes_updated(self, old_routes, new_routes):
instance = self._get_keepalived_instance()
instance.virtual_routes.extra_routes = [
keepalived.KeepalivedVirtualRoute(
route['destination'], route['nexthop'])
for route in new_routes]
super(HaRouter, self).routes_updated(old_routes, new_routes)
def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
gateway_ips = self._get_external_gw_ips(ex_gw_port)
default_gw_rts = []
instance = self._get_keepalived_instance()
for gw_ip in gateway_ips:
# TODO(Carl) This is repeated everywhere. A method would
# be nice.
default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version]
default_gw_rts.append(keepalived.KeepalivedVirtualRoute(
default_gw, gw_ip, interface_name))
instance.virtual_routes.gateway_routes = default_gw_rts
def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
extra_subnets = ex_gw_port.get('extra_subnets', [])
instance = self._get_keepalived_instance()
onlink_route_cidrs = set(s['cidr'] for s in extra_subnets)
instance.virtual_routes.extra_subnets = [
keepalived.KeepalivedVirtualRoute(
onlink_route_cidr, None, interface_name, scope='link') for
onlink_route_cidr in onlink_route_cidrs]
def _should_delete_ipv6_lladdr(self, ipv6_lladdr):
"""Only the master should have any IP addresses configured.
Let keepalived manage IPv6 link local addresses, the same way we let
it manage IPv4 addresses. If the router is not in the master state,
we must delete the address first as it is autoconfigured by the kernel.
"""
manager = self.keepalived_manager
if manager.get_process().active:
if self.ha_state != 'master':
conf = manager.get_conf_on_disk()
managed_by_keepalived = conf and ipv6_lladdr in conf
if managed_by_keepalived:
return False
else:
return False
return True
def _disable_ipv6_addressing_on_interface(self, interface_name):
"""Disable IPv6 link local addressing on the device and add it as
a VIP to keepalived. This means that the IPv6 link local address
will only be present on the master.
"""
device = ip_lib.IPDevice(interface_name, namespace=self.ha_namespace)
ipv6_lladdr = ip_lib.get_ipv6_lladdr(device.link.address)
if self._should_delete_ipv6_lladdr(ipv6_lladdr):
self.driver.configure_ipv6_ra(self.ha_namespace, interface_name,
const.ACCEPT_RA_DISABLED)
device.addr.flush(n_consts.IP_VERSION_6)
else:
self.driver.configure_ipv6_ra(self.ha_namespace, interface_name,
const.ACCEPT_RA_WITHOUT_FORWARDING)
self._remove_vip(ipv6_lladdr)
self._add_vip(ipv6_lladdr, interface_name, scope='link')
def _add_gateway_vip(self, ex_gw_port, interface_name):
self._add_vips(ex_gw_port, interface_name)
self._add_default_gw_virtual_route(ex_gw_port, interface_name)
self._add_extra_subnet_onlink_routes(ex_gw_port, interface_name)
def add_floating_ip(self, fip, interface_name, device):
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
self._add_vip(ip_cidr, interface_name)
return n_consts.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
self._remove_vip(ip_cidr)
to = common_utils.cidr_to_ip(ip_cidr)
if device.addr.list(to=to):
super(HaRouter, self).remove_floating_ip(device, ip_cidr)
def internal_network_updated(self, interface_name, ip_cidrs, mtu):
self.driver.set_mtu(interface_name, mtu, namespace=self.ns_name,
prefix=router.INTERNAL_DEV_PREFIX)
self._clear_vips(interface_name)
self._disable_ipv6_addressing_on_interface(interface_name)
for ip_cidr in ip_cidrs:
self._add_vip(ip_cidr, interface_name)
def _plug_ha_router_port(self, port, name_getter, prefix):
port_id = port['id']
interface_name = name_getter(port_id)
self.driver.plug(port['network_id'],
port_id,
interface_name,
port['mac_address'],
namespace=self.ha_namespace,
prefix=prefix,
mtu=port.get('mtu'))
self._disable_ipv6_addressing_on_interface(interface_name)
self._add_vips(port, interface_name)
def internal_network_added(self, port):
self._plug_ha_router_port(
port, self.get_internal_device_name, router.INTERNAL_DEV_PREFIX)
def internal_network_removed(self, port):
super(HaRouter, self).internal_network_removed(port)
interface_name = self.get_internal_device_name(port['id'])
self._clear_vips(interface_name)
def _get_state_change_monitor_process_manager(self):
return external_process.ProcessManager(
self.agent_conf,
'%s.monitor' % self.router_id,
self.ha_namespace,
default_cmd_callback=self._get_state_change_monitor_callback())
def _get_state_change_monitor_callback(self):
ha_device = self.get_ha_device_name()
ha_cidr = self._get_primary_vip()
def callback(pid_file):
cmd = [
'neutron-keepalived-state-change',
'--router_id=%s' % self.router_id,
'--namespace=%s' % self.ha_namespace,
'--conf_dir=%s' % self.keepalived_manager.get_conf_dir(),
'--monitor_interface=%s' % ha_device,
'--monitor_cidr=%s' % ha_cidr,
'--pid_file=%s' % pid_file,
'--state_path=%s' % self.agent_conf.state_path,
'--user=%s' % os.geteuid(),
'--group=%s' % os.getegid()]
return cmd
return callback
def spawn_state_change_monitor(self, process_monitor):
pm = self._get_state_change_monitor_process_manager()
pm.enable()
process_monitor.register(
self.router_id, IP_MONITOR_PROCESS_SERVICE, pm)
def destroy_state_change_monitor(self, process_monitor):
if not self.ha_port:
LOG.debug('Error while destroying state change monitor for %s - '
'no port', self.router_id)
return
pm = self._get_state_change_monitor_process_manager()
process_monitor.unregister(
self.router_id, IP_MONITOR_PROCESS_SERVICE)
pm.disable(sig=str(int(signal.SIGTERM)))
try:
common_utils.wait_until_true(lambda: not pm.active,
timeout=SIGTERM_TIMEOUT)
except common_utils.WaitTimeout:
pm.disable(sig=str(int(signal.SIGKILL)))
def update_initial_state(self, callback):
addresses = ip_lib.get_devices_with_ip(self.ha_namespace,
name=self.get_ha_device_name())
cidrs = (address['cidr'] for address in addresses)
ha_cidr = self._get_primary_vip()
state = 'master' if ha_cidr in cidrs else 'backup'
self.ha_state = state
callback(self.router_id, state)
@staticmethod
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return {k: v for k, v in d.items() if k not in ignore}
keys_to_ignore = set([portbindings.HOST_ID, timestamp.UPDATED,
revisions.REVISION])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
def external_gateway_added(self, ex_gw_port, interface_name):
self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
self._add_gateway_vip(ex_gw_port, interface_name)
self._disable_ipv6_addressing_on_interface(interface_name)
# Enable RA and IPv6 forwarding only for master instances. This will
# prevent backup routers from sending packets to the upstream switch
# and disrupt connections.
enable = self.ha_state == 'master'
self._configure_ipv6_params_on_gw(ex_gw_port, self.ns_name,
interface_name, enable)
def external_gateway_updated(self, ex_gw_port, interface_name):
self._plug_external_gateway(
ex_gw_port, interface_name, self.ha_namespace)
ip_cidrs = common_utils.fixed_ip_cidrs(self.ex_gw_port['fixed_ips'])
for old_gateway_cidr in ip_cidrs:
self._remove_vip(old_gateway_cidr)
self._add_gateway_vip(ex_gw_port, interface_name)
def external_gateway_removed(self, ex_gw_port, interface_name):
self._clear_vips(interface_name)
if self.ha_state == 'master':
super(HaRouter, self).external_gateway_removed(ex_gw_port,
interface_name)
else:
# We are not the master node, so no need to delete ip addresses.
self.driver.unplug(interface_name,
namespace=self.ns_name,
prefix=router.EXTERNAL_DEV_PREFIX)
def delete(self):
if self.process_monitor:
self.destroy_state_change_monitor(self.process_monitor)
self.disable_keepalived()
self.ha_network_removed()
super(HaRouter, self).delete()
def process(self):
super(HaRouter, self).process()
self.ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
if (self.ha_port and
self.ha_port['status'] == n_consts.PORT_STATUS_ACTIVE):
self.enable_keepalived()
@runtime.synchronized('enable_radvd')
def enable_radvd(self, internal_ports=None):
if (self.keepalived_manager.get_process().active and
self.ha_state == 'master'):
super(HaRouter, self).enable_radvd(internal_ports)
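# Illustrative sketch (not part of Neutron): the `_gateway_ports_equal` helper
# above compares two port dicts while ignoring volatile keys by filtering both
# sides first, e.g.:
#
#     def _filtered(d, ignore):
#         return {k: v for k, v in d.items() if k not in ignore}
#
#     _filtered({'id': 'p1', 'revision_number': 7}, {'revision_number'}) == \
#         _filtered({'id': 'p1', 'revision_number': 9}, {'revision_number'})   # -> True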
|
the-stack_106_24858
|
import concurrent.futures
import os
import gzip
import pickle
import os.path as osp
import random
import time
import pydicom
import numpy as np
from pylinac import image
from scipy.misc import imresize
from sklearn import svm, metrics, preprocessing, model_selection
def is_dicom(path):
"""Whether the file is a readable DICOM file via pydicom."""
try:
ds = pydicom.dcmread(path, force=True)
ds.pixel_array
return True
except:
return False
def drop_non_dicom(folder, use_pool=True):
"""Remove all files within a folder that are not DICOM images. Space-saving utility function."""
print("Dropping non-DICOM files...")
if not use_pool:
for pdir, _, files in os.walk(folder):
for file in files:
file = osp.join(pdir, file)
if not is_dicom(file):
os.remove(file)
print("Deleting", file)
else:
futures = {}
start = time.time()
with concurrent.futures.ProcessPoolExecutor() as exec:
for pdir, _, files in os.walk(folder):
for file in files:
filepath = osp.join(pdir, file)
future = exec.submit(is_dicom, filepath)
futures[future] = filepath
print("Queued {} file identifications".format(len(futures)))
# filepaths = []
for idx, future in enumerate(concurrent.futures.as_completed(futures)):
if not future.result():
os.remove(futures[future])
# filepaths.append(futures[future])
print("Done identifying files in {} in {:.2f}s".format(osp.basename(folder), time.time() - start))
def get_files(folder, func, use_pool=False, randomize=False, recursive=True):
"""Get a list of files that are valid images from the folder."""
if not osp.isdir(folder):
raise NotADirectoryError("{} is not a directory".format(folder))
print("Grabbing file names...")
# get filenames
all_files = []
if recursive:
for pdir, _, files in os.walk(folder):
for file in files:
filepath = osp.join(pdir, file)
all_files.append(filepath)
else:
files = os.listdir(folder)
for file in files:
filepath = osp.join(folder, file)
if osp.isfile(filepath):
all_files.append(filepath)
if not use_pool:
filepaths = []
for file in all_files:
if func(file):
filepaths.append(file)
else:
futures = {}
start = time.time()
with concurrent.futures.ProcessPoolExecutor() as exec:
for pdir, _, files in os.walk(folder):
for file in files:
filepath = osp.join(pdir, file)
future = exec.submit(func, filepath)
futures[future] = filepath
print("Queued {} file identifications".format(len(futures)))
filepaths = []
for idx, future in enumerate(concurrent.futures.as_completed(futures)):
if future.result():
filepaths.append(futures[future])
print("Done identifying files in {} in {:.2f}s".format(osp.basename(folder), time.time() - start))
if randomize:
random.shuffle(filepaths)
return filepaths
def load_images(path):
"""Load the built images for training."""
imgs = get_files(path, lambda x: 'images' in x, recursive=False)
img_arr = np.vstack([np.load(f) for f in imgs])
labels = get_files(path, lambda x: 'labels' in x, recursive=False)
labels_arr = np.concatenate([np.load(f) for f in labels])
return img_arr, labels_arr
def process_image(path):
"""Load and resize the images and return as flattened numpy array"""
img = image.load(path, dtype=np.float32)
resized_img = imresize(img.array, size=(100, 100), mode='F').flatten()
rescaled_img = preprocessing.minmax_scale(resized_img)
return rescaled_img
def train(path, train_size, parameters, clf_name):
"""Train an SVM classifier on a set of labeled images.
Parameters
----------
path : str
Path to the folder containing the images and labels as numpy array files.
train_size : float
Training size proportion of input images. Must be between 0 and 1.
parameters : dict
Set of parameters to pass to the SMV grid search algorithm.
clf_name : str
Prefix name of classifier; e.g. 'vmat', 'cbct'.
"""
data, labels = load_images(path)
data_train, data_test, y_train, y_test = model_selection.train_test_split(data, labels, train_size=train_size)
start = time.time()
classifier = model_selection.GridSearchCV(svm.SVC(verbose=True), parameters)
classifier.fit(data_train, y_train)
print()
print("Training took: {:.2f}s".format(time.time() - start))
for params, mean_score, scores in classifier.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print(classifier.best_estimator_)
print("Best parameters found:")
print(classifier.best_params_)
print("With a training score of:")
print(classifier.best_score_)
print()
print("Test data classification report:")
print(metrics.classification_report(y_test, classifier.predict(data_test)))
with gzip.open(clf_name + '_classifier.pkl.gz', mode='wb') as m:
pickle.dump(classifier, m)
def strip(folder, classifier_prefix, correct_prediction, correct_names=None, incorrect_names=None, drop_non_dicoms=False):
"""Strip a folder of non-DICOM files and of image files that are not of the predicted classification."""
if drop_non_dicoms:
drop_non_dicom(folder)
filepaths = get_files(folder, lambda x: True, randomize=False)
# load classifier
folder = osp.join(osp.dirname(__file__), classifier_prefix)
with gzip.open(osp.join(folder, classifier_prefix + '_classifier.pkl.gz'), mode='rb') as m:
clf = pickle.load(m)
files2delete = []
for file in filepaths:
incorrect = True
if incorrect_names is not None:
for name in incorrect_names:
if name in osp.basename(file).lower():
files2delete.append(file)
incorrect = False
break
if incorrect:
classify = True
if correct_names is not None:
for name in correct_names:
if name in osp.basename(file).lower():
classify = False
break
if classify:
img = process_image(file)
prediction = clf.predict(img.reshape(1, -1))
print("Prediction {} for file: {}".format(prediction, file))
time.sleep(0.3)
if prediction not in correct_prediction:
files2delete.append(file)
for file in files2delete:
os.remove(file)
print("Done stripping")
if __name__ == '__main__':
pass
path = r'C:\Users\James\Dropbox\Programming\Python\Projects\pylinac test files\Picket Fences'
drop_non_dicom(path)
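    # Hedged usage sketch: `train` expects a scikit-learn style parameter grid
    # for the SVC grid search. The grid and classifier prefix below are
    # illustrative values, not ones prescribed by this module:
    #
    #     example_parameters = {'C': [0.1, 1, 10, 100],
    #                           'gamma': [0.01, 0.001, 0.0001],
    #                           'kernel': ['rbf', 'linear']}
    #     train(path, train_size=0.8, parameters=example_parameters, clf_name='pf')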
|
the-stack_106_24859
|
from setuptools import setup, find_packages
version = "0.1"
setup(
name = "pyhdwallet",
version = version,
packages = find_packages(),
package_data = {
'': ['_bip32/data/*.db'],
},
#data_files=[('pyhdwallet/_bip32/data/bip32version.db', 'pyhdwallet/_bip32/data')],
install_requires = [
"mnemonic",
"ecdsa"],
zip_safe=False,
platforms="any",
python_requires=">=3.5",
author = "kcorlidy Chan",
author_email = "[email protected]",
url = "https://github.com/kcorlidy/pyhdwallet",
license = "http://opensource.org/licenses/MIT",
description = "Hierarchical Deterministic (HD) key creation tools",
long_description = ""
)
|
the-stack_106_24861
|
from setuptools import setup, find_packages
from rost.__version__ import VERSION
NAME = 'rost'
DESCRIPTION = 'A simple static site generator based on Jinja2 with a CLI build using Click.'
KEYWORDS = 'HTML, Jinja2, Click'
URL = 'https://github.com/Robert-96/rost'
EMAIL = '[email protected]'
AUTHOR = 'Robert-96'
REQUIRES_PYTHON = '>=3.4.0'
LICENSE = 'MIT'
PROJECT_URLS = {
'Bug Tracker': 'https://github.com/Robert-96/rost/issues',
'Documentation': 'https://github.com/Robert-96/rost/blob/main/README.md',
'Source': 'https://github.com/Robert-96/rost'
}
with open('requirements.txt') as f:
REQUIRED = f.read().splitlines()
with open('README.md') as f:
README = f.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=README,
long_description_content_type='text/markdown',
license=LICENSE,
url=URL,
project_urls=PROJECT_URLS,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
setup_requires=REQUIRED,
install_requires=REQUIRED,
packages=find_packages(exclude=['tests']),
entry_points='''
[console_scripts]
rost=rost.cli:cli
''',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Cython',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Text Processing',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: Markdown',
'Topic :: Utilities'
],
keywords=KEYWORDS,
)
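# Usage note (assuming the standard setuptools workflow): installing this
# package, e.g. with `pip install .`, exposes the `rost` console script
# declared in `entry_points` above, which dispatches to `rost.cli:cli`.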
|
the-stack_106_24862
|
from mmdet.apis import init_detector, inference_detector
config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
device = 'cuda:0'
# init a detector
model = init_detector(config_file, device=device)
# inference the demo image
inference_detector(model, 'demo/demo.jpg')
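# Hedged note (the exact return type depends on the MMDetection version): for a
# single-image call on a detector like Faster R-CNN, `inference_detector`
# typically returns one (N, 5) array per class, each row being
# (x1, y1, x2, y2, score). A sketch of consuming that output:
#
#     result = inference_detector(model, 'demo/demo.jpg')
#     for class_index, bboxes in enumerate(result):
#         print(class_index, bboxes.shape)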
|
the-stack_106_24863
|
# -*- coding: utf-8 -*-
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent.absolute()))
# -- Project information -----------------------------------------------------
project = "BioPAL"
author = "BioPAL team"
copyright = "2021, BioPAL team"
# -- General configuration ---------------------------------------------------
extensions = [
"sphinx.ext.autosectionlabel",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"numpydoc",
"nbsphinx",
]
autosummary_generate = True
exclude_patterns = ["_build", "**.ipynb_checkpoints", "legacy"]
# -- HTML options ------------------------------------------------------------
html_logo = "_static/logo.png"
html_static_path = ["_static"]
html_theme = "furo"
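# Build sketch (standard Sphinx workflow; the paths are assumptions about this
# repository's layout): from the docs source directory run
#     sphinx-build -b html . _build/html
# to render the HTML documentation with the furo theme configured above.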
|
the-stack_106_24864
|
__author__ = 'Calvin'
try:
from builtins import range
except ImportError:
range = xrange
from pyperform import *
class SomeClass(object): #!
def __init__(self, n):
self.n = n
self.count = 0
if n > 0:
self.a = SomeClass(n-1)
def func(self):
self.count += 1
return sum(range(10))
@BenchmarkedClass()
class MyClass(object):
def __init__(self):
self.obj = SomeClass(5) # setup an object with some nested lookup
self.g = self.generator() # setup the generator in advance
@ComparisonBenchmark('gen', classname='MyClass', validation=True)
def call_generator(self):
for i in self.g: # Call the generator which calls the function 100 times (like not_generator)
pass
return self.obj.a.a.a.a.count
def generator(self):
func = self.obj.a.a.a.a.func
for i in range(100):
func()
yield i
@ComparisonBenchmark('gen', classname='MyClass', validation=True)
def not_generator(self):
func = self.obj.a.a.a.a.func
for i in range(100):
func()
return self.obj.a.a.a.a.count
if __name__ == '__main__':
# c = MyClass()
# c.call_generator()
with open('report.txt', 'w') as _f:
ComparisonBenchmark.summarize('gen', _f, include_source=1)
|
the-stack_106_24865
|
from utility import *
"""This class deals only in gene strings, not Flower objects"""
class BreedingResult:
# lists probabilities of each gene pair outcome
combinations = {
'00':[("0", 1.0)],
'01':[("0", 0.5), ("1", 0.5)],
'02':[("1", 1.0)],
'11':[("0", 0.25), ("1", 0.5), ("2", 0.25)],
'12':[("1", 0.5), ("2", 0.5)],
'22':[("2", 1.0)]
}
# stores the offspring of every possible flower combination
all = []
# each instance of BreedingResult calculates potential children of two specific genes
def __init__(self, f1, f2):
self.f1 = f1
self.f2 = f2
# generates list of keys for accessing combinations
self.keys = [''.join(sorted(i)) for i in zip(f1, f2)]
# create list of children and spawn probabilities
self.children = []
self.offspring(0, "", 1)
# recursively finds children & probs by looping through dict entries listed in self.keys
def offspring(self, index, string, prob):
if index == len(self.keys):
            # every gene pair has been resolved; record the completed child once
            self.children.append({'genes':string, 'prob':prob})
return True
else:
for i in BreedingResult.combinations[self.keys[index]]:
self.offspring(index+1, string+i[0], prob*i[1])
# generates the offspring of every possible flower combination
@classmethod
def generateAll(cls, species):
# account for roses having an extra gene
if species == "rose":
genes = [pad(ternary(i, 10), 4) for i in range(81)]
else:
genes = [pad(ternary(i, 10), 3) for i in range(27)]
for i in range(len(genes)):
for j in range(i, len(genes)):
cls.all.append(BreedingResult(genes[i], genes[j]))
# clear 'all' list if changing species is required (only to and from rose)
@classmethod
def clearAll(cls):
cls.all = []
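# Minimal usage sketch (assumes `utility` provides the `ternary` and `pad`
# helpers imported above); kept under a main guard so importing this module
# stays side-effect free:
if __name__ == "__main__":
    BreedingResult.generateAll("windflower")   # any non-rose species -> 3-gene strings
    first = BreedingResult.all[0]
    print(first.f1, first.f2, first.children[:3])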
|
the-stack_106_24866
|
# pylint: disable=invalid-name,line-too-long,too-many-locals,too-many-arguments,too-many-branches,too-many-statements,stop-iteration-return
import os
import math
import glob
import typing
import random
import zipfile
import string
import itertools
import cv2
import tqdm
import numpy as np
import essential_generators
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
import fontTools.ttLib
from . import tools
LIGATURES = {"\U0000FB01": "fi", "\U0000FB02": "fl"}
LIGATURE_STRING = "".join(LIGATURES.keys())
def get_rotation_matrix(width, height, thetaX=0, thetaY=0, thetaZ=0):
"""Provide a rotation matrix about the center of a rectangle with
a given width and height.
Args:
width: The width of the rectangle
height: The height of the rectangle
thetaX: Rotation about the X axis
thetaY: Rotation about the Y axis
thetaZ: Rotation about the Z axis
Returns:
A 3x3 transformation matrix
"""
translate1 = np.array([[1, 0, width / 2], [0, 1, height / 2], [0, 0, 1]])
rotX = np.array(
[
[1, 0, 0],
[0, np.cos(thetaX), -np.sin(thetaX)],
[0, np.sin(thetaX), np.cos(thetaX)],
]
)
rotY = np.array(
[
[np.cos(thetaY), 0, np.sin(thetaY)],
[0, 1, 0],
[-np.sin(thetaY), 0, np.cos(thetaY)],
]
)
rotZ = np.array(
[
[np.cos(thetaZ), -np.sin(thetaZ), 0],
[np.sin(thetaZ), np.cos(thetaZ), 0],
[0, 0, 1],
]
)
translate2 = np.array([[1, 0, -width / 2], [0, 1, -height / 2], [0, 0, 1]])
M = translate1.dot(rotX).dot(rotY).dot(rotZ).dot(translate2)
return M
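# Illustrative example (not part of the library API): a 15-degree in-plane
# rotation about the center of a 640x480 canvas.
#
#     M = get_rotation_matrix(width=640, height=480, thetaZ=np.pi / 12)
#     assert M.shape == (3, 3)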
def get_maximum_uniform_contour(image, fontsize, margin=0):
"""Get the largest possible contour of light or
dark area in an image.
Args:
image: The image in which to find a contiguous area.
fontsize: The fontsize for text. Will be used for blurring
and for determining useful areas.
margin: The minimum margin required around the image.
Returns:
A (contour, isDark) tuple. If no contour is found, both
entries will be None.
"""
if margin > 0:
image = image[margin:-margin, margin:-margin]
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
blurred = cv2.blur(src=gray, ksize=(fontsize // 2, fontsize // 2))
_, threshold = cv2.threshold(
src=blurred, thresh=255 / 2, maxval=255, type=cv2.THRESH_BINARY
)
contoursDark = cv2.findContours(
255 - threshold, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE
)[-2]
contoursLight = cv2.findContours(
threshold, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE
)[-2]
areasDark = list(map(cv2.contourArea, contoursDark))
areasLight = list(map(cv2.contourArea, contoursLight))
maxDarkArea = max(areasDark) if areasDark else 0
maxLightArea = max(areasLight) if areasLight else 0
if max(maxDarkArea, maxLightArea) < (4 * fontsize) ** 2:
return None, None
contour = None
isDark = None
if areasDark and (not areasLight or maxDarkArea >= maxLightArea):
contour = contoursDark[np.argmax(areasDark)]
isDark = True
else:
contour = contoursLight[np.argmax(areasLight)]
isDark = False
if contour is not None:
contour += margin
return contour, isDark
def font_supports_alphabet(filepath, alphabet):
"""Verify that a font contains a specific set of characters.
Args:
        filepath: Path to font file
alphabet: A string of characters to check for.
"""
if alphabet == "":
return True
font = fontTools.ttLib.TTFont(filepath)
if not all(
any(ord(c) in table.cmap.keys() for table in font["cmap"].tables)
for c in alphabet
):
return False
font = PIL.ImageFont.truetype(filepath)
try:
for character in alphabet:
font.getsize(character)
# pylint: disable=bare-except
except:
return False
return True
def get_text_generator(alphabet=None, lowercase=False, max_string_length=None):
"""Generates strings of sentences using only the letters in alphabet.
Args:
alphabet: The alphabet of permitted characters
lowercase: Whether to convert all strings to lowercase.
max_string_length: The maximum length of the string
"""
gen = essential_generators.DocumentGenerator()
while True:
sentence = gen.sentence()
if lowercase:
sentence = sentence.lower()
sentence = "".join([s for s in sentence if (alphabet is None or s in alphabet)])
if max_string_length is not None:
sentence = sentence[:max_string_length]
yield sentence
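# Illustrative example (not part of the library API): draw one lowercase
# sentence of at most 32 characters from the generator.
#
#     text_gen = get_text_generator(alphabet=string.ascii_lowercase + " ",
#                                   lowercase=True, max_string_length=32)
#     sample = next(text_gen)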
def _strip_line(line):
"""Modify a line so that spaces are excluded."""
first_character_index = next(
(
index
for index, (box, character) in enumerate(line)
if not character.isspace()
),
None,
)
if first_character_index is None:
return []
last_character_index = len(line) - next(
index
for index, (box, character) in enumerate(reversed(line))
if not character.isspace()
)
return line[first_character_index:last_character_index]
def _strip_lines(lines):
"""Modify a set of lines so that spaces are excluded."""
lines = [line for line in lines if len(line) > 0]
lines = [_strip_line(line) for line in lines]
lines = [line for line in lines if len(line) > 0]
return lines
def get_backgrounds(cache_dir=None):
"""Download a set of pre-reviewed backgrounds.
Args:
cache_dir: Where to save the dataset. By default, data will be
saved to ~/.keras-ocr.
Returns:
A list of background filepaths.
"""
if cache_dir is None:
cache_dir = os.path.expanduser(os.path.join("~", ".keras-ocr"))
backgrounds_dir = os.path.join(cache_dir, "backgrounds")
backgrounds_zip_path = tools.download_and_verify(
url="https://github.com/faustomorales/keras-ocr/releases/download/v0.8.4/backgrounds.zip",
sha256="f263ed0d55de303185cc0f93e9fcb0b13104d68ed71af7aaaa8e8c91389db471",
filename="backgrounds.zip",
cache_dir=cache_dir,
)
if len(glob.glob(os.path.join(backgrounds_dir, "*"))) != 1035:
with zipfile.ZipFile(backgrounds_zip_path) as zfile:
zfile.extractall(backgrounds_dir)
return glob.glob(os.path.join(backgrounds_dir, "*.jpg"))
def get_fonts(
cache_dir=None,
alphabet=string.ascii_letters + string.digits,
exclude_smallcaps=False,
):
"""Download a set of pre-reviewed fonts.
Args:
cache_dir: Where to save the dataset. By default, data will be
saved to ~/.keras-ocr.
alphabet: An alphabet which we will use to exclude fonts
that are missing relevant characters. By default, this is
set to `string.ascii_letters + string.digits`.
exclude_smallcaps: If True, fonts that are known to use
the same glyph for lowercase and uppercase characters
are excluded.
Returns:
A list of font filepaths.
"""
if cache_dir is None:
cache_dir = os.path.expanduser(os.path.join("~", ".keras-ocr"))
fonts_zip_path = tools.download_and_verify(
url="https://github.com/faustomorales/keras-ocr/releases/download/v0.8.4/fonts.zip",
sha256="d4d90c27a9bc4bf8fff1d2c0a00cfb174c7d5d10f60ed29d5f149ef04d45b700",
filename="fonts.zip",
cache_dir=cache_dir,
)
fonts_dir = os.path.join(cache_dir, "fonts")
if len(glob.glob(os.path.join(fonts_dir, "**/*.ttf"))) != 2746:
print("Unzipping fonts ZIP file.")
with zipfile.ZipFile(fonts_zip_path) as zfile:
zfile.extractall(fonts_dir)
font_filepaths = glob.glob(os.path.join(fonts_dir, "**/*.ttf"))
if exclude_smallcaps:
with open(
tools.download_and_verify(
url="https://github.com/faustomorales/keras-ocr/releases/download/v0.8.4/fonts_smallcaps.txt",
sha256="6531c700523c687f02852087530d1ab3c7cc0b59891bbecc77726fbb0aabe68e",
filename="fonts_smallcaps.txt",
cache_dir=cache_dir,
),
"r",
) as f:
smallcaps_fonts = f.read().split("\n")
font_filepaths = [
filepath
for filepath in font_filepaths
if os.path.join(*filepath.split(os.sep)[-2:]) not in smallcaps_fonts
]
if alphabet != "":
font_filepaths = [
filepath
for filepath in tqdm.tqdm(font_filepaths, desc="Filtering fonts.")
if font_supports_alphabet(filepath=filepath, alphabet=alphabet)
]
return font_filepaths
def convert_lines_to_paragraph(lines):
"""Convert a series of lines, each consisting of
(box, character) tuples, into a multi-line string."""
return "\n".join(["".join([c[-1] for c in line]) for line in lines])
def convert_image_generator_to_recognizer_input(
image_generator, max_string_length, target_width, target_height, margin=0
):
"""Convert an image generator created by get_image_generator
to (image, sentence) tuples for training a recognizer.
Args:
image_generator: An image generator created by get_image_generator
max_string_length: The maximum string length to allow
target_width: The width to warp lines into
target_height: The height to warp lines into
margin: The margin to apply around a single line.
"""
while True:
image, lines = next(image_generator)
if len(lines) == 0:
continue
for line in lines:
line = _strip_line(line[:max_string_length])
if not line:
continue
box, sentence = tools.combine_line(line)
# remove multiple sequential spaces
while " " in sentence:
sentence = sentence.replace(" ", " ")
crop = tools.warpBox(
image=image,
box=box,
target_width=target_width,
target_height=target_height,
margin=margin,
skip_rotate=True,
)
yield crop, sentence
def draw_text_image(
text,
fontsize,
height,
width,
fonts,
use_ligatures=False,
thetaX=0,
thetaY=0,
thetaZ=0,
color=(0, 0, 0),
permitted_contour=None,
draw_contour=False,
):
"""Get a transparent image containing text.
Args:
text: The text to draw on the image
fontsize: The size of text to show.
height: The height of the output image
width: The width of the output image
fonts: A dictionary of {subalphabet: paths_to_font}
thetaX: Rotation about the X axis
thetaY: Rotation about the Y axis
thetaZ: Rotation about the Z axis
color: The color of drawn text
permitted_contour: A contour defining which part of the image
we can put text. If None, the entire canvas is permitted
for text.
use_ligatures: Whether to render ligatures. If True,
ligatures are always used (with an initial check for support
which sometimes yields false positives). If False, ligatures
are never used.
Returns:
An (image, lines) tuple where image is the
transparent text image and lines is a list of lines
where each line itself is a list of (box, character) tuples and
box is an array of points with shape (4, 2) providing the coordinates
of the character box in clockwise order starting from the top left.
"""
if not use_ligatures:
fonts = {
subalphabet: PIL.ImageFont.truetype(font_path, size=fontsize)
if font_path is not None
else PIL.ImageFont.load_default()
for subalphabet, font_path in fonts.items()
}
if use_ligatures:
for subalphabet, font_path in fonts.items():
ligatures_supported = True
font = (
PIL.ImageFont.truetype(font_path, size=fontsize)
if font_path is not None
else PIL.ImageFont.load_default()
)
for ligature in LIGATURES:
try:
font.getsize(ligature)
except UnicodeEncodeError:
ligatures_supported = False
break
if ligatures_supported:
del fonts[subalphabet]
subalphabet += LIGATURE_STRING
fonts[subalphabet] = font
for insert, search in LIGATURES.items():
            for subalphabet in fonts.keys():
if insert in subalphabet:
text = text.replace(search, insert)
character_font_pairs = [
(
character,
next(
font for subalphabet, font in fonts.items() if character in subalphabet
),
)
for character in text
]
M = get_rotation_matrix(
width=width, height=height, thetaZ=thetaZ, thetaX=thetaX, thetaY=thetaY
)
if permitted_contour is None:
permitted_contour = np.array(
[[0, 0], [width, 0], [width, height], [0, height]]
).astype("float32")
character_sizes = np.array(
[font.font.getsize(character) for character, font in character_font_pairs]
)
min_character_size = character_sizes.sum(axis=1).min()
transformed_contour = compute_transformed_contour(
width=width,
height=height,
fontsize=max(min_character_size, 1),
M=M,
contour=permitted_contour,
)
start_x = transformed_contour[:, 0].min()
start_y = transformed_contour[:, 1].min()
end_x = transformed_contour[:, 0].max()
end_y = transformed_contour[:, 1].max()
image = PIL.Image.new(mode="RGBA", size=(width, height), color=(255, 255, 255, 0))
draw = PIL.ImageDraw.Draw(image)
lines_raw: typing.List[typing.List[typing.Tuple[np.ndarray, str]]] = [[]]
x = start_x
y = start_y
max_y = start_y
out_of_space = False
for character_index, (character, font) in enumerate(character_font_pairs):
if out_of_space:
break
(character_width, character_height), (offset_x, offset_y) = character_sizes[
character_index
]
if character in LIGATURES:
subcharacters = LIGATURES[character]
dx = character_width / len(subcharacters)
else:
subcharacters = character
dx = character_width
x2, y2 = (x + character_width + offset_x, y + character_height + offset_y)
while not all(
cv2.pointPolygonTest(contour=transformed_contour, pt=pt, measureDist=False)
>= 0
for pt in [(x, y), (x2, y), (x2, y2), (x, y2)]
):
if x2 > end_x:
dy = max(1, max_y - y)
if y + dy > end_y:
out_of_space = True
break
y += dy
x = start_x
else:
x += fontsize
if len(lines_raw[-1]) > 0:
# We add a new line whether we have advanced
# in the y-direction or not because we also want to separate
# horizontal segments of text.
lines_raw.append([])
x2, y2 = (x + character_width + offset_x, y + character_height + offset_y)
if out_of_space:
break
max_y = max(y + character_height + offset_y, max_y)
draw.text(xy=(x, y), text=character, fill=color + (255,), font=font)
for subcharacter in subcharacters:
lines_raw[-1].append(
(
np.array(
[
[x + offset_x, y + offset_y],
[x + dx + offset_x, y + offset_y],
[x + dx + offset_x, y2],
[x + offset_x, y2],
]
).astype("float32"),
subcharacter,
)
)
x += dx
image = cv2.warpPerspective(src=np.array(image), M=M, dsize=(width, height))
if draw_contour:
image = cv2.drawContours(
image,
contours=[permitted_contour.reshape((-1, 1, 2)).astype("int32")],
contourIdx=0,
color=(255, 0, 0, 255),
thickness=int(width / 100),
)
lines_stripped = _strip_lines(lines_raw)
lines_transformed = [
[
(cv2.perspectiveTransform(src=coords[np.newaxis], m=M)[0], character)
for coords, character in line
]
for line in lines_stripped
]
return image, lines_transformed
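# --- Hedged usage sketch (not part of the original module) ---
# Draws a short lowercase string onto a transparent 256x256 canvas. The font
# mapping is hypothetical: the path below is a placeholder for a real TrueType
# font covering the characters in the subalphabet.
def _example_draw_text_image():
    image, lines = draw_text_image(
        text="hello",
        fontsize=24,
        height=256,
        width=256,
        fonts={"abcdefghijklmnopqrstuvwxyz": "/path/to/YourFont.ttf"},  # hypothetical path
        thetaZ=0.1,  # slight in-plane rotation, in radians
    )
    # image is an RGBA array; lines holds one [(box, character), ...] list per line.
    return image, lines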
def compute_transformed_contour(width, height, fontsize, M, contour, minarea=0.5):
"""Compute the permitted drawing contour
on a padded canvas for an image of a given size.
We assume the canvas is padded with one full image width
and height on left and right, top and bottom respectively.
Args:
width: Width of image
height: Height of image
fontsize: Size of characters
M: The transformation matrix
contour: The contour to which we are limited inside
the rectangle of size width / height
minarea: The minimum area required for a character
slot to qualify as being visible, expressed as
a fraction of the untransformed fontsize x fontsize
slot.
"""
spacing = math.ceil(fontsize / 2)
xslots = int(np.floor(width / spacing))
yslots = int(np.floor(height / spacing))
ys, xs = np.mgrid[:yslots, :xslots]
basis = np.concatenate([xs[..., np.newaxis], ys[..., np.newaxis]], axis=-1).reshape(
(-1, 2)
)
basis *= spacing
slots_pretransform = np.concatenate(
[
(basis + offset)[:, np.newaxis, :]
for offset in [[0, 0], [spacing, 0], [spacing, spacing], [0, spacing]]
],
axis=1,
)
slots = cv2.perspectiveTransform(
src=slots_pretransform.reshape((1, -1, 2)).astype("float32"), m=M
)[0]
inside = (
np.array(
[
cv2.pointPolygonTest(contour=contour, pt=(x, y), measureDist=False) >= 0
for x, y in slots
]
)
.reshape(-1, 4)
.all(axis=1)
)
slots = slots.reshape(-1, 4, 2)
areas = (
np.abs(
(slots[:, 0, 0] * slots[:, 1, 1] - slots[:, 0, 1] * slots[:, 1, 0])
+ (slots[:, 1, 0] * slots[:, 2, 1] - slots[:, 1, 1] * slots[:, 2, 0])
+ (slots[:, 2, 0] * slots[:, 3, 1] - slots[:, 2, 1] * slots[:, 3, 0])
+ (slots[:, 3, 0] * slots[:, 0, 1] - slots[:, 3, 1] * slots[:, 0, 0])
)
/ 2
)
slots_filtered = slots_pretransform[(areas > minarea * spacing * spacing) & inside]
temporary_image = cv2.drawContours(
image=np.zeros((height, width), dtype="uint8"),
contours=slots_filtered,
contourIdx=-1,
color=255,
)
temporary_image = cv2.dilate(
src=temporary_image, kernel=np.ones((spacing, spacing))
)
newContours, _ = cv2.findContours(
temporary_image, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE
)
x, y = slots_filtered[0][0]
contour = newContours[
next(
index
for index, contour in enumerate(newContours)
if cv2.pointPolygonTest(contour=contour, pt=(x, y), measureDist=False) >= 0
)
][:, 0, :]
return contour
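# --- Hedged usage sketch (not part of the original module) ---
# Calls compute_transformed_contour with an identity transform, so the result
# is simply the full-canvas contour reduced to slots large enough to hold a
# character of the given fontsize.
def _example_compute_transformed_contour():
    import numpy as np

    width, height, fontsize = 256, 128, 16
    full_canvas = np.array(
        [[0, 0], [width, 0], [width, height], [0, height]]
    ).astype("float32")
    return compute_transformed_contour(
        width=width, height=height, fontsize=fontsize, M=np.eye(3), contour=full_canvas
    )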
def get_image_generator(
height,
width,
font_groups,
text_generator,
font_size: typing.Union[int, typing.Tuple[int, int]] = 18,
backgrounds: typing.List[typing.Union[str, np.ndarray]] = None,
background_crop_mode="crop",
rotationX: typing.Union[int, typing.Tuple[int, int]] = 0,
rotationY: typing.Union[int, typing.Tuple[int, int]] = 0,
rotationZ: typing.Union[int, typing.Tuple[int, int]] = 0,
margin=0,
use_ligatures=False,
augmenter=None,
draw_contour=False,
draw_contour_text=False,
):
"""Create a generator for images containing text.
Args:
height: The height of the generated image
width: The width of the generated image.
font_groups: A dict mapping of { subalphabet: [path_to_font1, path_to_font2] }.
text_generator: See get_text_generator
        font_size: The font size to use. Alternatively, supply a tuple
and the font size will be randomly selected between
the two values.
backgrounds: A list of paths to image backgrounds or actual images
as numpy arrays with channels in RGB order.
background_crop_mode: One of letterbox or crop, indicates
how backgrounds will be resized to fit on the canvas.
        rotationX: The X-axis text rotation to use. Alternatively, supply a tuple
and the rotation will be randomly selected between
the two values.
        rotationY: The Y-axis text rotation to use. Alternatively, supply a tuple
and the rotation will be randomly selected between
the two values.
        rotationZ: The Z-axis text rotation to use. Alternatively, supply a tuple
and the rotation will be randomly selected between
the two values.
margin: The minimum margin around the edge of the image.
use_ligatures: Whether to render ligatures (see `draw_text_image`)
augmenter: An image augmenter to be applied to backgrounds
draw_contour: Draw the permitted contour onto images (debugging only)
draw_contour_text: Draw the permitted contour inside the text
drawing function.
Yields:
Tuples of (image, lines) where image is the
transparent text image and lines is a list of lines
where each line itself is a list of (box, character) tuples and
box is an array of points with shape (4, 2) providing the coordinates
of the character box in clockwise order starting from the top left.
"""
if backgrounds is None:
backgrounds = [np.zeros((height, width, 3), dtype="uint8")]
alphabet = "".join(font_groups.keys())
assert len(set(alphabet)) == len(
alphabet
), "Each character can appear in the subalphabet for only one font group."
for text, background_index, current_font_groups in zip(
text_generator,
itertools.cycle(range(len(backgrounds))),
zip(
*[
itertools.cycle(
[
(subalphabet, font_filepath)
for font_filepath in font_group_filepaths
]
)
for subalphabet, font_group_filepaths in font_groups.items()
]
),
):
if background_index == 0:
random.shuffle(backgrounds)
current_font_groups = dict(current_font_groups)
current_font_size = (
np.random.randint(low=font_size[0], high=font_size[1])
if isinstance(font_size, tuple)
else font_size
)
current_rotation_X, current_rotation_Y, current_rotation_Z = [
(
np.random.uniform(low=rotation[0], high=rotation[1])
if isinstance(rotation, tuple)
else rotation
)
* np.pi
/ 180
for rotation in [rotationX, rotationY, rotationZ]
]
current_background_filepath_or_array = backgrounds[background_index]
current_background = (
tools.read(current_background_filepath_or_array)
if isinstance(current_background_filepath_or_array, str)
else current_background_filepath_or_array
)
if augmenter is not None:
current_background = augmenter(images=[current_background])[0]
if (
current_background.shape[0] != height
or current_background.shape[1] != width
):
current_background = tools.fit(
current_background,
width=width,
height=height,
mode=background_crop_mode,
)
permitted_contour, isDark = get_maximum_uniform_contour(
image=current_background, fontsize=current_font_size, margin=margin
)
if permitted_contour is None:
# We can't draw on this background. Boo!
continue
random_color_values = np.random.randint(low=0, high=50, size=3)
text_color = (
tuple(np.array([255, 255, 255]) - random_color_values)
if isDark
else tuple(random_color_values)
)
text_image, lines = draw_text_image(
text=text,
width=width,
height=height,
fontsize=current_font_size,
fonts=current_font_groups,
thetaX=current_rotation_X,
thetaY=current_rotation_Y,
thetaZ=current_rotation_Z,
use_ligatures=use_ligatures,
permitted_contour=permitted_contour,
color=text_color,
draw_contour=draw_contour_text,
)
alpha = text_image[..., -1:].astype("float32") / 255
image = (alpha * text_image[..., :3] + (1 - alpha) * current_background).astype(
"uint8"
)
if draw_contour:
image = cv2.drawContours(
image,
contours=[permitted_contour.reshape((-1, 1, 2)).astype("int32")],
contourIdx=0,
color=(255, 0, 0),
thickness=int(width / 100),
)
yield image, lines
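# --- Hedged usage sketch (not part of the original module) ---
# Builds a simple generator over a plain black background. The text generator
# and the font path are hypothetical stand-ins; in practice you would use
# get_text_generator and real font files that cover the chosen subalphabet.
def _example_get_image_generator():
    import itertools

    image_generator = get_image_generator(
        height=256,
        width=256,
        font_groups={"abcdefghijklmnopqrstuvwxyz ": ["/path/to/YourFont.ttf"]},  # hypothetical path
        text_generator=itertools.cycle(["hello world"]),
        font_size=(16, 24),
        rotationZ=(-5, 5),
    )
    image, lines = next(image_generator)  # one synthetic (image, annotations) pair
    return image, lines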
|
the-stack_106_24867
|
#!/usr/bin/env python
# coding: utf-8
# In[83]:
import itertools
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier
import scipy.optimize as opt
import pylab as pl
from sklearn.model_selection import train_test_split
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# In[86]:
df = pd.read_csv("C:/Users/Mohsen/Desktop/Collision Reference No.csv")
df1=df.head(5)
print(df1)
X =df[['Day of Collision','Month of Collision','Hour of Collision (24 hour)','Carriageway Type','Speed Limit','Junction Detail','Junction Control','Pedestrian Crossing – Human Control']]
# In[77]:
Y = df['Collision Severity'].values
# In[38]:
X =preprocessing.StandardScaler().fit(X).transform(X.astype(float))
# In[39]:
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size=0.2, random_state=4)
print ('Train set:', X_train.shape, Y_train.shape)
print ('Test set:', X_test.shape, Y_test.shape)
# In[40]:
#K-Nearest Neighbors
from sklearn.neighbors import KNeighborsClassifier
# In[41]:
k = 4
#Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train,Y_train)
neigh
# In[42]:
Yhat = neigh.predict(X_test)
Yhat[0:5]
# In[34]:
from sklearn import metrics
print("Train set Accuracy: ", metrics.accuracy_score(Y_train, neigh.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(Y_test, Yhat))
Ks = 10
mean_acc = np.zeros((Ks-1))
std_acc = np.zeros((Ks-1))
ConfustionMx = [];
for n in range(1,Ks):
#Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = n).fit(X_train,Y_train)
Yhat=neigh.predict(X_test)
mean_acc[n-1] = metrics.accuracy_score(Y_test, Yhat)
std_acc[n-1]=np.std(Yhat==Y_test)/np.sqrt(Yhat.shape[0])
mean_acc
plt.plot(range(1,Ks),mean_acc,'g')
plt.fill_between(range(1,Ks),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10)
plt.legend(('Accuracy ', '+/- 1xstd'))
plt.ylabel('Accuracy ')
plt.xlabel('Number of Neighbors (K)')
plt.tight_layout()
plt.show()
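# Hedged follow-up (not in the original notebook): report the k that gave the
# best held-out accuracy in the sweep above.
best_k = int(mean_acc.argmax()) + 1
print("The best accuracy was", mean_acc.max(), "with k =", best_k)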
# In[43]:
drugTree = DecisionTreeClassifier(criterion="entropy", max_depth = 4)
drugTree # it shows the default parameters
# In[44]:
drugTree.fit(X_train,Y_train)
predTree = drugTree.predict(X_test)
# In[46]:
from sklearn import metrics
print("DecisionTrees's Accuracy: ", metrics.accuracy_score(Y_test, predTree))
# In[47]:
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,Y_train)
LR
# In[48]:
Yhat = LR.predict(X_test)
Yhat
# In[49]:
Yhat_prob = LR.predict_proba(X_test)
Yhat_prob
# In[50]:
from sklearn.metrics import log_loss
log_loss(Y_test, Yhat_prob)
# In[51]:
score = LR.score(X_test, Y_test)
print(score)
# In[56]:
from sklearn import svm
clf = svm.SVC(kernel='rbf')
clf.fit(X_train, Y_train)
# In[58]:
Yhat = clf.predict(X_test)
Yhat [0:5]
# In[59]:
from sklearn.metrics import classification_report, confusion_matrix
import itertools
# In[60]:
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# In[62]:
# Compute confusion matrix
cnf_matrix = confusion_matrix(Y_test, Yhat, labels=[2,4])
np.set_printoptions(precision=2)
print (classification_report(Y_test, Yhat))
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Benign(2)','Malignant(4)'],normalize= False, title='Confusion matrix')
# In[74]:
from sklearn.metrics import f1_score
f1_score(Y_test, Yhat, average='weighted')
# In[ ]:
# In[66]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
the-stack_106_24868
|
class TasksCli:
def __init__(self, git, tasks):
self.git = git
self.tasks = tasks
def add_subparser(self, subparsers):
task_parser = subparsers.add_parser('task', help="Remember tasks for a given branch")
task_parser.add_argument("add", nargs="?", type=str, default=None, help="Assign a task to current work branch")
task_parser.add_argument("-l", "--list", action="store_true", help="List tasks assigned to current work branch")
task_parser.add_argument("--all", action="store_true", help="Print all open tasks for all branches")
task_parser.add_argument("-d", "--done", nargs=1, type=int, help="Set task done by id")
task_parser.add_argument("--donelist", action="store_true", help="Show done items")
task_parser.add_argument("-r", "--remove", type=int, help="Remove task by id")
task_parser.add_argument("-m", nargs=2, type=int, help="Move task in list")
task_parser.add_argument("--movetop", type=int, help="Move task to top in list")
task_parser.add_argument("--assign", nargs=2, type=str,
help="Assign a task to arbitrary branch. [0] branch name, [1] task")
task_parser.set_defaults(func=self.handle_task)
def handle_task(self, args):
if args.add is not None:
self.assign_task(args.add)
elif args.list:
self.print_tasks()
elif args.all:
self.print_all_tasks()
elif args.done:
self.set_task_done(args.done[0])
elif args.remove:
self.remove_task(args.remove)
elif args.m:
self.move_task(args.m[0], args.m[1])
elif args.movetop:
self.move_task(args.movetop, 0)
elif args.assign:
self.assign_to_branch(args.assign[0], args.assign[1])
elif args.donelist:
self.print_done()
else:
print("Provide more arguments or check help. Until that, here are all tasks:\n")
self.print_tasks()
def assign_task(self, task):
branch = self.git.branch()
self.assign_to_branch(branch, task)
def assign_to_branch(self, branch, task):
self.tasks.assign_task(branch, task)
print("Task assigned to '%s'" % branch)
def print_tasks(self):
br = self.git.branch()
print("Tasks for %s branch:" % br)
for i, task in enumerate(self.tasks.get_tasks(br)):
print(i, task)
print()
def print_all_tasks(self):
all_tasks = self.tasks.get_all_tasks()
for branch in all_tasks.keys():
print("Tasks for %s branch:" % branch)
for i, task in enumerate(all_tasks[branch]):
print(i, task)
print()
def remove_task(self, index):
print("Done" if self.tasks.remove_task(self.git.branch(), index) else "No such task")
def set_task_done(self, index):
print("Done" if self.tasks.set_task_done(self.git.branch(), index) else "No such task")
def move_task(self, old_pos, new_pos):
print("Done" if self.tasks.move_task(self.git.branch(), old_pos, new_pos) else "Index error")
def print_done(self):
br = self.git.branch()
print("Done tasks for '%s' branch:" % br)
for i, task in enumerate(self.tasks.get_done_tasks(br)):
print(i, task)
print()
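# --- Hedged usage sketch (not part of the original module) ---
# Shows how TasksCli might be wired into an argparse entry point. The `git` and
# `tasks` objects are hypothetical collaborators: `git.branch()` must return the
# current branch name and `tasks` must implement the task-store methods used
# above (assign_task, get_tasks, set_task_done, ...).
def _example_main(git, tasks):
    import argparse

    parser = argparse.ArgumentParser(prog="worktool")
    subparsers = parser.add_subparsers()
    TasksCli(git, tasks).add_subparser(subparsers)
    args = parser.parse_args(["task", "--list"])  # e.g. list tasks for this branch
    args.func(args)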
|
the-stack_106_24869
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import time
from frappe import _, msgprint
from frappe.utils import flt, cstr, now, get_datetime_str, file_lock, date_diff
from frappe.utils.background_jobs import enqueue
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name
from six import iteritems, string_types
from werkzeug.exceptions import NotFound, Forbidden
import hashlib, json
from frappe.model import optional_fields, table_fields
from frappe.model.workflow import validate_workflow
from frappe.model.workflow import set_workflow_state_on_action
from frappe.utils.global_search import update_global_search
from frappe.integrations.doctype.webhook import run_webhooks
from frappe.desk.form.document_follow import follow_document
from frappe.core.doctype.server_script.server_script_utils import run_server_script_for_doc_event
# once_only validation
# methods
def get_doc(*args, **kwargs):
"""returns a frappe.model.Document object.
:param arg1: Document dict or DocType name.
:param arg2: [optional] document name.
There are multiple ways to call `get_doc`
# will fetch the latest user object (with child table) from the database
user = get_doc("User", "[email protected]")
# create a new object
user = get_doc({
"doctype":"User"
"email_id": "[email protected]",
"roles: [
{"role": "System Manager"}
]
})
# create new object with keyword arguments
user = get_doc(doctype='User', email_id='[email protected]')
"""
if args:
if isinstance(args[0], BaseDocument):
# already a document
return args[0]
elif isinstance(args[0], string_types):
doctype = args[0]
elif isinstance(args[0], dict):
# passed a dict
kwargs = args[0]
else:
raise ValueError('First non keyword argument must be a string or dict')
if kwargs:
if 'doctype' in kwargs:
doctype = kwargs['doctype']
else:
raise ValueError('"doctype" is a required key')
controller = get_controller(doctype)
if controller:
return controller(*args, **kwargs)
raise ImportError(doctype)
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
def __init__(self, *args, **kwargs):
"""Constructor.
:param arg1: DocType name as string or document **dict**
:param arg2: Document name, if `arg1` is DocType name.
If DocType name and document name are passed, the object will load
all values (including child documents) from the database.
"""
self.doctype = self.name = None
self._default_new_docs = {}
self.flags = frappe._dict()
if args and args[0] and isinstance(args[0], string_types):
			# first argument is doctype
if len(args)==1:
# single
self.doctype = self.name = args[0]
else:
self.doctype = args[0]
if isinstance(args[1], dict):
# filter
self.name = frappe.db.get_value(args[0], args[1], "name")
if self.name is None:
frappe.throw(_("{0} {1} not found").format(_(args[0]), args[1]),
frappe.DoesNotExistError)
else:
self.name = args[1]
self.load_from_db()
return
if args and args[0] and isinstance(args[0], dict):
# first argument is a dict
kwargs = args[0]
if kwargs:
# init base document
super(Document, self).__init__(kwargs)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise ValueError('Illegal arguments')
@staticmethod
def whitelist(f):
"""Decorator: Whitelist method to be called remotely via REST API."""
f.whitelisted = True
return f
def reload(self):
"""Reload document from database"""
self.load_from_db()
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
if not d:
frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
super(Document, self).__init__(d)
if self.name=="DocType" and self.doctype=="DocType":
from frappe.model.meta import DOCTYPE_TABLE_FIELDS
table_fields = DOCTYPE_TABLE_FIELDS
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
children = frappe.db.get_values(df.options,
{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
"*", as_dict=True, order_by="idx asc")
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
def get_latest(self):
if not getattr(self, "latest", None):
self.latest = frappe.get_doc(self.doctype, self.name)
return self.latest
def check_permission(self, permtype='read', permlevel=None):
"""Raise `frappe.PermissionError` if not permitted"""
if not self.has_permission(permtype):
self.raise_no_permission_to(permlevel or permtype)
def has_permission(self, permtype="read", verbose=False):
"""Call `frappe.has_permission` if `self.flags.ignore_permissions`
is not set.
:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
if self.flags.ignore_permissions:
return True
return frappe.has_permission(self.doctype, permtype, self, verbose=verbose)
def raise_no_permission_to(self, perm_type):
"""Raise `frappe.PermissionError`."""
frappe.flags.error_message = _('Insufficient Permission for {0}').format(self.doctype)
raise frappe.PermissionError
def insert(self, ignore_permissions=None, ignore_links=None, ignore_if_duplicate=False,
ignore_mandatory=None, set_name=None, set_child_names=True):
"""Insert the document in the database (as a new document).
This will check for user permissions and execute `before_insert`,
`validate`, `on_update`, `after_insert` methods if they are written.
:param ignore_permissions: Do not check permissions if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
if ignore_links!=None:
self.flags.ignore_links = ignore_links
if ignore_mandatory!=None:
self.flags.ignore_mandatory = ignore_mandatory
self.set("__islocal", True)
self.check_permission("create")
self._set_defaults()
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.run_method("before_insert")
self._validate_links()
self.set_new_name(set_name=set_name, set_child_names=set_child_names)
self.set_parent_in_children()
self.validate_higher_perm_levels()
self.flags.in_insert = True
self.run_before_save_methods()
self._validate()
self.set_docstatus()
self.flags.in_insert = False
# follow document on document creation
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
try:
self.db_insert()
except frappe.DuplicateEntryError as e:
if not ignore_if_duplicate:
raise e
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.flags.in_insert = True
if self.get("amended_from"):
self.copy_attachments_from_amended_from()
# flag to prevent creation of event update log for create and update both
# during document creation
self.flags.update_log_for_doc_creation = True
self.run_post_save_methods()
self.flags.in_insert = False
# delete __islocal
if hasattr(self, "__islocal"):
delattr(self, "__islocal")
# clear unsaved flag
if hasattr(self, "__unsaved"):
delattr(self, "__unsaved")
if not (frappe.flags.in_migrate or frappe.local.flags.in_install or frappe.flags.in_setup_wizard):
follow_document(self.doctype, self.name, frappe.session.user)
return self
def save(self, *args, **kwargs):
"""Wrapper for _save"""
return self._save(*args, **kwargs)
def _save(self, ignore_permissions=None, ignore_version=None):
"""Save the current document in the database in the **DocType**'s table or
`tabSingles` (for single types).
This will check for user permissions and execute
`validate` before updating, `on_update` after updating triggers.
:param ignore_permissions: Do not check permissions if True.
:param ignore_version: Do not save version if True."""
if self.flags.in_print:
return
self.flags.notifications_executed = []
if ignore_permissions!=None:
self.flags.ignore_permissions = ignore_permissions
if ignore_version!=None:
self.flags.ignore_version = ignore_version
if self.get("__islocal") or not self.get("name"):
self.insert()
return
self.check_permission("write", "save")
self.set_user_and_timestamp()
self.set_docstatus()
self.check_if_latest()
self.set_parent_in_children()
self.set_name_in_children()
self.validate_higher_perm_levels()
self._validate_links()
self.run_before_save_methods()
if self._action != "cancel":
self._validate()
if self._action == "update_after_submit":
self.validate_update_after_submit()
self.set_docstatus()
# parent
if self.meta.issingle:
self.update_single(self.get_valid_dict())
else:
self.db_update()
self.update_children()
self.run_post_save_methods()
# clear unsaved flag
if hasattr(self, "__unsaved"):
delattr(self, "__unsaved")
return self
def copy_attachments_from_amended_from(self):
"""Copy attachments from `amended_from`"""
from frappe.desk.form.load import get_attachments
#loop through attachments
for attach_item in get_attachments(self.doctype, self.amended_from):
#save attachments to new doc
_file = frappe.get_doc({
"doctype": "File",
"file_url": attach_item.file_url,
"file_name": attach_item.file_name,
"attached_to_name": self.name,
"attached_to_doctype": self.doctype,
"folder": "Home/Attachments"})
_file.save()
def update_children(self):
"""update child tables"""
for df in self.meta.get_table_fields():
self.update_child_table(df.fieldname, df)
def update_child_table(self, fieldname, df=None):
"""sync child table for given fieldname"""
rows = []
if not df:
df = self.meta.get_field(fieldname)
for d in self.get(df.fieldname):
d.db_update()
rows.append(d.name)
if df.options in (self.flags.ignore_children_type or []):
# do not delete rows for this because of flags
# hack for docperm :(
return
if rows:
# select rows that do not match the ones in the document
deleted_rows = frappe.db.sql("""select name from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s
and name not in ({1})""".format(df.options, ','.join(['%s'] * len(rows))),
[self.name, self.doctype, fieldname] + rows)
if len(deleted_rows) > 0:
# delete rows that do not match the ones in the document
frappe.db.sql("""delete from `tab{0}` where name in ({1})""".format(df.options,
','.join(['%s'] * len(deleted_rows))), tuple(row[0] for row in deleted_rows))
else:
# no rows found, delete all rows
frappe.db.sql("""delete from `tab{0}` where parent=%s
and parenttype=%s and parentfield=%s""".format(df.options),
(self.name, self.doctype, fieldname))
def get_doc_before_save(self):
return getattr(self, '_doc_before_save', None)
def set_new_name(self, force=False, set_name=None, set_child_names=True):
"""Calls `frappe.naming.set_new_name` for parent and child docs."""
if self.flags.name_set and not force:
return
if set_name:
self.name = set_name
else:
set_new_name(self)
if set_child_names:
# set name for children
for d in self.get_all_children():
set_new_name(d)
self.flags.name_set = True
def get_title(self):
"""Get the document title based on title_field or `title` or `name`"""
return self.get(self.meta.get_title_field())
def set_title_field(self):
"""Set title field based on template"""
def get_values():
values = self.as_dict()
# format values
for key, value in iteritems(values):
if value==None:
values[key] = ""
return values
if self.meta.get("title_field")=="title":
df = self.meta.get_field(self.meta.title_field)
if df.options:
self.set(df.fieldname, df.options.format(**get_values()))
elif self.is_new() and not self.get(df.fieldname) and df.default:
# set default title for new transactions (if default)
self.set(df.fieldname, df.default.format(**get_values()))
def update_single(self, d):
"""Updates values for Single type Document in `tabSingles`."""
frappe.db.sql("""delete from `tabSingles` where doctype=%s""", self.doctype)
for field, value in iteritems(d):
if field != "doctype":
frappe.db.sql("""insert into `tabSingles` (doctype, field, value)
values (%s, %s, %s)""", (self.doctype, field, value))
if self.doctype in frappe.db.value_cache:
del frappe.db.value_cache[self.doctype]
def set_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
if not self.creation:
self.creation = self.modified
if not self.owner:
self.owner = self.modified_by
for d in self.get_all_children():
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
frappe.flags.currently_saving.append((self.doctype, self.name))
def set_docstatus(self):
if self.docstatus==None:
self.docstatus=0
for d in self.get_all_children():
d.docstatus = self.docstatus
def _validate(self):
self._validate_mandatory()
self._validate_data_fields()
self._validate_selects()
self._validate_length()
self._extract_images_from_text_editor()
self._sanitize_content()
self._save_passwords()
self.validate_workflow()
children = self.get_all_children()
for d in children:
d._validate_data_fields()
d._validate_selects()
d._validate_length()
d._extract_images_from_text_editor()
d._sanitize_content()
d._save_passwords()
if self.is_new():
# don't set fields like _assign, _comments for new doc
for fieldname in optional_fields:
self.set(fieldname, None)
else:
self.validate_set_only_once()
def validate_workflow(self):
"""Validate if the workflow transition is valid"""
if frappe.flags.in_install == 'frappe': return
workflow = self.meta.get_workflow()
if workflow:
validate_workflow(self)
if not self._action == 'save':
set_workflow_state_on_action(self, workflow, self._action)
def validate_set_only_once(self):
"""Validate that fields are not changed if not in insert"""
set_only_once_fields = self.meta.get_set_only_once_fields()
if set_only_once_fields and self._doc_before_save:
# document exists before saving
for field in set_only_once_fields:
fail = False
value = self.get(field.fieldname)
original_value = self._doc_before_save.get(field.fieldname)
if field.fieldtype in table_fields:
fail = not self.is_child_table_same(field.fieldname)
elif field.fieldtype in ('Date', 'Datetime', 'Time'):
fail = str(value) != str(original_value)
else:
fail = value != original_value
if fail:
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(field.fieldname)),
frappe.CannotChangeConstantError)
return False
def is_child_table_same(self, fieldname):
"""Validate child table is same as original table before saving"""
value = self.get(fieldname)
original_value = self._doc_before_save.get(fieldname)
same = True
if len(original_value) != len(value):
same = False
else:
# check all child entries
for i, d in enumerate(original_value):
new_child = value[i].as_dict(convert_dates_to_str = True)
original_child = d.as_dict(convert_dates_to_str = True)
# all fields must be same other than modified and modified_by
for key in ('modified', 'modified_by', 'creation'):
del new_child[key]
del original_child[key]
if original_child != new_child:
same = False
break
return same
def apply_fieldlevel_read_permissions(self):
"""Remove values the user is not allowed to read (called when loading in desk)"""
has_higher_permlevel = False
for p in self.get_permissions():
if p.permlevel > 0:
has_higher_permlevel = True
break
if not has_higher_permlevel:
return
has_access_to = self.get_permlevel_access('read')
for df in self.meta.fields:
if df.permlevel and not df.permlevel in has_access_to:
self.set(df.fieldname, None)
for table_field in self.meta.get_table_fields():
for df in frappe.get_meta(table_field.options).fields or []:
if df.permlevel and not df.permlevel in has_access_to:
for child in self.get(table_field.fieldname) or []:
child.set(df.fieldname, None)
def validate_higher_perm_levels(self):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
if self.flags.ignore_permissions or frappe.flags.in_install:
return
has_access_to = self.get_permlevel_access()
high_permlevel_fields = self.meta.get_high_permlevel_fields()
if high_permlevel_fields:
self.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
# If new record then don't reset the values for child table
if self.is_new(): return
# check for child tables
for df in self.meta.get_table_fields():
high_permlevel_fields = frappe.get_meta(df.options).get_high_permlevel_fields()
if high_permlevel_fields:
for d in self.get(df.fieldname):
d.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
def get_permlevel_access(self, permission_type='write'):
if not hasattr(self, "_has_access_to"):
self._has_access_to = {}
if not self._has_access_to.get(permission_type):
self._has_access_to[permission_type] = []
roles = frappe.get_roles()
for perm in self.get_permissions():
if perm.role in roles and perm.permlevel > 0 and perm.get(permission_type):
if perm.permlevel not in self._has_access_to[permission_type]:
self._has_access_to[permission_type].append(perm.permlevel)
return self._has_access_to[permission_type]
def has_permlevel_access_to(self, fieldname, df=None, permission_type='read'):
if not df:
df = self.meta.get_field(fieldname)
return df.permlevel in self.get_permlevel_access(permission_type)
def get_permissions(self):
if self.meta.istable:
# use parent permissions
permissions = frappe.get_meta(self.parenttype).permissions
else:
permissions = self.meta.permissions
return permissions
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype, as_dict=True)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options, as_dict=True)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
def check_if_latest(self):
"""Checks if `modified` timestamp provided by document being updated is same as the
		`modified` timestamp in the database. If there is a difference, the document has been
updated in the database after the current copy was read. Will throw an error if
timestamps don't match.
Will also validate document transitions (Save > Submit > Cancel) calling
`self.check_docstatus_transition`."""
conflict = False
self._action = "save"
if not self.get('__islocal'):
if self.meta.issingle:
modified = frappe.db.sql("""select value from tabSingles
where doctype=%s and field='modified' for update""", self.doctype)
modified = modified and modified[0][0]
if modified and modified != cstr(self._original_modified):
conflict = True
else:
tmp = frappe.db.sql("""select modified, docstatus from `tab{0}`
where name = %s for update""".format(self.doctype), self.name, as_dict=True)
if not tmp:
frappe.throw(_("Record does not exist"))
else:
tmp = tmp[0]
modified = cstr(tmp.modified)
if modified and modified != cstr(self._original_modified):
conflict = True
self.check_docstatus_transition(tmp.docstatus)
if conflict:
frappe.msgprint(_("Error: Document has been modified after you have opened it") \
+ (" (%s, %s). " % (modified, self.modified)) \
+ _("Please refresh to get the latest document."),
raise_exception=frappe.TimestampMismatchError)
else:
self.check_docstatus_transition(0)
def check_docstatus_transition(self, docstatus):
"""Ensures valid `docstatus` transition.
Valid transitions are (number in brackets is `docstatus`):
- Save (0) > Save (0)
- Save (0) > Submit (1)
- Submit (1) > Submit (1)
- Submit (1) > Cancel (2)
"""
if not self.docstatus:
self.docstatus = 0
if docstatus==0:
if self.docstatus==0:
self._action = "save"
elif self.docstatus==1:
self._action = "submit"
self.check_permission("submit")
else:
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 0 to 2"))
elif docstatus==1:
if self.docstatus==1:
self._action = "update_after_submit"
self.check_permission("submit")
elif self.docstatus==2:
self._action = "cancel"
self.check_permission("cancel")
else:
raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 1 to 0"))
elif docstatus==2:
raise frappe.ValidationError(_("Cannot edit cancelled document"))
def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def set_name_in_children(self):
# Set name for any new children
for d in self.get_all_children():
if not d.name:
set_new_name(d)
def validate_update_after_submit(self):
if self.flags.ignore_validate_update_after_submit:
return
self._validate_update_after_submit()
for d in self.get_all_children():
if d.is_new() and self.meta.get_field(d.parentfield).allow_on_submit:
# in case of a new row, don't validate allow on submit, if table is allow on submit
continue
d._validate_update_after_submit()
# TODO check only allowed values are updated
def _validate_mandatory(self):
if self.flags.ignore_mandatory:
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
if frappe.flags.print_messages:
print(self.as_json().encode("utf-8"))
raise frappe.MandatoryError('[{doctype}, {name}]: {fields}'.format(
fields=", ".join((each[0] for each in missing)),
doctype=self.doctype,
name=self.name))
def _validate_links(self):
if self.flags.ignore_links or self._action == "cancel":
return
invalid_links, cancelled_links = self.get_invalid_links()
for d in self.get_all_children():
result = d.get_invalid_links(is_submittable=self.meta.is_submittable)
invalid_links.extend(result[0])
cancelled_links.extend(result[1])
if invalid_links:
msg = ", ".join((each[2] for each in invalid_links))
frappe.throw(_("Could not find {0}").format(msg),
frappe.LinkValidationError)
if cancelled_links:
msg = ", ".join((each[2] for each in cancelled_links))
frappe.throw(_("Cannot link cancelled document: {0}").format(msg),
frappe.CancelledLinkError)
def get_all_children(self, parenttype=None):
"""Returns all children documents from **Table** type field in a list."""
ret = []
for df in self.meta.get("fields", {"fieldtype": ['in', table_fields]}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def run_method(self, method, *args, **kwargs):
"""run standard triggers, plus those in hooks"""
if "flags" in kwargs:
del kwargs["flags"]
if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
else:
# hack! to run hooks even if method does not exist
fn = lambda self, *args, **kwargs: None
fn.__name__ = str(method)
out = Document.hook(fn)(self, *args, **kwargs)
self.run_notifications(method)
run_webhooks(self, method)
run_server_script_for_doc_event(self, method)
return out
def run_trigger(self, method, *args, **kwargs):
return self.run_method(method, *args, **kwargs)
def run_notifications(self, method):
"""Run notifications for this method"""
if frappe.flags.in_import or frappe.flags.in_patch or frappe.flags.in_install:
return
if self.flags.notifications_executed==None:
self.flags.notifications_executed = []
from frappe.email.doctype.notification.notification import evaluate_alert
if self.flags.notifications == None:
alerts = frappe.cache().hget('notifications', self.doctype)
if alerts==None:
alerts = frappe.get_all('Notification', fields=['name', 'event', 'method'],
filters={'enabled': 1, 'document_type': self.doctype})
frappe.cache().hset('notifications', self.doctype, alerts)
self.flags.notifications = alerts
if not self.flags.notifications:
return
def _evaluate_alert(alert):
if not alert.name in self.flags.notifications_executed:
evaluate_alert(self, alert.name, alert.event)
self.flags.notifications_executed.append(alert.name)
event_map = {
"on_update": "Save",
"after_insert": "New",
"on_submit": "Submit",
"on_cancel": "Cancel"
}
if not self.flags.in_insert:
# value change is not applicable in insert
event_map['on_change'] = 'Value Change'
for alert in self.flags.notifications:
event = event_map.get(method, None)
if event and alert.event == event:
_evaluate_alert(alert)
elif alert.event=='Method' and method == alert.method:
_evaluate_alert(alert)
@whitelist.__func__
def _submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self.docstatus = 1
self.save()
@whitelist.__func__
def _cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self.docstatus = 2
self.save()
@whitelist.__func__
def submit(self):
"""Submit the document. Sets `docstatus` = 1, then saves."""
self._submit()
@whitelist.__func__
def cancel(self):
"""Cancel the document. Sets `docstatus` = 2, then saves."""
self._cancel()
def delete(self):
"""Delete document."""
frappe.delete_doc(self.doctype, self.name, flags=self.flags)
def run_before_save_methods(self):
"""Run standard methods before `INSERT` or `UPDATE`. Standard Methods are:
- `validate`, `before_save` for **Save**.
- `validate`, `before_submit` for **Submit**.
- `before_cancel` for **Cancel**
- `before_update_after_submit` for **Update after Submit**
Will also update title_field if set"""
self.load_doc_before_save()
self.reset_seen()
if self.flags.ignore_validate:
return
if self._action=="save":
self.run_method("before_validate")
self.run_method("validate")
self.run_method("before_save")
elif self._action=="submit":
self.run_method("before_validate")
self.run_method("validate")
self.run_method("before_submit")
elif self._action=="cancel":
self.run_method("before_cancel")
elif self._action=="update_after_submit":
self.run_method("before_update_after_submit")
self.set_title_field()
def load_doc_before_save(self):
"""Save load document from db before saving"""
self._doc_before_save = None
if not self.is_new():
try:
self._doc_before_save = frappe.get_doc(self.doctype, self.name)
except frappe.DoesNotExistError:
self._doc_before_save = None
frappe.clear_last_message()
def run_post_save_methods(self):
"""Run standard methods after `INSERT` or `UPDATE`. Standard Methods are:
- `on_update` for **Save**.
- `on_update`, `on_submit` for **Submit**.
- `on_cancel` for **Cancel**
- `update_after_submit` for **Update after Submit**"""
doc_before_save = self.get_doc_before_save()
if self._action=="save":
self.run_method("on_update")
elif self._action=="submit":
self.run_method("on_update")
self.run_method("on_submit")
elif self._action=="cancel":
self.run_method("on_cancel")
self.check_no_back_links_exist()
elif self._action=="update_after_submit":
self.run_method("on_update_after_submit")
self.clear_cache()
self.notify_update()
update_global_search(self)
if getattr(self.meta, 'track_changes', False) and self._doc_before_save and not self.flags.ignore_version:
self.save_version()
self.run_method('on_change')
if (self.doctype, self.name) in frappe.flags.currently_saving:
frappe.flags.currently_saving.remove((self.doctype, self.name))
# make event update log for doctypes having event consumers
if not frappe.flags.in_install and not frappe.flags.in_migrate and check_doctype_has_consumers(self.doctype):
if self.flags.update_log_for_doc_creation:
make_event_update_log(self, update_type='Create')
self.flags.update_log_for_doc_creation = False
else:
from frappe.event_streaming.doctype.event_update_log.event_update_log import get_update
diff = get_update(doc_before_save, self)
if diff:
doc = self
doc.diff = diff
make_event_update_log(doc, update_type='Update')
self.latest = None
def clear_cache(self):
frappe.clear_document_cache(self.doctype, self.name)
def reset_seen(self):
"""Clear _seen property and set current user as seen"""
if getattr(self.meta, 'track_seen', False):
frappe.db.set_value(self.doctype, self.name, "_seen", json.dumps([frappe.session.user]), update_modified=False)
def notify_update(self):
"""Publish realtime that the current document is modified"""
frappe.publish_realtime("doc_update", {"modified": self.modified, "doctype": self.doctype, "name": self.name},
doctype=self.doctype, docname=self.name, after_commit=True)
if not self.meta.get("read_only") and not self.meta.get("issingle") and \
not self.meta.get("istable"):
data = {
"doctype": self.doctype,
"name": self.name,
"user": frappe.session.user
}
frappe.publish_realtime("list_update", data, after_commit=True)
def db_set(self, fieldname, value=None, update_modified=True, notify=False, commit=False):
"""Set a value in the document object, update the timestamp and update the database.
WARNING: This method does not trigger controller validations and should
be used very carefully.
:param fieldname: fieldname of the property to be updated, or a {"field":"value"} dictionary
:param value: value of the property to be updated
:param update_modified: default True. updates the `modified` and `modified_by` properties
		:param notify: default False. run doc.notify_update() to send updates via socketio
:param commit: default False. run frappe.db.commit()
"""
if isinstance(fieldname, dict):
self.update(fieldname)
else:
self.set(fieldname, value)
if update_modified and (self.doctype, self.name) not in frappe.flags.currently_saving:
# don't update modified timestamp if called from post save methods
# like on_update or on_submit
self.set("modified", now())
self.set("modified_by", frappe.session.user)
self.load_doc_before_save()
# to trigger notification on value change
self.run_method('before_change')
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
self.run_method('on_change')
if notify:
self.notify_update()
self.clear_cache()
if commit:
frappe.db.commit()
def db_get(self, fieldname):
"""get database value for this fieldname"""
return frappe.db.get_value(self.doctype, self.name, fieldname)
def check_no_back_links_exist(self):
"""Check if document links to any active document before Cancel."""
from frappe.model.delete_doc import check_if_doc_is_linked, check_if_doc_is_dynamically_linked
if not self.flags.ignore_links:
check_if_doc_is_linked(self, method="Cancel")
check_if_doc_is_dynamically_linked(self, method="Cancel")
def save_version(self):
"""Save version info"""
version = frappe.new_doc('Version')
if version.set_diff(self._doc_before_save, self):
version.insert(ignore_permissions=True)
if not frappe.flags.in_migrate:
follow_document(self.doctype, self.name, frappe.session.user)
@staticmethod
def hook(f):
"""Decorator: Make method `hookable` (i.e. extensible by another app).
Note: If each hooked method returns a value (dict), then all returns are
collated in one dict and returned. Ideally, don't return values in hookable
methods, set properties in the document."""
def add_to_return_value(self, new_return_value):
if isinstance(new_return_value, dict):
if not self.get("_return_value"):
self._return_value = {}
self._return_value.update(new_return_value)
else:
self._return_value = new_return_value or self.get("_return_value")
def compose(fn, *hooks):
def runner(self, method, *args, **kwargs):
add_to_return_value(self, fn(self, *args, **kwargs))
for f in hooks:
add_to_return_value(self, f(self, method, *args, **kwargs))
return self._return_value
return runner
def composer(self, *args, **kwargs):
hooks = []
method = f.__name__
doc_events = frappe.get_doc_hooks()
for handler in doc_events.get(self.doctype, {}).get(method, []) \
+ doc_events.get("*", {}).get(method, []):
hooks.append(frappe.get_attr(handler))
composed = compose(f, *hooks)
return composed(self, method, *args, **kwargs)
return composer
def is_whitelisted(self, method):
fn = getattr(self, method, None)
if not fn:
raise NotFound("Method {0} not found".format(method))
elif not getattr(fn, "whitelisted", False):
raise Forbidden("Method {0} not whitelisted".format(method))
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
"""Check that value of fieldname should be 'condition' val2
else throw Exception."""
error_condition_map = {
"in": _("one of"),
"not in": _("none of"),
"^": _("beginning with"),
}
if not doc:
doc = self
val1 = doc.get_value(fieldname)
df = doc.meta.get_field(fieldname)
val2 = doc.cast(val2, df)
if not frappe.compare(val1, condition, val2):
label = doc.meta.get_label(fieldname)
condition_str = error_condition_map.get(condition, condition)
if doc.parentfield:
msg = _("Incorrect value in row {0}: {1} must be {2} {3}").format(doc.idx, label, condition_str, val2)
else:
msg = _("Incorrect value: {0} must be {1} {2}").format(label, condition_str, val2)
# raise passed exception or True
msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
"""Raise exception if Table field is empty."""
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
"""Round floats for all `Currency`, `Float`, `Percent` fields for the given doc.
:param doc: Document whose numeric properties are to be rounded.
:param fieldnames: [Optional] List of fields to be rounded."""
if not fieldnames:
fieldnames = (df.fieldname for df in
doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float", "Percent"]]}))
for fieldname in fieldnames:
doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield)))
def get_url(self):
"""Returns Desk URL for this document. `/desk#Form/{doctype}/{name}`"""
return "/desk#Form/{doctype}/{name}".format(doctype=self.doctype, name=self.name)
def add_comment(self, comment_type='Comment', text=None, comment_email=None, link_doctype=None, link_name=None, comment_by=None):
"""Add a comment to this document.
:param comment_type: e.g. `Comment`. See Communication for more info."""
out = frappe.get_doc({
"doctype":"Comment",
'comment_type': comment_type,
"comment_email": comment_email or frappe.session.user,
"comment_by": comment_by,
"reference_doctype": self.doctype,
"reference_name": self.name,
"content": text or comment_type,
"link_doctype": link_doctype,
"link_name": link_name
}).insert(ignore_permissions=True)
return out
def add_seen(self, user=None):
"""add the given/current user to list of users who have seen this document (_seen)"""
if not user:
user = frappe.session.user
if self.meta.track_seen:
_seen = self.get('_seen') or []
_seen = frappe.parse_json(_seen)
if user not in _seen:
_seen.append(user)
frappe.db.set_value(self.doctype, self.name, '_seen', json.dumps(_seen), update_modified=False)
frappe.local.flags.commit = True
def add_viewed(self, user=None):
"""add log to communication when a user views a document"""
if not user:
user = frappe.session.user
if hasattr(self.meta, 'track_views') and self.meta.track_views:
frappe.get_doc({
"doctype": "View Log",
"viewed_by": frappe.session.user,
"reference_doctype": self.doctype,
"reference_name": self.name,
}).insert(ignore_permissions=True)
frappe.local.flags.commit = True
def get_signature(self):
"""Returns signature (hash) for private URL."""
return hashlib.sha224(get_datetime_str(self.creation).encode()).hexdigest()
def get_liked_by(self):
liked_by = getattr(self, "_liked_by", None)
if liked_by:
return json.loads(liked_by)
else:
return []
def set_onload(self, key, value):
if not self.get("__onload"):
self.set("__onload", frappe._dict())
self.get("__onload")[key] = value
def get_onload(self, key=None):
if not key:
return self.get("__onload", frappe._dict())
return self.get('__onload')[key]
def queue_action(self, action, **kwargs):
"""Run an action in background. If the action has an inner function,
like _submit for submit, it will call that instead"""
# call _submit instead of submit, so you can override submit to call
# run_delayed based on some action
# See: Stock Reconciliation
if hasattr(self, '_' + action):
action = '_' + action
if file_lock.lock_exists(self.get_signature()):
frappe.throw(_('This document is currently queued for execution. Please try again'),
title=_('Document Queued'))
self.lock()
enqueue('frappe.model.document.execute_action', doctype=self.doctype, name=self.name,
action=action, **kwargs)
def lock(self, timeout=None):
"""Creates a lock file for the given document. If timeout is set,
it will retry every 1 second for acquiring the lock again
		:param timeout: Timeout in seconds (number of 1-second retries), default None"""
signature = self.get_signature()
if file_lock.lock_exists(signature):
lock_exists = True
if timeout:
for i in range(timeout):
time.sleep(1)
if not file_lock.lock_exists(signature):
lock_exists = False
break
if lock_exists:
raise frappe.DocumentLockedError
file_lock.create_lock(signature)
def unlock(self):
"""Delete the lock file for this document"""
file_lock.delete_lock(self.get_signature())
# validation helpers
def validate_from_to_dates(self, from_date_field, to_date_field):
"""
Generic validation to verify date sequence
"""
if date_diff(self.get(to_date_field), self.get(from_date_field)) < 0:
frappe.throw(_('{0} must be after {1}').format(
frappe.bold(self.meta.get_label(to_date_field)),
frappe.bold(self.meta.get_label(from_date_field)),
), frappe.exceptions.InvalidDates)
def get_assigned_users(self):
assignments = frappe.get_all('ToDo',
fields=['owner'],
filters={
'reference_type': self.doctype,
'reference_name': self.name,
'status': ('!=', 'Cancelled'),
})
users = set([assignment.owner for assignment in assignments])
return users
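# --- Hedged usage sketch (not part of the original frappe source) ---
# A typical controller subclasses Document and overrides the standard hooks
# described in run_before_save_methods / run_post_save_methods above. The
# doctype fields used here ("title", "status") are hypothetical.
class _ExampleTaskController(Document):
	def validate(self):
		# runs on every save, before the document is written
		if not self.get("title"):
			frappe.throw(_("Title is required"))

	def on_submit(self):
		# runs after a successful submit (docstatus 0 -> 1)
		self.db_set("status", "Submitted")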
def execute_action(doctype, name, action, **kwargs):
"""Execute an action on a document (called by background worker)"""
doc = frappe.get_doc(doctype, name)
doc.unlock()
try:
getattr(doc, action)(**kwargs)
except Exception:
frappe.db.rollback()
# add a comment (?)
if frappe.local.message_log:
msg = json.loads(frappe.local.message_log[-1]).get('message')
else:
			msg = '<pre><code>' + frappe.get_traceback() + '</code></pre>'
doc.add_comment('Comment', _('Action Failed') + '<br><br>' + msg)
doc.notify_update()
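# --- Hedged usage sketch (not part of the original frappe source) ---
# queue_action (defined on Document above) enqueues execute_action in a
# background worker. The doctype and name below are hypothetical.
def _example_queue_submit():
	doc = frappe.get_doc("Some DocType", "DOC-0001")  # hypothetical record
	# Locks the document and runs doc._submit() via execute_action once the
	# background job is picked up.
	doc.queue_action("submit")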
def make_event_update_log(doc, update_type):
"""Save update info for doctypes that have event consumers"""
if update_type != 'Delete':
# diff for update type, doc for create type
data = frappe.as_json(doc) if not doc.get('diff') else frappe.as_json(doc.diff)
else:
data = None
log_doc = frappe.get_doc({
'doctype': 'Event Update Log',
'update_type': update_type,
'ref_doctype': doc.doctype,
'docname': doc.name,
'data': data
})
log_doc.insert(ignore_permissions=True)
frappe.db.commit()
def check_doctype_has_consumers(doctype):
"""Check if doctype has event consumers for event streaming"""
if not frappe.db.exists("DocType", "Event Consumer"):
return False
event_consumers = frappe.get_all('Event Consumer')
for event_consumer in event_consumers:
consumer = frappe.get_doc('Event Consumer', event_consumer.name)
for entry in consumer.consumer_doctypes:
if doctype == entry.ref_doctype and entry.status == 'Approved':
return True
return False
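# --- Hedged usage sketch (not part of the original frappe source) ---
# Document.hook (above) collects handlers registered under doc_events in an
# app's hooks.py and runs them after the controller method of the same name.
# A hypothetical registration could look like this:
#
#     # hooks.py
#     doc_events = {
#         "ToDo": {"on_update": "my_app.events.notify_todo_update"}
#     }
#
# And the handler it points at, in a hypothetical my_app/events.py:
def notify_todo_update(doc, method=None):
	# Invoked by Document.run_method("on_update") through the hook composer;
	# `doc` is the saved document and `method` is the event name.
	frappe.logger().info("ToDo {0} was updated".format(doc.name))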
|
the-stack_106_24871
|
""" Functions to implement the randomized optimization and search algorithms.
"""
# Author: Genevieve Hayes (modified by Andrew Rollings)
# License: BSD 3 clause
import numpy as np
import itertools
from mlrose_hiive.decorators import short_name
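# --- Hedged usage sketch (not part of the original source) ---
# Minimal example of calling mimic() (defined below) on a discrete problem,
# assuming the DiscreteOpt and OneMax classes shipped with mlrose_hiive.
def _example_mimic_run():
    from mlrose_hiive import DiscreteOpt, OneMax

    problem = DiscreteOpt(length=20, fitness_fn=OneMax(), maximize=True, max_val=2)
    best_state, best_fitness, fitness_curve = mimic(
        problem, pop_size=200, keep_pct=0.2, max_attempts=10,
        curve=True, random_state=1,
    )
    return best_state, best_fitness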
@short_name('mimic')
def mimic(problem, pop_size=200, keep_pct=0.2, max_attempts=10,
max_iters=np.inf, curve=False, random_state=None,
state_fitness_callback=None, callback_user_info=None, noise=0.0):
"""Use MIMIC to find the optimum for a given optimization problem.
Parameters
----------
problem: optimization object
Object containing fitness function optimization problem to be solved.
For example, :code:`DiscreteOpt()` or :code:`TSPOpt()`.
pop_size: int, default: 200
Size of population to be used in algorithm.
keep_pct: float, default: 0.2
Proportion of samples to keep at each iteration of the algorithm,
expressed as a value between 0 and 1.
max_attempts: int, default: 10
Maximum number of attempts to find a better neighbor at each step.
max_iters: int, default: np.inf
Maximum number of iterations of the algorithm.
curve: bool, default: False
Boolean to keep fitness values for a curve.
If :code:`False`, then no curve is stored.
If :code:`True`, then a history of fitness values is provided as a
third return value.
random_state: int, default: None
If random_state is a positive integer, random_state is the seed used
by np.random.seed(); otherwise, the random seed is not set.
    state_fitness_callback: function taking keyword parameters, default: None
        If specified, this callback is invoked once per iteration with keyword
        arguments (iteration, attempt, done, state, fitness, curve, user_data);
        the initial call at iteration 0 passes only iteration, state, fitness
        and user_data. Return True to continue iterating, or False to stop.
callback_user_info: any, default: None
User data passed as last parameter of callback.
Returns
-------
best_state: array
Numpy array containing state that optimizes the fitness function.
best_fitness: float
Value of fitness function at best state.
fitness_curve: array
Numpy array containing the fitness at every iteration.
Only returned if input argument :code:`curve` is :code:`True`.
References
----------
De Bonet, J., C. Isbell, and P. Viola (1997). MIMIC: Finding Optima by
Estimating Probability Densities. In *Advances in Neural Information
Processing Systems* (NIPS) 9, pp. 424–430.
Note
----
MIMIC cannot be used for solving continuous-state optimization problems.
"""
if problem.get_prob_type() == 'continuous':
raise Exception("""problem type must be discrete or tsp.""")
if pop_size < 0:
raise Exception("""pop_size must be a positive integer.""")
elif not isinstance(pop_size, int):
if pop_size.is_integer():
pop_size = int(pop_size)
else:
raise Exception("""pop_size must be a positive integer.""")
if (keep_pct < 0) or (keep_pct > 1):
raise Exception("""keep_pct must be between 0 and 1.""")
if (not isinstance(max_attempts, int) and not max_attempts.is_integer()) \
or (max_attempts < 0):
raise Exception("""max_attempts must be a positive integer.""")
if (not isinstance(max_iters, int) and max_iters != np.inf
and not max_iters.is_integer()) or (max_iters < 0):
raise Exception("""max_iters must be a positive integer.""")
if (noise < 0) or (noise > 0.1):
raise Exception("""noise must be between 0 and 0.1.""")
else:
problem.noise = noise
# Set random seed
if isinstance(random_state, int) and random_state > 0:
np.random.seed(random_state)
fitness_curve = []
fitness_call_count = []
# Initialize problem, population and attempts counter
problem.reset()
problem.random_pop(pop_size)
if state_fitness_callback is not None:
# initial call with base data
state_fitness_callback(iteration=0,
state=problem.get_state(),
fitness=problem.get_adjusted_fitness(),
user_data=callback_user_info)
attempts = 0
iters = 0
continue_iterating = True
while (attempts < max_attempts) and (iters < max_iters):
iters += 1
# Get top n percent of population
problem.find_top_pct(keep_pct)
# Update probability estimates
problem.eval_node_probs()
# Generate new sample
new_sample = problem.sample_pop(pop_size)
problem.set_population(new_sample)
next_state = problem.best_child()
next_fitness = problem.eval_fitness(next_state)
# If best child is an improvement,
# move to that state and reset attempts counter
current_fitness = problem.get_fitness()
if next_fitness > current_fitness:
problem.set_state(next_state)
attempts = 0
else:
attempts += 1
if curve:
fitness_curve.append(problem.get_adjusted_fitness())
fitness_call_count.append(problem.fitness_call_counter.__reduce__()[1][0])
# invoke callback
if state_fitness_callback is not None:
max_attempts_reached = (attempts == max_attempts) or (iters == max_iters) or problem.can_stop()
continue_iterating = state_fitness_callback(iteration=iters,
attempt=attempts + 1,
done=max_attempts_reached,
state=problem.get_state(),
fitness=problem.get_adjusted_fitness(),
curve=np.asarray(fitness_curve) if curve else None,
user_data=callback_user_info)
# break out if requested
if not continue_iterating:
break
best_fitness = problem.get_maximize()*problem.get_fitness()
best_state = problem.get_state().astype(int)
problem.fitness_call_counter = itertools.count()
if curve:
return best_state, best_fitness, np.asarray(fitness_curve), np.asarray(fitness_call_count)
return best_state, best_fitness, None
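# --- Usage sketch (added for illustration; not part of the original module).
# It assumes mlrose_hiive exports DiscreteOpt and OneMax at package level,
# which may vary between versions.
def _example_mimic_onemax():
    from mlrose_hiive import DiscreteOpt, OneMax
    problem = DiscreteOpt(length=20, fitness_fn=OneMax(), maximize=True, max_val=2)
    # With curve=True the call also returns the per-iteration fitness curve
    # and the cumulative fitness-evaluation counts.
    best_state, best_fitness, curve, evals = mimic(
        problem, pop_size=200, keep_pct=0.2, max_attempts=10,
        curve=True, random_state=1)
    return best_state, best_fitness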
|
the-stack_106_24872
|
import logging
from typing import Generator, Any, Union, Dict
def get_inner_dict(source_dict, path_as_list):
result = source_dict
for index in path_as_list:
result = result[index]
return result
def merge_dicts(*dicts: Dict[Any, Any]) -> Dict[Any, Any]:
"""
Merges two or more dicts. If there are duplicate keys, later dict arguments take precedence.
    Null, empty, or non-dict arguments are quietly skipped.
:param dicts:
:return:
"""
res: Dict[Any, Any] = {}
for d in dicts:
if not d or not isinstance(d, dict):
continue
res = {**res, **d}
return res
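# Added example (not in the original module): later arguments take precedence
# on key collisions and non-dict arguments are skipped.
def _merge_dicts_example() -> Dict[Any, Any]:
    merged = merge_dicts({"a": 1, "b": 1}, {"b": 2}, None, "not-a-dict")
    assert merged == {"a": 1, "b": 2}
    return merged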
def generator_reader_wrapper(g: Generator) -> Union[None, Any]:
try:
return next(g)
except StopIteration:
return
def search_deep_keys(searchText, obj, path):
"""Search deep for keys and get their values"""
keys = []
if isinstance(obj, dict):
for key in obj:
pathprop = path[:]
pathprop.append(key)
if key == searchText:
pathprop.append(obj[key])
keys.append(pathprop)
# pop the last element off for nesting of found elements for
# dict and list checks
pathprop = pathprop[:-1]
if isinstance(obj[key], dict):
if key != 'parent_metadata':
# Don't go back to the parent metadata, it is scanned for the parent
keys.extend(search_deep_keys(searchText, obj[key], pathprop))
elif isinstance(obj[key], list):
for index, item in enumerate(obj[key]):
pathproparr = pathprop[:]
pathproparr.append(index)
keys.extend(search_deep_keys(searchText, item, pathproparr))
elif isinstance(obj, list):
for index, item in enumerate(obj):
pathprop = path[:]
pathprop.append(index)
keys.extend(search_deep_keys(searchText, item, pathprop))
return keys
def find_in_dict(input_dict: Dict[str, Any], key_path: str) -> Any:
"""Tries to retrieve the value under the given 'key_path', otherwise returns None."""
value = input_dict
key_list = key_path.split("/")
try:
for key in key_list:
if key.startswith("[") and key.endswith("]"):
if isinstance(value, list):
idx = int(key[1:-1])
value = value[idx]
continue
else:
return None
value = value.get(key)
if value is None:
return None
except (AttributeError, IndexError, KeyError, TypeError, ValueError):
logging.debug(f"Could not find {key_path} in dict")
return None
return value
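# Added example (not in the original module) contrasting the two lookup
# helpers: search_deep_keys walks the whole structure and returns the path to
# every match, while find_in_dict follows one explicit "/"-separated path
# (with "[n]" for list indices).
def _lookup_helpers_example():
    config = {"resource": {"tags": [{"Name": "web"}, {"Name": "db"}]}}
    # Every occurrence of the key "Name", each entry being [path..., value],
    # e.g. ['resource', 'tags', 0, 'Name', 'web'].
    matches = search_deep_keys("Name", config, [])
    # A single targeted lookup; returns None if any step is missing.
    first_name = find_in_dict(config, "resource/tags/[0]/Name")  # -> "web"
    return matches, first_name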
|
the-stack_106_24874
|
# -*- coding: utf-8 -*-
'''
Edit ini files
:maintainer: <[email protected]>
:maturity: new
:depends: re
:platform: all
(for example /etc/sysctl.conf)
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
# Import Salt libs
import salt.utils.files
import salt.utils.json
from salt.exceptions import CommandExecutionError
from salt.utils.odict import OrderedDict
# Import 3rd-party libs
from salt.ext import six
__virtualname__ = 'ini'
def __virtual__():
'''
Rename to ini
'''
return __virtualname__
ini_regx = re.compile(r'^\s*\[(.+?)\]\s*$', flags=re.M)
com_regx = re.compile(r'^\s*(#|;)\s*(.*)')
indented_regx = re.compile(r'(\s+)(.*)')
def set_option(file_name, sections=None, separator='='):
'''
Edit an ini file, replacing one or more sections. Returns a dictionary
containing the changes made.
file_name
path of ini_file
sections : None
A dictionary representing the sections to be edited ini file
The keys are the section names and the values are the dictionary
containing the options
If the ini file does not contain sections the keys and values represent
the options
separator : =
A character used to separate keys and values. Standard ini files use
the "=" character.
.. versionadded:: 2016.11.0
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.set_option',
['path_to_ini_file', '{"section_to_change": {"key": "value"}}'])
CLI Example:
.. code-block:: bash
salt '*' ini.set_option /path/to/ini '{section_foo: {key: value}}'
'''
sections = sections or {}
changes = {}
inifile = _Ini.get_ini_file(file_name, separator=separator)
changes = inifile.update(sections)
inifile.flush()
return changes
def get_option(file_name, section, option, separator='='):
'''
Get value of a key from a section in an ini file. Returns ``None`` if
no matching key was found.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.get_option',
[path_to_ini_file, section_name, option])
CLI Example:
.. code-block:: bash
salt '*' ini.get_option /path/to/ini section_name option_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
return inifile.get(section, {}).get(option, None)
def remove_option(file_name, section, option, separator='='):
'''
Remove a key/value pair from a section in an ini file. Returns the value of
the removed key, or ``None`` if nothing was removed.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.remove_option',
[path_to_ini_file, section_name, option])
CLI Example:
.. code-block:: bash
salt '*' ini.remove_option /path/to/ini section_name option_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
value = inifile.get(section, {}).pop(option, None)
inifile.flush()
return value
def get_section(file_name, section, separator='='):
'''
Retrieve a section from an ini file. Returns the section as dictionary. If
the section is not found, an empty dictionary is returned.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.get_section',
[path_to_ini_file, section_name])
CLI Example:
.. code-block:: bash
salt '*' ini.get_section /path/to/ini section_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
ret = {}
for key, value in six.iteritems(inifile.get(section, {})):
if key[0] != '#':
ret.update({key: value})
return ret
def remove_section(file_name, section, separator='='):
'''
Remove a section in an ini file. Returns the removed section as dictionary,
or ``None`` if nothing was removed.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.remove_section',
[path_to_ini_file, section_name])
CLI Example:
.. code-block:: bash
salt '*' ini.remove_section /path/to/ini section_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
section = inifile.pop(section, {})
inifile.flush()
ret = {}
for key, value in six.iteritems(section):
if key[0] != '#':
ret.update({key: value})
return ret
class _Section(OrderedDict):
def __init__(self, name, inicontents='', separator='=', commenter='#'):
super(_Section, self).__init__(self)
self.name = name
self.inicontents = inicontents
self.sep = separator
self.com = commenter
opt_regx_prefix = r'(\s*)(.+?)\s*'
opt_regx_suffix = r'\s*(.*)\s*'
self.opt_regx_str = r'{0}(\{1}){2}'.format(
opt_regx_prefix, self.sep, opt_regx_suffix
)
self.opt_regx = re.compile(self.opt_regx_str)
def refresh(self, inicontents=None):
comment_count = 1
unknown_count = 1
curr_indent = ''
inicontents = inicontents or self.inicontents
inicontents = inicontents.strip(os.linesep)
if not inicontents:
return
for opt in self:
self.pop(opt)
for opt_str in inicontents.split(os.linesep):
# Match comments
com_match = com_regx.match(opt_str)
if com_match:
name = '#comment{0}'.format(comment_count)
self.com = com_match.group(1)
comment_count += 1
self.update({name: opt_str})
continue
# Add indented lines to the value of the previous entry.
indented_match = indented_regx.match(opt_str)
if indented_match:
indent = indented_match.group(1).replace('\t', ' ')
if indent > curr_indent:
options = list(self)
if options:
prev_opt = options[-1]
value = self.get(prev_opt)
self.update({prev_opt: os.linesep.join((value, opt_str))})
continue
# Match normal key+value lines.
opt_match = self.opt_regx.match(opt_str)
if opt_match:
curr_indent, name, self.sep, value = opt_match.groups()
curr_indent = curr_indent.replace('\t', ' ')
self.update({name: value})
continue
# Anything remaining is a mystery.
name = '#unknown{0}'.format(unknown_count)
self.update({name: opt_str})
unknown_count += 1
def _uncomment_if_commented(self, opt_key):
        # should be called only if opt_key is not already present
        # will uncomment the key if commented and create a placeholder
        # for the key where the correct value can be updated later
        # used to preserve the ordering of comments and commented options
        # and to make sure options without sections go above any section
options_backup = OrderedDict()
comment_index = None
for key, value in six.iteritems(self):
if comment_index is not None:
options_backup.update({key: value})
continue
if '#comment' not in key:
continue
opt_match = self.opt_regx.match(value.lstrip('#'))
if opt_match and opt_match.group(2) == opt_key:
comment_index = key
for key in options_backup:
self.pop(key)
self.pop(comment_index, None)
super(_Section, self).update({opt_key: None})
for key, value in six.iteritems(options_backup):
super(_Section, self).update({key: value})
def update(self, update_dict):
changes = {}
for key, value in six.iteritems(update_dict):
# Ensure the value is either a _Section or a string
if isinstance(value, (dict, OrderedDict)):
sect = _Section(
name=key, inicontents='',
separator=self.sep, commenter=self.com
)
sect.update(value)
value = sect
value_plain = value.as_dict()
else:
value = str(value)
value_plain = value
if key not in self:
changes.update({key: {'before': None,
'after': value_plain}})
# If it's not a section, it may already exist as a
# commented-out key/value pair
if not isinstance(value, _Section):
self._uncomment_if_commented(key)
super(_Section, self).update({key: value})
else:
curr_value = self.get(key, None)
if isinstance(curr_value, _Section):
sub_changes = curr_value.update(value)
if sub_changes:
changes.update({key: sub_changes})
else:
if curr_value != value:
changes.update({key: {'before': curr_value,
'after': value_plain}})
super(_Section, self).update({key: value})
return changes
def gen_ini(self):
yield '{0}[{1}]{0}'.format(os.linesep, self.name)
sections_dict = OrderedDict()
for name, value in six.iteritems(self):
# Handle Comment Lines
if com_regx.match(name):
yield '{0}{1}'.format(value, os.linesep)
# Handle Sections
elif isinstance(value, _Section):
sections_dict.update({name: value})
# Key / Value pairs
# Adds spaces between the separator
else:
yield '{0}{1}{2}{3}'.format(
name,
' {0} '.format(self.sep) if self.sep != ' ' else self.sep,
value,
os.linesep
)
for name, value in six.iteritems(sections_dict):
for line in value.gen_ini():
yield line
def as_ini(self):
return ''.join(self.gen_ini())
def as_dict(self):
return dict(self)
def dump(self):
print(str(self))
def __repr__(self, _repr_running=None):
_repr_running = _repr_running or {}
super_repr = super(_Section, self).__repr__(_repr_running)
return os.linesep.join((super_repr, salt.utils.json.dumps(self, indent=4)))
def __str__(self):
return salt.utils.json.dumps(self, indent=4)
def __eq__(self, item):
return (isinstance(item, self.__class__) and
self.name == item.name)
def __ne__(self, item):
return not (isinstance(item, self.__class__) and
self.name == item.name)
class _Ini(_Section):
def __init__(self, name, inicontents='', separator='=', commenter='#'):
super(_Ini, self).__init__(name, inicontents, separator, commenter)
def refresh(self, inicontents=None):
if inicontents is None:
try:
with salt.utils.files.fopen(self.name) as rfh:
inicontents = rfh.read()
except (OSError, IOError) as exc:
if __opts__['test'] is False:
raise CommandExecutionError(
"Unable to open file '{0}'. "
"Exception: {1}".format(self.name, exc)
)
if not inicontents:
return
# Remove anything left behind from a previous run.
self.clear()
inicontents = ini_regx.split(inicontents)
inicontents.reverse()
# Pop anything defined outside of a section (ie. at the top of
# the ini file).
super(_Ini, self).refresh(inicontents.pop())
for section_name, sect_ini in self._gen_tuples(inicontents):
sect_obj = _Section(
section_name, sect_ini, separator=self.sep
)
sect_obj.refresh()
self.update({sect_obj.name: sect_obj})
def flush(self):
try:
with salt.utils.files.fopen(self.name, 'w') as outfile:
ini_gen = self.gen_ini()
next(ini_gen)
outfile.writelines(ini_gen)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to write file '{0}'. "
"Exception: {1}".format(self.name, exc)
)
@staticmethod
def get_ini_file(file_name, separator='='):
inifile = _Ini(file_name, separator=separator)
inifile.refresh()
return inifile
@staticmethod
def _gen_tuples(list_object):
while True:
try:
key = list_object.pop()
value = list_object.pop()
            except IndexError:
                # PEP 479: end the generator with return; raising StopIteration
                # here becomes a RuntimeError on Python 3.7+.
                return
else:
yield key, value
|
the-stack_106_24875
|
import torch as T
import sys
sys.path.append('/content/drive/MyDrive/xyz_master/master_thesis')
from xyzModel import CriticNetwork,ActorNetwork
class Agent:
def __init__(self, actor_dims, critic_dims, n_actions, n_agents, agent_idx, chkpt_dir,
alpha, beta, fc1,
fc2, gamma, tau):
self.gamma = gamma
self.tau = tau
self.n_actions = n_actions
self.agent_name = 'agent_%s' % agent_idx
device = T.device('cuda' if T.cuda.is_available() else 'cpu')
self.actor = ActorNetwork(alpha, actor_dims, fc1, fc2, n_actions,
chkpt_dir=chkpt_dir, name=self.agent_name+'_actor.pth').to(device)
self.critic = CriticNetwork(beta, critic_dims,
fc1, fc2, n_agents, n_actions,
chkpt_dir=chkpt_dir, name=self.agent_name+'_critic.pth').to(device)
self.target_actor = ActorNetwork(alpha, actor_dims, fc1, fc2, n_actions,
chkpt_dir=chkpt_dir, name=self.agent_name+'_target_actor.pth').to(device)
self.target_critic = CriticNetwork(beta, critic_dims,
fc1, fc2, n_agents, n_actions,
chkpt_dir=chkpt_dir, name=self.agent_name+'_target_critic.pth').to(device)
#device = T.device("cuda:0" if T.cuda.is_available() else "cpu")
# self.actor.to(device)
# self.critic.to(device)
# self.target_actor.to(device)
# self.target_critic.to(device)
self.update_network_parameters(tau=1)
def update_network_parameters(self, tau=None):
if tau is None:
tau = self.tau
target_actor_params = self.target_actor.named_parameters()
actor_params = self.actor.named_parameters()
target_actor_state_dict = dict(target_actor_params)
actor_state_dict = dict(actor_params)
for name in actor_state_dict:
actor_state_dict[name] = tau * actor_state_dict[name].clone() + \
(1 - tau) * target_actor_state_dict[name].clone()
self.target_actor.load_state_dict(actor_state_dict)
target_critic_params = self.target_critic.named_parameters()
critic_params = self.critic.named_parameters()
target_critic_state_dict = dict(target_critic_params)
critic_state_dict = dict(critic_params)
for name in critic_state_dict:
critic_state_dict[name] = tau * critic_state_dict[name].clone() + \
(1 - tau) * target_critic_state_dict[name].clone()
self.target_critic.load_state_dict(critic_state_dict)
def choose_action(self, observation):
state = T.tensor([observation], dtype=T.float).to(self.actor.device)#(1,18)
actions = self.actor.forward(state)#(1,3)
# noise = T.randn(self.n_actions).to(self.actor.device)
# action = actions + noise
return actions.detach().cpu().numpy()[0]
# def choose_action_evaluation(self, observation):
# state = T.tensor([observation], dtype=T.float).to(self.actor.device)
# actions = self.actor.forward(state)
#
#
# return actions.detach().cpu().numpy()[0]
def save_models(self):
self.actor.save_checkpoint()
self.target_actor.save_checkpoint()
self.critic.save_checkpoint()
self.target_critic.save_checkpoint()
def load_models(self):
self.actor.load_checkpoint()
self.critic.load_checkpoint()
self.target_critic.load_checkpoint()
self.target_actor.load_checkpoint()
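# --- Usage sketch (added; not part of the original file). The dimensions,
# hyperparameters and checkpoint directory below are placeholders only.
def _example_build_agents(n_agents=3, actor_dims=(18, 18, 18), critic_dims=54,
                          n_actions=3, chkpt_dir='tmp/maddpg/'):
    agents = []
    for agent_idx in range(n_agents):
        agents.append(Agent(actor_dims[agent_idx], critic_dims, n_actions,
                            n_agents, agent_idx, chkpt_dir,
                            alpha=1e-4, beta=1e-3, fc1=64, fc2=64,
                            gamma=0.99, tau=0.01))
    # Each agent then picks an action from its own local observation, e.g.
    # actions = [agent.choose_action(obs_i) for agent, obs_i in zip(agents, obs)]
    return agents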
|
the-stack_106_24876
|
import numpy
import h5py
import csv
def h5_to_csv(infilepath, outfilepath):
    # The parameter is `infilepath`; the original referenced an undefined `filepath`.
    fin = h5py.File(infilepath, 'r')
    data = fin["rows"]
    num_rows = data.shape[0]
    print('The h5 data has ' + str(num_rows) + ' rows and ' + str(data.shape[1]) + ' columns.')
writer = csv.writer(open(outfilepath, 'w'))
for i in range(num_rows):
writer.writerow(data[i])
if i % 50 == 0:
print(str(i+1) + " rows has been processed!")
def test_csv(outfilepath):
reader = csv.reader(open(outfilepath, 'r'))
count = 0
for row in reader:
print(row[0:10])
count += 1
if count > 100:
break
if __name__ == '__main__':
infilepath = '/global/cscratch1/sd/wss/mjo/Precipitation_rate_1979_to_1983.h5'
outfilepath = '/global/cscratch1/sd/wss/mjo/Precipitation_rate_1979_to_1983.csv'
h5_to_csv(infilepath, outfilepath)
|
the-stack_106_24883
|
import os
import subprocess as sp
import warnings
import numpy as np
from moviepy.config import FFMPEG_BINARY
from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
class FFMPEG_AudioReader:
"""
A class to read the audio in either video files or audio files
using ffmpeg. ffmpeg will read any audio and transform them into
raw data.
Parameters
------------
filename
Name of any video or audio file, like ``video.mp4`` or
``sound.wav`` etc.
buffersize
The size of the buffer to use. Should be bigger than the buffer
used by ``write_audiofile``
print_infos
Print the ffmpeg infos on the file being read (for debugging)
fps
Desired frames per second in the decoded signal that will be
received from ffmpeg
nbytes
Desired number of bytes (1,2,4) in the signal that will be
received from ffmpeg
"""
def __init__(
self,
filename,
buffersize,
decode_file=False,
print_infos=False,
fps=44100,
nbytes=2,
nchannels=2,
):
# TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader
# E.g. here self.pos is still 1-indexed.
# (or have them inherit from a shared parent class)
self.filename = filename
self.nbytes = nbytes
self.fps = fps
self.format = "s%dle" % (8 * nbytes)
self.codec = "pcm_s%dle" % (8 * nbytes)
self.nchannels = nchannels
infos = ffmpeg_parse_infos(filename, decode_file=decode_file)
self.duration = infos["duration"]
if "video_duration" in infos:
self.duration = infos["video_duration"]
else:
self.duration = infos["duration"]
self.bitrate = infos["audio_bitrate"]
self.infos = infos
self.proc = None
self.nframes = int(self.fps * self.duration)
self.buffersize = min(self.nframes + 1, buffersize)
self.buffer = None
self.buffer_startframe = 1
self.initialize()
self.buffer_around(1)
def initialize(self, start_time=0):
""" Opens the file, creates the pipe. """
self.close() # if any
if start_time != 0:
offset = min(1, start_time)
i_arg = [
"-ss",
"%.05f" % (start_time - offset),
"-i",
self.filename,
"-vn",
"-ss",
"%.05f" % offset,
]
else:
i_arg = ["-i", self.filename, "-vn"]
cmd = (
[FFMPEG_BINARY]
+ i_arg
+ [
"-loglevel",
"error",
"-f",
self.format,
"-acodec",
self.codec,
"-ar",
"%d" % self.fps,
"-ac",
"%d" % self.nchannels,
"-",
]
)
popen_params = {
"bufsize": self.buffersize,
"stdout": sp.PIPE,
"stderr": sp.PIPE,
"stdin": sp.DEVNULL,
}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
self.proc = sp.Popen(cmd, **popen_params)
self.pos = np.round(self.fps * start_time)
def skip_chunk(self, chunksize):
_ = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
self.proc.stdout.flush()
self.pos = self.pos + chunksize
def read_chunk(self, chunksize):
# chunksize is not being autoconverted from float to int
chunksize = int(round(chunksize))
s = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
data_type = {1: "int8", 2: "int16", 4: "int32"}[self.nbytes]
if hasattr(np, "frombuffer"):
result = np.frombuffer(s, dtype=data_type)
else:
result = np.fromstring(s, dtype=data_type)
result = (1.0 * result / 2 ** (8 * self.nbytes - 1)).reshape(
(int(len(result) / self.nchannels), self.nchannels)
)
# Pad the read chunk with zeros when there isn't enough audio
# left to read, so the buffer is always at full length.
pad = np.zeros((chunksize - len(result), self.nchannels), dtype=result.dtype)
result = np.concatenate([result, pad])
# self.proc.stdout.flush()
self.pos = self.pos + chunksize
return result
def seek(self, pos):
"""
Reads a frame at time t. Note for coders: getting an arbitrary
frame in the video with ffmpeg can be painfully slow if some
        decoding has to be done. This function tries to avoid fetching
arbitrary frames whenever possible, by moving between adjacent
frames.
"""
if (pos < self.pos) or (pos > (self.pos + 1000000)):
t = 1.0 * pos / self.fps
self.initialize(t)
elif pos > self.pos:
# print pos
self.skip_chunk(pos - self.pos)
# last case standing: pos = current pos
self.pos = pos
def get_frame(self, tt):
if isinstance(tt, np.ndarray):
# lazy implementation, but should not cause problems in
# 99.99 % of the cases
# elements of t that are actually in the range of the
# audio file.
in_time = (tt >= 0) & (tt < self.duration)
# Check that the requested time is in the valid range
if not in_time.any():
raise IOError(
"Error in file %s, " % (self.filename)
+ "Accessing time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "with clip duration=%f seconds, " % self.duration
)
# The np.round in the next line is super-important.
# Removing it results in artifacts in the noise.
frames = np.round((self.fps * tt)).astype(int)[in_time]
fr_min, fr_max = frames.min(), frames.max()
if not (0 <= (fr_min - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_min)
elif not (0 <= (fr_max - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_max)
try:
result = np.zeros((len(tt), self.nchannels))
indices = frames - self.buffer_startframe
result[in_time] = self.buffer[indices]
return result
except IndexError as error:
warnings.warn(
"Error in file %s, " % (self.filename)
+ "At time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "indices wanted: %d-%d, " % (indices.min(), indices.max())
+ "but len(buffer)=%d\n" % (len(self.buffer))
+ str(error),
UserWarning,
)
# repeat the last frame instead
indices[indices >= len(self.buffer)] = len(self.buffer) - 1
result[in_time] = self.buffer[indices]
return result
else:
ind = int(self.fps * tt)
if ind < 0 or ind > self.nframes: # out of time: return 0
return np.zeros(self.nchannels)
if not (0 <= (ind - self.buffer_startframe) < len(self.buffer)):
# out of the buffer: recenter the buffer
self.buffer_around(ind)
# read the frame in the buffer
return self.buffer[ind - self.buffer_startframe]
def buffer_around(self, frame_number):
"""
Fills the buffer with frames, centered on ``frame_number``
if possible
"""
# start-frame for the buffer
new_bufferstart = max(0, frame_number - self.buffersize // 2)
if self.buffer is not None:
current_f_end = self.buffer_startframe + self.buffersize
if new_bufferstart < current_f_end < new_bufferstart + self.buffersize:
# We already have part of what must be read
conserved = current_f_end - new_bufferstart
chunksize = self.buffersize - conserved
array = self.read_chunk(chunksize)
self.buffer = np.vstack([self.buffer[-conserved:], array])
else:
self.seek(new_bufferstart)
self.buffer = self.read_chunk(self.buffersize)
else:
self.seek(new_bufferstart)
self.buffer = self.read_chunk(self.buffersize)
self.buffer_startframe = new_bufferstart
def close(self):
if self.proc:
self.proc.terminate()
self.proc.stdout.close()
self.proc.stderr.close()
self.proc.wait()
self.proc = None
def __del__(self):
# If the garbage collector comes, make sure the subprocess is terminated.
self.close()
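# --- Usage sketch (added for illustration; not part of the original module).
# "example.mp4" is a placeholder path.
def _example_read_audio(filename="example.mp4"):
    reader = FFMPEG_AudioReader(filename, buffersize=200000, fps=44100, nbytes=2)
    try:
        # A single stereo frame at t = 1.5 s, then a small batch of frames.
        single = reader.get_frame(1.5)
        batch = reader.get_frame(np.linspace(1.0, 1.1, num=100))
        return single, batch
    finally:
        reader.close()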
|
the-stack_106_24886
|
import logging
import socket
import subprocess
import time
from datetime import datetime
from .adapter import Adapter
MINICAP_REMOTE_ADDR = "localabstract:minicap"
ROTATION_CHECK_INTERVAL_S = 1 # Check rotation once per second
class MinicapException(Exception):
"""
Exception in minicap connection
"""
pass
class Minicap(Adapter):
"""
a connection with target device through minicap.
"""
def __init__(self, device=None):
"""
initiate a minicap connection
:param device: instance of Device
:return:
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.host = "localhost"
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.port = self.device.get_random_port()
self.remote_minicap_path = "/data/local/tmp/minicap-devel"
self.sock = None
self.connected = False
self.minicap_process = None
self.banner = None
self.width = -1
self.height = -1
self.orientation = -1
self.last_screen = None
self.last_screen_time = None
self.last_views = []
self.last_rotation_check_time = datetime.now()
def set_up(self):
device = self.device
try:
minicap_files = device.adb.shell("ls %s 2>/dev/null" % self.remote_minicap_path).split()
if "minicap.so" in minicap_files and ("minicap" in minicap_files or "minicap-nopie" in minicap_files):
self.logger.debug("minicap was already installed.")
return
except:
pass
if device is not None:
# install minicap
import pkg_resources
local_minicap_path = pkg_resources.resource_filename("droidbot", "resources/minicap")
try:
device.adb.shell("mkdir %s 2>/dev/null" % self.remote_minicap_path)
except Exception:
pass
abi = device.adb.get_property('ro.product.cpu.abi')
sdk = device.get_sdk_version()
if sdk >= 16:
minicap_bin = "minicap"
else:
minicap_bin = "minicap-nopie"
device.push_file(local_file="%s/libs/%s/%s" % (local_minicap_path, abi, minicap_bin),
remote_dir=self.remote_minicap_path)
device.push_file(local_file="%s/jni/libs/android-%s/%s/minicap.so" % (local_minicap_path, sdk, abi),
remote_dir=self.remote_minicap_path)
self.logger.debug("minicap installed.")
def tear_down(self):
try:
delete_minicap_cmd = "adb -s %s shell rm -r %s" % (self.device.serial, self.remote_minicap_path)
p = subprocess.Popen(delete_minicap_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
except Exception:
pass
def connect(self):
device = self.device
display = device.get_display_info(refresh=True)
if 'width' not in display or 'height' not in display or 'orientation' not in display:
self.logger.warning("Cannot get the size of current device.")
return
w = display['width']
h = display['height']
if w > h:
temp = w
w = h
h = temp
o = display['orientation'] * 90
self.width = w
self.height = h
self.orientation = o
size_opt = "%dx%d@%dx%d/%d" % (w, h, w, h, o)
grant_minicap_perm_cmd = "adb -s %s shell chmod -R a+x %s" % \
(device.serial, self.remote_minicap_path)
start_minicap_cmd = "adb -s %s shell LD_LIBRARY_PATH=%s %s/minicap -P %s" % \
(device.serial, self.remote_minicap_path, self.remote_minicap_path, size_opt)
self.logger.debug("starting minicap: " + start_minicap_cmd)
p = subprocess.Popen(grant_minicap_perm_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
self.minicap_process = subprocess.Popen(start_minicap_cmd.split(),
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Wait 2 seconds for starting minicap
time.sleep(2)
self.logger.debug("minicap started.")
try:
# forward host port to remote port
forward_cmd = "adb -s %s forward tcp:%d %s" % (device.serial, self.port, MINICAP_REMOTE_ADDR)
subprocess.check_call(forward_cmd.split())
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
import threading
listen_thread = threading.Thread(target=self.listen_messages)
listen_thread.start()
except socket.error as e:
self.connected = False
self.logger.warning(e)
raise MinicapException()
def listen_messages(self):
self.logger.debug("start listening minicap images ...")
CHUNK_SIZE = 4096
readBannerBytes = 0
bannerLength = 2
readFrameBytes = 0
frameBodyLength = 0
frameBody = bytearray()
banner = {
"version": 0,
"length": 0,
"pid": 0,
"realWidth": 0,
"realHeight": 0,
"virtualWidth": 0,
"virtualHeight": 0,
"orientation": 0,
"quirks": 0,
}
self.connected = True
while self.connected:
chunk = bytearray(self.sock.recv(CHUNK_SIZE))
if not chunk:
continue
chunk_len = len(chunk)
cursor = 0
while cursor < chunk_len and self.connected:
if readBannerBytes < bannerLength:
if readBannerBytes == 0:
banner['version'] = chunk[cursor]
elif readBannerBytes == 1:
banner['length'] = bannerLength = chunk[cursor]
elif 2 <= readBannerBytes <= 5:
banner['pid'] += (chunk[cursor] << ((readBannerBytes - 2) * 8))
elif 6 <= readBannerBytes <= 9:
banner['realWidth'] += (chunk[cursor] << ((readBannerBytes - 6) * 8))
elif 10 <= readBannerBytes <= 13:
banner['realHeight'] += (chunk[cursor] << ((readBannerBytes - 10) * 8))
elif 14 <= readBannerBytes <= 17:
banner['virtualWidth'] += (chunk[cursor] << ((readBannerBytes - 14) * 8))
elif 18 <= readBannerBytes <= 21:
banner['virtualHeight'] += (chunk[cursor] << ((readBannerBytes - 18) * 8))
elif readBannerBytes == 22:
banner['orientation'] += chunk[cursor] * 90
elif readBannerBytes == 23:
banner['quirks'] = chunk[cursor]
cursor += 1
readBannerBytes += 1
if readBannerBytes == bannerLength:
self.banner = banner
self.logger.debug("minicap initialized: %s" % banner)
elif readFrameBytes < 4:
frameBodyLength += (chunk[cursor] << (readFrameBytes * 8))
cursor += 1
readFrameBytes += 1
else:
if chunk_len - cursor >= frameBodyLength:
frameBody += chunk[cursor: cursor + frameBodyLength]
self.handle_image(frameBody)
cursor += frameBodyLength
frameBodyLength = readFrameBytes = 0
frameBody = bytearray()
else:
frameBody += chunk[cursor:]
frameBodyLength -= chunk_len - cursor
readFrameBytes += chunk_len - cursor
cursor = chunk_len
print("[CONNECTION] %s is disconnected" % self.__class__.__name__)
def handle_image(self, frameBody):
# Sanity check for JPG header, only here for debugging purposes.
if frameBody[0] != 0xFF or frameBody[1] != 0xD8:
self.logger.warning("Frame body does not start with JPG header")
self.last_screen = frameBody
self.last_screen_time = datetime.now()
self.last_views = None
self.logger.debug("Received an image at %s" % self.last_screen_time)
self.check_rotation()
def check_rotation(self):
current_time = datetime.now()
if (current_time - self.last_rotation_check_time).total_seconds() < ROTATION_CHECK_INTERVAL_S:
return
display = self.device.get_display_info(refresh=True)
if 'orientation' in display:
cur_orientation = display['orientation'] * 90
if cur_orientation != self.orientation:
self.device.handle_rotation()
self.last_rotation_check_time = current_time
def check_connectivity(self):
"""
check if droidbot app is connected
:return: True for connected
"""
if not self.connected:
return False
if self.last_screen_time is None:
return False
return True
def disconnect(self):
"""
disconnect telnet
"""
self.connected = False
if self.sock is not None:
try:
self.sock.close()
except Exception as e:
print(e)
if self.minicap_process is not None:
try:
self.minicap_process.terminate()
except Exception as e:
print(e)
try:
forward_remove_cmd = "adb -s %s forward --remove tcp:%d" % (self.device.serial, self.port)
p = subprocess.Popen(forward_remove_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
except Exception as e:
print(e)
def get_views(self):
"""
get UI views using cv module
        opencv-python needs to be installed for this function
:return: a list of views
"""
if not self.last_screen:
self.logger.warning("last_screen is None")
return None
if self.last_views:
return self.last_views
import cv
img = cv.load_image_from_buf(self.last_screen)
view_bounds = cv.find_views(img)
root_view = {
"class": "CVViewRoot",
"bounds": [[0, 0], [self.width, self.height]],
"enabled": True,
"temp_id": 0
}
views = [root_view]
temp_id = 1
for x,y,w,h in view_bounds:
view = {
"class": "CVView",
"bounds": [[x,y], [x+w, y+h]],
"enabled": True,
"temp_id": temp_id,
"signature": cv.calculate_dhash(img[y:y+h, x:x+w]),
"parent": 0
}
views.append(view)
temp_id += 1
root_view["children"] = list(range(1, temp_id))
self.last_views = views
return views
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
minicap = Minicap()
try:
minicap.set_up()
minicap.connect()
except:
minicap.disconnect()
minicap.tear_down()
minicap.device.disconnect()
|
the-stack_106_24887
|
#!/usr/bin/env python3
"""
Requires:
python-mnist
numpy
sklearn
"""
import mnist
import numpy as np
from numpy.linalg import norm as l21_norm
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
gamma = .005
epsilon = 1e-5
# Download t10k_* from http://yann.lecun.com/exdb/mnist/
# Change to directory containing unzipped MNIST data
mndata = mnist.MNIST('/data/science/MNIST/')
def solve_huang_eq_24(u):
n = len(u)
def f(x):
return np.clip(x - u, 0, None).sum() / n - x
def df(x):
return (x > u).sum() / n - 1
EPS = 1e-4
lamb = np.min(u)
while True:
new_lamb = lamb - f(lamb) / df(lamb)
if np.abs(new_lamb - lamb) < EPS:
return new_lamb
lamb = new_lamb
def solve_huang_eq_13(v):
"""
    min || alpha - v ||^2, subject to sum(alpha) = 1, alpha >= 0
"""
n = len(v)
u = v - np.ones((n, n)) @ v / (n) + np.ones(n) / (n)
lambda_bar_star = solve_huang_eq_24(u)
lambda_star = (lambda_bar_star - u) + np.clip(u - lambda_bar_star, 0, None)
return u + lambda_star - lambda_bar_star * np.ones(n)
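# Added sanity check (not in the original script): solve_huang_eq_13 projects a
# vector onto the probability simplex, so the result is non-negative, sums to 1,
# and stays as close as possible to the input (here roughly [0.25, 0, 0.75, 0]).
def _simplex_projection_example():
    v = np.array([0.4, -0.2, 0.9, 0.1])
    alpha = solve_huang_eq_13(v)
    assert np.all(alpha >= 0)
    assert np.isclose(alpha.sum(), 1.0)
    return alpha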
def welsch_func(x):
result = (1 - np.exp(- epsilon * x ** 2)) / epsilon
return result
def solve_U(x, v, gamma):
U = np.zeros((N, C))
for i in range(N):
xi = np.repeat(x[i, :].reshape((1, ndim)), C, axis=0)
h = welsch_func(l21_norm(xi - v, axis=1))
h = (-h) / (2 * gamma)
U[i, :] = solve_huang_eq_13(h)
return U
def update_V(v, u, x):
W = np.zeros((N, C))
for i in range(N):
for k in range(C):
W[i, k] = u[i, k] * np.exp(-epsilon * l21_norm(x[i, :] - v[k, :])**2)
new_v = np.zeros(v.shape)
for k in range(C):
denominator = W[:, k].sum()
# Avoid division by zero
if denominator == 0:
denominator = 1
new_v[k, :] = W[:, k].reshape((1, N)) @ x / denominator
return new_v
def NMI(U):
return nmi(labels, np.argmax(U, axis=1))
if __name__ == '__main__':
images, labels = mndata.load_testing()
ndim = 784
N = size = len(labels)
C = 10
X = np.array(images).reshape((size, ndim)) / 255
t = 0
V = np.random.random((C, ndim))
U = np.zeros((size, C))
for i in range(size):
xi = np.repeat(X[i, :].reshape((1, ndim)), C, axis=0)
U[i, np.argmin(l21_norm(xi - V, axis=1))] = 1
S = np.ones((size, C))
while True:
print('-------------')
print('== t = ', t)
new_V = update_V(V, U, X)
delta_V = l21_norm(new_V - V)
V = new_V
print('DELTA', delta_V)
delta = 100
while delta > 1:
new_U = solve_U(X, V, gamma)
delta = l21_norm(U - new_U)
U = new_U
print('NMI', NMI(U))
if delta_V < 1e-1:
print('Converged at step', t)
print('NMI', NMI(U))
break
t += 1
|
the-stack_106_24891
|
from flask import abort, request, Blueprint
import contextlib
import hmac
from ..utils import warn, parse_lease_seconds, calculate_hmac
NOT_FOUND = "Could not find subscription with callback id '%s'"
def build_blueprint(subscriber, url_prefix):
name = 'websub_callbacks' + url_prefix.replace('/', '_')
callbacks = Blueprint(name, __name__, url_prefix=url_prefix)
@callbacks.route('/<callback_id>', methods=['GET'])
def subscription_confirmation(callback_id):
mode = get_query_arg('hub.mode')
if mode == 'denied':
return subscription_denied(callback_id)
elif mode in ['subscribe', 'unsubscribe']:
return confirm_subscription(callback_id)
else:
abort(400, "Invalid mode")
def subscription_denied(callback_id):
try:
subscription_request = subscriber.temp_storage.pop(callback_id)
except KeyError:
with warn_and_abort_on_error(callback_id):
subscription_request = subscriber.storage.pop(callback_id)
# 5.2 Subscription Validation
# TODO: support Location header? It's a MAY, but a nice feature. Maybe
# later, behind a config option.
reason = request.args.get('hub.reason', 'denied')
subscriber.call_all('error_handlers',
subscription_request['topic_url'], callback_id,
reason)
return "'denied' acknowledged\n"
def confirm_subscription(callback_id):
with warn_and_abort_on_error(callback_id):
subscription_request = subscriber.temp_storage.pop(callback_id)
mode = get_query_arg('hub.mode')
topic_url = get_query_arg('hub.topic')
if mode != subscription_request['mode']:
abort(404, "Mode does not match with last request")
if topic_url != subscription_request['topic_url']:
abort(404, "Topic url does not match")
if mode == 'subscribe':
lease = parse_lease_seconds(get_query_arg('hub.lease_seconds'))
subscription_request['lease_seconds'] = lease
# this is the point where the subscription request is turned into
# a subscription:
subscriber.storage[callback_id] = subscription_request
else: # unsubscribe
del subscriber.storage[callback_id]
subscriber.call_all('success_handlers', topic_url, callback_id, mode)
return get_query_arg('hub.challenge'), 200
@callbacks.route('/<callback_id>', methods=['POST'])
def callback(callback_id):
try:
subscription = subscriber.storage[callback_id]
except KeyError:
abort(404)
# 1 MiB by default
max_body_size = subscriber.config.get('MAX_BODY_SIZE', 1024 * 1024)
if request.content_length > max_body_size:
abort(400, "Body too large")
body = request.get_data()
if body_is_valid(subscription, body):
subscriber.call_all('listeners', subscription['topic_url'],
callback_id, body)
return 'Content received\n'
return name, callbacks
def get_query_arg(name):
try:
return request.args[name]
except KeyError:
abort(400, "Missing query argument: " + name)
def body_is_valid(subscription, body):
if not subscription['secret']:
return True
try:
algo, signature = request.headers['X-Hub-Signature'].split('=')
expected_signature = calculate_hmac(algo, subscription['secret'], body)
except KeyError as e:
warn("X-Hub-Signature header expected but not set", e)
except ValueError as e:
warn("X-Hub-Signature header is invalid", e)
except AttributeError as e:
warn("Invalid algorithm in X-Hub-Signature", e)
else:
if hmac.compare_digest(signature, expected_signature):
return True
return False
@contextlib.contextmanager
def warn_and_abort_on_error(callback_id):
try:
yield
except KeyError as e:
warn(NOT_FOUND % callback_id, e)
abort(404)
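# --- Registration sketch (added; not part of the original module). The
# `subscriber` object is hypothetical: the blueprint only needs it to expose
# `storage`, `temp_storage`, `config` and a `call_all(kind, *args)` hook, as
# used above.
#
#   from flask import Flask
#
#   app = Flask(__name__)
#   name, callbacks = build_blueprint(subscriber, url_prefix="/websub")
#   app.register_blueprint(callbacks)
#   # Hub verification arrives as GET /websub/<callback_id>;
#   # content notifications arrive as POST /websub/<callback_id>.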
|
the-stack_106_24892
|
from __future__ import annotations
from typing import Callable
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from ...metrics import misclassification_error
def default_callback(fit: Perceptron, x: np.ndarray, y: int):
pass
class Perceptron(BaseEstimator):
"""
Perceptron half-space classifier
Finds a separating hyperplane for given linearly separable data.
Attributes
----------
include_intercept: bool, default = True
Should fitted model include an intercept or not
max_iter_: int, default = 1000
Maximum number of passes over training data
coefs_: ndarray of shape (n_features,) or (n_features+1,)
Coefficients vector fitted by Perceptron algorithm. To be set in
`Perceptron.fit` function.
training_loss_: array of floats
holds the loss value of the algorithm during training.
training_loss_[i] is the loss value of the i'th training iteration.
to be filled in `Perceptron.fit` function.
"""
def __init__(self,
include_intercept: bool = True,
max_iter: int = 1000,
callback: Callable[[Perceptron, np.ndarray, int], None] =
default_callback):
"""
Instantiate a Perceptron classifier
Parameters
----------
include_intercept: bool, default=True
Should fitted model include an intercept or not
max_iter: int, default = 1000
Maximum number of passes over training data
callback: Callable[[Perceptron, np.ndarray, int], None]
A callable to be called after each update of the model while
fitting to given data
Callable function should receive as input a Perceptron instance,
current sample and current response
Attributes
----------
include_intercept_: bool
Should fitted model include an intercept or not
        max_iter_: int, default = 1000
Maximum number of passes over training data
callback_: Callable[[Perceptron, np.ndarray, int], None]
A callable to be called after each update of the model while
fitting to given data
Callable function should receive as input a Perceptron instance,
current sample and current response
coefs_: ndarray of shape (n_features,) or (n_features+1,)
Coefficients vector fitted by Perceptron. To be set in
`Perceptron.fit` function.
"""
super().__init__()
self.include_intercept_ = include_intercept
self.max_iter_ = max_iter
self.callback_ = callback
self.coefs_ = None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
        Fit a halfspace to given samples. Iterate over given data as long
        as there exists a misclassified sample and the number of iterations
        has not reached `self.max_iter_`
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
Notes
-----
Fits model with or without an intercept depending on value of
`self.fit_intercept_`
"""
if self.include_intercept_:
X = np.insert(X, 0, 1, axis=1)
self.coefs_ = np.zeros(X.shape[1])
self.training_loss_ = []
for i in range(self.max_iter_):
error = self.find_error(X, y)
if error >= 0:
self.coefs_ += (X[error] * y[error])
self.fitted_ = True
self.callback_(self, X[error], y[error])
else:
break
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
if self.include_intercept_:
X = np.insert(X, 0, 1, axis=1)
return np.sign(X @ self.coefs_)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under misclassification loss function
"""
return misclassification_error(y, self._predict(X))
def find_error(self, X, y):
"""
finds the first index where there is an error.
        if there isn't an error, returns -1.
"""
errors = y * (X @ self.coefs_)
i = 0
while i < y.size:
if errors[i] <= 0:
return i
i += 1
return -1
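# --- Usage sketch (added; not part of the original module). It assumes the
# public fit()/loss() wrappers that BaseEstimator provides around _fit/_loss.
# Labels must be in {-1, +1} for the update rule above.
def _example_fit_perceptron():
    rng = np.random.RandomState(0)
    X = rng.uniform(-1, 1, size=(100, 2))
    y = np.where(X[:, 0] + X[:, 1] > 0, 1, -1)
    model = Perceptron(include_intercept=True, max_iter=1000)
    model.fit(X, y)
    return model.loss(X, y)  # misclassification rate; 0.0 once separated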
|
the-stack_106_24893
|
# Based on http://www.nytimes.com/interactive/2012/08/05/sports/olympics/the-100-meter-dash-one-race-every-medalist-ever.html
from bokeh.plotting import figure, show, output_file
from bokeh.models import (
ColumnDataSource,
Range1d, DataRange1d,
LinearAxis, SingleIntervalTicker, FixedTicker,
Label, Arrow, NormalHead,
HoverTool, TapTool, CustomJS)
from bokeh.sampledata.sprint import sprint
abbrev_to_country = {
"USA": "United States",
"GBR": "Britain",
"JAM": "Jamaica",
"CAN": "Canada",
"TRI": "Trinidad and Tobago",
"AUS": "Australia",
"GER": "Germany",
"CUB": "Cuba",
"NAM": "Namibia",
"URS": "Soviet Union",
"BAR": "Barbados",
"BUL": "Bulgaria",
"HUN": "Hungary",
"NED": "Netherlands",
"NZL": "New Zealand",
"PAN": "Panama",
"POR": "Portugal",
"RSA": "South Africa",
"EUA": "United Team of Germany",
}
gold_fill = "#efcf6d"
gold_line = "#c8a850"
silver_fill = "#cccccc"
silver_line = "#b0b0b1"
bronze_fill = "#c59e8a"
bronze_line = "#98715d"
fill_color = { "gold": gold_fill, "silver": silver_fill, "bronze": bronze_fill }
line_color = { "gold": gold_line, "silver": silver_line, "bronze": bronze_line }
def selected_name(name, medal, year):
return name if medal == "gold" and year in [1988, 1968, 1936, 1896] else ""
t0 = sprint.Time[0]
sprint["Abbrev"] = sprint.Country
sprint["Country"] = sprint.Abbrev.map(lambda abbr: abbrev_to_country[abbr])
sprint["Medal"] = sprint.Medal.map(lambda medal: medal.lower())
sprint["Speed"] = 100.0/sprint.Time
sprint["MetersBack"] = 100.0*(1.0 - t0/sprint.Time)
sprint["MedalFill"] = sprint.Medal.map(lambda medal: fill_color[medal])
sprint["MedalLine"] = sprint.Medal.map(lambda medal: line_color[medal])
sprint["SelectedName"] = sprint[["Name", "Medal", "Year"]].apply(tuple, axis=1).map(lambda args: selected_name(*args))
source = ColumnDataSource(sprint)
xdr = Range1d(start=sprint.MetersBack.max()+2, end=0) # XXX: +2 is poor-man's padding (otherwise misses last tick)
ydr = DataRange1d(range_padding=4, range_padding_units="absolute")
plot = figure(
x_range=xdr, y_range=ydr,
plot_width=1000, plot_height=600,
toolbar_location=None,
outline_line_color=None, y_axis_type=None)
plot.title.text = "Usain Bolt vs. 116 years of Olympic sprinters"
plot.title.text_font_size = "14pt"
plot.xaxis.ticker = SingleIntervalTicker(interval=5, num_minor_ticks=0)
plot.xaxis.axis_line_color = None
plot.xaxis.major_tick_line_color = None
plot.xgrid.grid_line_dash = "dashed"
yticker = FixedTicker(ticks=[1900, 1912, 1924, 1936, 1952, 1964, 1976, 1988, 2000, 2012])
yaxis = LinearAxis(ticker=yticker, major_tick_in=-5, major_tick_out=10)
plot.add_layout(yaxis, "right")
medal = plot.circle(x="MetersBack", y="Year", radius=dict(value=5, units="screen"),
fill_color="MedalFill", line_color="MedalLine", fill_alpha=0.5, source=source, level="overlay")
plot.text(x="MetersBack", y="Year", x_offset=10, y_offset=-5, text="SelectedName",
text_align="left", text_baseline="middle", text_font_size="9pt", source=source)
no_olympics_label = Label(
x=7.5, y=1942,
text="No Olympics in 1940 or 1944",
text_align="center", text_baseline="middle",
text_font_size="9pt", text_font_style="italic", text_color="silver")
no_olympics = plot.add_layout(no_olympics_label)
x = sprint[sprint.Year == 1900].MetersBack.min() - 0.5
arrow = Arrow(x_start=x, x_end=5, y_start=1900, y_end=1900, start=NormalHead(fill_color="black", size=6), end=None, line_width=1.5)
plot.add_layout(arrow)
meters_back = Label(
x=5, x_offset=10, y=1900,
text="Meters behind 2012 Bolt",
text_align="left", text_baseline="middle",
text_font_size="10pt", text_font_style="bold")
plot.add_layout(meters_back)
disclaimer = Label(
x=0, y=0, x_units="screen", y_units="screen",
text="This chart includes medals for the United States and Australia in the \"Intermediary\" Games of 1906, which the I.O.C. does not formally recognize.",
text_font_size="8pt", text_color="silver")
plot.add_layout(disclaimer, "below")
tooltips = """
<div>
<span style="font-size: 15px;">@Name</span>
<span style="font-size: 10px; color: #666;">(@Abbrev)</span>
</div>
<div>
<span style="font-size: 17px; font-weight: bold;">@Time{0.00}</span>
<span style="font-size: 10px; color: #666;">@Year</span>
</div>
<div style="font-size: 11px; color: #666;">@{MetersBack}{0.00} meters behind</div>
"""
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[medal]))
open_url = CustomJS(args=dict(source=source), code="""
source.inspected._1d.indices.forEach(function(index) {
var name = source.data["Name"][index];
var url = "http://en.wikipedia.org/wiki/" + encodeURIComponent(name);
window.open(url);
});
""")
plot.add_tools(TapTool(callback=open_url, renderers=[medal], behavior="inspect"))
output_file("sprint.html", plot.title.text)
show(plot)
|
the-stack_106_24894
|
# -*- coding: utf-8 -*-
import os
import orjson
from pytest import fixture
FIXTURE_DIR = os.path.join("fixture_data")
@fixture
def showdown_format_teams(raw_team_data):
data = {}
for format_, team_list in raw_team_data.items():
data[format_] = []
for team_info in team_list:
with open(os.path.join(FIXTURE_DIR, team_info["showdown-file"])) as f:
data[format_].append(f.read())
return data
@fixture
def packed_format_teams(raw_team_data):
data = {}
for format_, team_list in raw_team_data.items():
data[format_] = []
for team_info in team_list:
data[format_].append(team_info["packed-format"])
return data
@fixture
def raw_team_data():
with open(os.path.join(FIXTURE_DIR, "teams.json")) as f:
return orjson.loads(f.read())
@fixture
def example_request():
with open(os.path.join(FIXTURE_DIR, "example_request.json")) as f:
return orjson.loads(f.read())
@fixture
def example_doubles_request():
with open(os.path.join(FIXTURE_DIR, "example_doubles_request.json")) as f:
return orjson.loads(f.read())
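# Added example (not part of the original fixture module): a test that consumes
# the fixtures above. In a real suite it would live in a separate test module;
# the assertion only sketches how the two team representations line up.
def test_team_counts_match(showdown_format_teams, packed_format_teams):
    for format_, teams in showdown_format_teams.items():
        assert len(teams) == len(packed_format_teams[format_])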
|
the-stack_106_24897
|
#coding: utf-8
from time import sleep
from flask import render_template, request, jsonify
from app import app
import panasonic_viera
import unidecode
import collections
def arrow_keys(command):
global rc
print(command)
if(command[0:9] == "seleciona"):
rc.send_key(panasonic_viera.Keys.enter)
sleep(0.5)
elif(command[0:4] == "cima"):
rc.send_key(panasonic_viera.Keys.up)
sleep(0.5)
elif(command[0:5] == "baixo"):
rc.send_key(panasonic_viera.Keys.down)
sleep(0.5)
elif(command[0:7] == "esquerd"):
rc.send_key(panasonic_viera.Keys.left)
sleep(0.5)
elif(command[0:6] == "direit"):
rc.send_key(panasonic_viera.Keys.right)
sleep(0.5)
def bfs(grid, start, goal, wall):
global width, height
queue = collections.deque([[start]])
seen = set([start])
while queue:
path = queue.popleft()
x, y = path[-1]
if grid[y][x] == goal:
return [path, directions(path)]
for x2, y2 in ((x+1,y), (x-1,y), (x,y+1), (x,y-1)):
if 0 <= x2 < width and 0 <= y2 < height and grid[y2][x2] != wall and (x2, y2) not in seen:
queue.append(path + [(x2, y2)])
seen.add((x2, y2))
def directions(path):
result = []
index = 1
max_index = len(path)
if(max_index == 1):
result.append("selecionar")
return result
while(index < max_index):
before = path[index-1]
after = path[index]
if(before[0] == after[0]):
if(before[1] > after[1]):
result.append("cima")
else:
result.append("baixo")
else:
if(before[0] > after[0]):
result.append("esquerda")
else:
result.append("direita")
index+=1
return result
def escrever(word):
global width, height
width, height = 6, 7
grid = ["#$####",
"abcdef",
"ghijkl",
"mnopqr",
"stuvwx",
"yz1234",
"567890"]
wall = "#"
words = word.split(" ")
index = 0
last_c = (0,1)
for w in words:
if(index != 0):
goal = "$"
res = bfs(grid, last_c, goal, wall)
if(res[1][-1] != "selecionar"):
res[1].append("selecionar")
print(' '.join(res[1]))
for c in res[1]:
arrow_keys(c)
last_c = res[0][-1]
for l in w:
goal = l
res = bfs(grid, last_c, goal, wall)
if(res[1][-1] != "selecionar"):
res[1].append("selecionar")
print(' '.join(res[1]))
for c in res[1]:
arrow_keys(c)
last_c = res[0][-1]
index += 1
@app.before_first_request
def connect_tv():
global rc
rc = panasonic_viera.RemoteControl("TV_IP", app_id="APP_ID", encryption_key="ENC_KEY")
@app.route('/')
def index():
return render_template('index.html')
@app.route('/execute', methods=['POST'])
def execute():
command = request.form.get('command')
print(command)
if(command == "aplicativos" or command == "aplicativo"):
rc.send_key(panasonic_viera.Keys.apps)
elif(command[0:8] == "escrever"):
command = unidecode.unidecode(command)
command = command.split(" ")
command.pop(0)
command = " ".join(command)
        print("will write: %s" % command)
escrever(command)
elif(command == "voltar"):
rc.send_key(panasonic_viera.Keys.return_key)
elif(command == "pausar"):
rc.send_key(panasonic_viera.Keys.pause)
elif(command[0:9] == "seleciona"):
#rc.send_key(panasonic_viera.Keys.enter)
commands = command.split(" ")
for c in commands:
arrow_keys(c)
elif(command[0:4] == "cima"):
#rc.send_key(panasonic_viera.Keys.up)
commands = command.split(" ")
for c in commands:
arrow_keys(c)
elif(command[0:5] == "baixo"):
#rc.send_key(panasonic_viera.Keys.down)
commands = command.split(" ")
for c in commands:
arrow_keys(c)
elif(command[0:7] == "esquerd"):
#rc.send_key(panasonic_viera.Keys.left)
commands = command.split(" ")
for c in commands:
arrow_keys(c)
elif(command[0:6] == "direit"):
#rc.send_key(panasonic_viera.Keys.right)
commands = command.split(" ")
for c in commands:
arrow_keys(c)
elif(command == "sair"):
rc.send_key(panasonic_viera.Keys.exit)
elif(command == "netflix"):
rc.launch_app("0010000200180011")
elif(command == "prime"):
rc.launch_app("0010000100180011")
elif(command == "youtube"):
rc.launch_app("0070000200180011")
elif(command == "aumentar volume"):
volume = rc.get_volume()
rc.set_volume(volume + 10)
elif(command == "diminuir volume" or command == "baixar volume"):
volume = rc.get_volume()
rc.set_volume(volume - 10)
elif(command[0:6] == "volume"):
value = command.split(" ")[1]
try:
value = int(value)
rc.set_volume(value)
except:
            print('Error setting volume!')
    return jsonify({'msg':'Success!'})
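# --- Added usage note (not part of the original app): /execute expects a
# form-encoded "command" field, e.g.
#
#   curl -X POST -d "command=netflix" http://localhost:5000/execute
#   curl -X POST -d "command=volume 25" http://localhost:5000/execute
#
# Run with `flask run` (or any WSGI server) after filling in the real TV_IP,
# APP_ID and ENC_KEY values in connect_tv().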
|
the-stack_106_24898
|
import discord
import json
from random import randint
from discord.ext import commands
from gex import *
#
# Get Discord bot token
#
try:
# NOTE: add auth.json at root with the token as the only field
# e.g. { "token": "some_random_token_value" }
with open('auth.json', 'r') as file:
auth_token = json.loads(file.read())['token']
except FileNotFoundError:
print('Cannot start bot without auth token, aborting')
raise
#
# Set basic parameters for Discord bot
#
description = "This bot reminds me of playing Scrabble at Lenny Kravitz's summer house."
# use default Discord intents
intents = discord.Intents.default()
# create bot with parameters
bot = commands.Bot(command_prefix='!', help_command=None, description=description, intents=intents)
#
# Implement common bot events
#
GEX_MAX_INT = len(ALL_GEX_TEXT) - 1
@bot.event
async def on_ready():
print('Logged in as', bot.user.name)
print('------')
#
# Implement gex command
#
@bot.command()
async def gex(ctx):
gex_magic_int = randint(0, GEX_MAX_INT)
gex_text = ALL_GEX_TEXT[gex_magic_int]
await ctx.send(gex_text)
@gex.error
async def gex_error(ctx, error):
if (isinstance(error, commands.MissingRequiredArgument)):
print(commands.MissingRequiredArgument)
#
# Run bot
#
bot.run(auth_token)
|
the-stack_106_24899
|
import matplotlib.pyplot as plt
import numpy as np
import pyvista as pv
sst = pv.read("pdata_xy_sst_t0.vtk")
cmap = "fire" # colorcet (perceptually accurate) color maps
sargs = dict(
shadow=True,
n_labels=5,
italic=False,
fmt="%.1f",
font_family="courier",
# nan_annotation=True,
vertical=True,
)
p = pv.BackgroundPlotter()
#p.add_mesh(sst, scalars="faces", show_edges=True, cmap=cmap, show_scalar_bar=True)
p.add_mesh_threshold(sst, scalars="lons", invert=True, title="Longitude", cmap=cmap, show_edges=True, show_scalar_bar=True, scalar_bar_args=sargs)
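# add_mesh_threshold adds the mesh together with an interactive threshold
# slider widget, so cells can be filtered by the "lons" scalar at run time.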
p.add_text("C48 Longitude Threshold", font_size=10, shadow=True, font="courier")
p.show_axes()
#p.show_grid()
p.scalar_bar.SetTitle("Longitude")
#p.add_scalar_bar(**sargs)
p.camera_position = "yz"
|
the-stack_106_24900
|
# created by Chirath R <[email protected]>
from django import forms
from technical_resources.models import Category, File, Link
class CategoryForm(forms.ModelForm):
name = forms.CharField(label='Category name', help_text='Enter category name, example: Python, C, OS...',
widget=forms.TextInput(attrs={'placeholder': 'DBMS, Ruby, NodeJS....'}))
description = forms.CharField(label='Description', help_text='Describe about the category',
widget=forms.Textarea(attrs={'placeholder': 'Info about this category'}))
image = forms.ImageField(label='Image', help_text='Add an image')
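    # The __init__ below tags every widget with the "form-control" CSS class
    # (presumably for Bootstrap styling); the same pattern is repeated in the
    # other forms in this module.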
def __init__(self, *args, **kwargs):
super(CategoryForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({'class': 'form-control'})
class Meta:
model = Category
fields = ['name', 'image', 'description']
class LinksForm(forms.ModelForm):
name = forms.CharField(label='Link name', help_text='Enter a name to show for the link',
widget=forms.TextInput(attrs={'placeholder': 'Link name'}))
link = forms.URLField(label='Url', help_text='Enter the url',
widget=forms.URLInput(attrs={'placeholder': 'https://www.....'}))
def __init__(self, *args, **kwargs):
super(LinksForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({'class': 'form-control'})
class Meta:
model = Link
fields = ['name', 'link']
class FilesForm(forms.ModelForm):
name = forms.CharField(label='File name', help_text='Enter a name to show for the File',
widget=forms.TextInput(attrs={'placeholder': 'File name'}))
file = forms.FileField(label='Select file', help_text='Select a file')
def __init__(self, *args, **kwargs):
super(FilesForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({'class': 'form-control'})
class Meta:
model = File
fields = ['name', 'file']
|
the-stack_106_24901
|
# -*- coding: utf-8 -*-
"""Exemplo de CRUD com Python, SQLAlchemy e SQLite3."""
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# For debugging, use ``echo=True``:
# engine = create_engine('sqlite:///db.sqlite3', echo=True)
engine = create_engine('sqlite:///db.sqlite3')
# To create the database in memory:
# engine = create_engine('sqlite://')
# Create a pre-configured "Session" class.
# Session is instantiated later to interact with the table.
Session = sessionmaker(bind=engine)
Base = declarative_base()
class TableName(Base):
"""Classe representa uma tabela do banco."""
# ``__tablename__`` - Define o nome da tabela.
# Se o nome da tabela não for definido é utilizado o nome da classe.
__tablename__ = 'table_name'
# Colunas da tabela.
user_id = Column('user_id', Integer, primary_key=True)
name = Column('name', String(32))
age = Column('age', Integer)
gender = Column('gender', String(10))
def __init__(self, name=None, age=None, gender=None):
"""Construtor.
Utilizando o construtor para passar os valores no momento em
que a classe é instanciada.
:param nome: (str) String com o valor que será salvo.
:param idade: (int) Numero inteiro com o valor que será salvo.
:param sexo: (str) String com o valor que será salvo.
"""
self.name = name
self.age = age
self.gender = gender
if __name__ == "__main__":
    # Drop all tables in the database.
    Base.metadata.drop_all(engine)
    # Create all tables.
    Base.metadata.create_all(engine)
    # Create a session (add, commit, query, etc.).
    session = Session()
    # Data that will be inserted into the table.
    # Class with a constructor.
    user = TableName(name='Felipe', age=35, gender='Masculino')
    # Insert the record into the table.
session.add(user)
users = [
TableName(name='Helena', age=20, gender='Feminino'),
TableName(name='João', age=50, gender='Masculino'),
]
    # Insert several records into the table.
    session.add_all(users)
    # If the class does not use a constructor,
    # the data is assigned after creating the instance.
another_user = TableName()
another_user.name = 'Camila'
another_user.age = 50
another_user.gender = 'Feminino'
session.add(another_user)
    # Persist the data.
session.commit()
    # Query all records.
records = session.query(TableName).all()
for row in records:
print(f'ID: {row.user_id} - Nome: {row.name} - Idade: {row.age} - '
f'Sexo: {row.gender}')
print('---\n')
    # Query with a filter.
records = session.query(TableName).filter(TableName.age > 40).all()
for row in records:
print(f'ID: {row.user_id} - Nome: {row.name} - Idade: {row.age} - '
f'Sexo: {row.gender}')
print('---\n')
    # Update a record.
print('ANTES da alteração:')
record = session.query(TableName).filter(TableName.user_id == 1).first()
print(f'ID: {record.user_id} - Nome: {record.name} - Idade: {record.age} - '
f'Sexo: {record.gender}')
new_data = {'name': 'Rafaela', 'age': 50, 'gender': 'Feminino'}
session.query(TableName).filter(TableName.user_id == 1).update(new_data)
session.commit()
print('DEPOIS da alteração:')
record = session.query(TableName).filter(TableName.user_id == 1).first()
print(f'ID: {record.user_id} - Nome: {record.name} - Idade: {record.age} - '
f'Sexo: {record.gender}')
print('---\n')
    # Delete a record from the table.
print('ANTES da remoção:')
record = session.query(TableName).filter(TableName.user_id == 2).first()
print(f'ID: {record.user_id} - Nome: {record.name} - Idade: {record.age} - '
f'Sexo: {record.gender}')
session.query(TableName).filter(TableName.user_id == 2).delete()
session.commit()
print('DEPOIS da remoção:')
record = session.query(TableName).filter(TableName.user_id == 2).first()
print(record)
print('---\n')
    # Close the session.
session.close()
|
the-stack_106_24902
|
class lazy_property(object):
def __init__(self, fget):
self.fget = fget
self.func_name = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return None
value = self.fget(obj)
setattr(obj, self.func_name, value)
return value
class Test(object):
@lazy_property
def results(self):
print('init')
calcs = 5
return calcs
t = Test()
print(t.results)
print('')
print(t.results)
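# Expected output: "init" is printed only once, because the first access
# replaces the descriptor with a plain instance attribute via setattr():
#     init
#     5
#
#     5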
|
the-stack_106_24907
|
# -*- coding: utf-8 -*-
import threading
from iconThreadModule import PackThread
import file_operate
from time import sleep
class packThreadManager(object):
"""
"""
__instance = None
__Lock = threading.Lock()
__taskThreads = []
__finishChannel = []
__curworkDir = ''
def __init__(self):
pass
@staticmethod
def shareInstance():
packThreadManager.__Lock.acquire()
if packThreadManager.__instance == None:
packThreadManager.__instance = object.__new__(packThreadManager)
object.__init__(packThreadManager.__instance)
packThreadManager.__Lock.release()
return packThreadManager.__instance
def getIdleThread(self):
for thread in self.__taskThreads:
if thread.getStatus() == 0:
return thread
if len(self.__taskThreads) < 3:
pkThread = PackThread(len(self.__taskThreads), self.__curworkDir)
self.__taskThreads.append(pkThread)
return pkThread
def startTask(self, platform,arrayList):
iconLs = arrayList
for package in iconLs.keys():
idChannel = package
if idChannel in self.__finishChannel:
continue
pkThread = self.getIdleThread()
if pkThread is None:
return
pkThread.setPlatform(platform)
pkThread.assignPackTask(idChannel,iconLs[idChannel])
if not pkThread.isAlive():
pkThread.start()
self.__finishChannel.append(idChannel)
bOver = True
for thread in self.__taskThreads:
if thread.getStatus() != 0:
bOver = False
break
if bOver == True:
for thread in self.__taskThreads:
thread.stop()
self.__taskThreads.remove(thread)
def getFinishChannelLs(self):
return self.__finishChannel
def isRunning(self):
return len(self.__taskThreads)
def setCurWorkDir(self, workDir):
self.__curworkDir = workDir
file_operate.curDir = workDir
def stopAllTask(self):
for thread in self.__taskThreads:
thread.stop()
self.__taskThreads.remove(thread)
def clearRecord(self):
self.__finishChannel = []
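# A sketch of typical usage, based on the methods above (the exact structure of
# the icon dict is whatever PackThread.assignPackTask expects: a mapping from
# channel id to that channel's icon info):
#
#     manager = packThreadManager.shareInstance()
#     manager.setCurWorkDir('/path/to/work/dir')
#     manager.startTask('android', {'channel_id': icon_info})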
|
the-stack_106_24908
|
#! /usr/bin/env python3
"""
The MIT License
Copyright (c) 2017 by Anthony Westbrook, University of New Hampshire <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Distribute PALADIN execution across a cluster
import os
import argparse
import shlex
import subprocess
import gzip
def plugin_connect(definition):
definition.name = "hpc"
definition.description = "Distribute PALADIN execution across a cluster"
definition.version_major = 1
definition.version_minor = 1
definition.version_revision = 0
definition.dependencies = ["aggregation"]
definition.callback_args = hpc_args
# definition.callback_init = hpc_init
definition.callback_main = hpc_main
def hpc_args(subargs):
# Parse arguments
arg_parser = argparse.ArgumentParser(description="PALADIN Pipeline Plugins: HPC", prog="hpc")
arg_parser.add_argument("reference", metavar="REFERENCE", type=str, help="Reference database")
arg_parser.add_argument("input", metavar="INPUT", type=str, help="Input reads")
arg_parser.add_argument("output", metavar="OUTPUT", type=str, help="Output name")
arg_parser.add_argument("options", metavar="OPTIONS", type=str, nargs=argparse.REMAINDER, help="PALADIN options")
return arg_parser.parse_known_args(shlex.split(subargs))
def hpc_main(args):
# Obtain MPI process info (import MPI here to avoid crashing on non-MPI systems)
from mpi4py import MPI
comm = MPI.COMM_WORLD
process_count = comm.Get_size()
process_idx = comm.Get_rank()
# Process 0 is responsible for splitting reads
if process_idx == 0:
split_reads(args[0].input, process_count)
# Sync to ensure all batches of reads ready for alignment
comm.Barrier()
# Execute PALADIN alignment
process_reads = "{0}-{1}".format(args[0].input, process_idx)
process_output = "{0}-{1}".format(args[0].output, process_idx)
execute(args[0].reference, process_reads, process_output, " ".join(args[0].options))
# Sync to ensure all alignments complete
comm.Barrier()
# Aggregate result
if process_idx == 0:
input_paths = ["{0}-{1}".format(args[0].output, idx) for idx in range(process_count)]
plugins.aggregation.process_data(input_paths, args[0].output)
# Remove process specific files
for idx in range(process_count):
os.remove("{0}-{1}".format(args[0].input, idx))
os.remove("{0}-{1}.sam".format(args[0].output, idx))
os.remove("{0}-{1}_uniprot.tsv".format(args[0].output, idx))
def execute(reference, input_name, output_name, options):
""" Execute PALADIN in the current process """
command = "paladin align {0} {1} -o {2} {3}".format(reference, input_name, output_name, options)
output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
with open("{0}.log".format(output_name), "wb") as handle:
handle.write(output)
def split_reads(reads, count):
""" Split reads across the requested node count """
# Treat as gzip file with gz extension
if reads.endswith("gz"):
input_handle = gzip.open(reads, "rb")
else:
input_handle = open(reads, "rb")
# Open reads and outputs
with input_handle:
output_handles = list()
for out_idx in range(count):
output_handles.append(open("{0}-{1}".format(reads, out_idx), "w"))
mode = -1
out_idx = -1
parse_idx = -1
for line in input_handle:
line = line.decode("utf-8")
if line.rstrip() == "":
continue
            # Initially detect input type (0 = fasta, 1 = fastq)
if mode == -1:
mode = (1, 0)[line.startswith(">")]
if mode == 0:
# FastA mode
if line.startswith(">"):
out_idx = (out_idx + 1) % count
output_handles[out_idx].write(line)
else:
# FastQ mode
parse_idx = (parse_idx + 1) % 4
if parse_idx == 0:
out_idx = (out_idx + 1) % count
output_handles[out_idx].write(line)
# Close output handles
for handle in output_handles:
handle.close()
|
the-stack_106_24912
|
#!/usr/bin/python3
from __future__ import unicode_literals
import yahoogroupsapi
from yahoogroupsapi import YahooGroupsAPI
import argparse
import codecs
import datetime
import json
import logging
import math
import os
import re
import requests.exceptions
import sys
import unicodedata
from os.path import basename
from collections import OrderedDict
from requests.cookies import RequestsCookieJar, create_cookie
if (sys.version_info < (3, 0)):
from cookielib import LWPCookieJar
from urllib import unquote
from HTMLParser import HTMLParser
hp = HTMLParser()
html_unescape = hp.unescape
text = unicode # noqa: F821
else:
from http.cookiejar import LWPCookieJar
from urllib.parse import unquote
from html import unescape as html_unescape
text = str
# WARC metadata params
WARC_META_PARAMS = OrderedDict([('software', 'yahoo-group-archiver'),
('version','20191121.02'),
('format', 'WARC File Format 1.0'),
])
def get_best_photoinfo(photoInfoArr, exclude=[]):
logger = logging.getLogger(name="get_best_photoinfo")
rs = {'tn': 0, 'sn': 1, 'hr': 2, 'or': 3}
# exclude types we're not interested in
for x in exclude:
if x in rs:
rs[x] = -1
best = photoInfoArr[0]
for info in photoInfoArr:
if info['photoType'] not in rs:
logger.error("photoType '%s' not known", info['photoType'])
continue
if rs[info['photoType']] >= rs[best['photoType']]:
best = info
if rs[best['photoType']] == -1:
return None
else:
return best
def archive_messages_metadata(yga):
logger = logging.getLogger('archive_message_metadata')
params = {'sortOrder': 'asc', 'direction': 1, 'count': 1000}
message_ids = []
next_page_start = float('inf')
page_count = 0
logger.info("Archiving message metadata...")
last_next_page_start = 0
while next_page_start > 0:
msgs = yga.messages(**params)
with open("message_metadata_%s.json" % page_count, 'wb') as f:
json.dump(msgs, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
message_ids += [msg['messageId'] for msg in msgs['messages']]
logger.info("Archived message metadata records (%d of %d)", len(message_ids), msgs['totalRecords'])
page_count += 1
next_page_start = params['start'] = msgs['nextPageStart']
if next_page_start == last_next_page_start:
break
last_next_page_start = next_page_start
return message_ids
def archive_message_content(yga, id, status="", skipHTML=False, skipRaw=False):
logger = logging.getLogger('archive_message_content')
if skipRaw is False:
try:
logger.info("Fetching raw message id: %d %s", id, status)
raw_json = yga.messages(id, 'raw')
with open("%s_raw.json" % (id,), 'wb') as f:
json.dump(raw_json, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
except Exception:
logger.exception("Raw grab failed for message %d", id)
if skipHTML is False:
try:
logger.info("Fetching html message id: %d %s", id, status)
html_json = yga.messages(id)
with open("%s.json" % (id,), 'wb') as f:
json.dump(html_json, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
if 'attachmentsInfo' in html_json and len(html_json['attachmentsInfo']) > 0:
with Mkchdir("%d_attachments" % id):
process_single_attachment(yga, html_json['attachmentsInfo'])
except Exception:
logger.exception("HTML grab failed for message %d", id)
def archive_email(yga, message_subset=None, start=None, stop=None, skipHTML=False, skipRaw=False):
logger = logging.getLogger('archive_email')
try:
# Grab messages for initial counts and permissions check
init_messages = yga.messages()
except yahoogroupsapi.AuthenticationError:
logger.error("Couldn't access Messages functionality for this group")
return
except Exception:
logger.exception("Unknown error archiving messages")
return
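    # Build the list of message ids to fetch: an explicit --start/--stop range
    # is merged with any --ids given on the command line; if neither was
    # supplied, fall back to walking the full message index.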
if start is not None or stop is not None:
start = start or 1
stop = stop or init_messages['lastRecordId']
stop = min(stop, init_messages['lastRecordId'])
r = range(start, stop + 1)
if message_subset is None:
message_subset = list(r)
else:
s = set(r).union(message_subset)
message_subset = list(s)
message_subset.sort()
if not message_subset:
message_subset = archive_messages_metadata(yga)
logger.info("Group has %s messages (maximum id: %s), fetching all",
len(message_subset), (message_subset or ['n/a'])[-1])
n = 1
for id in message_subset:
status = "(%d of %d)" % (n, len(message_subset))
n += 1
try:
archive_message_content(yga, id, status, skipHTML, skipRaw)
except Exception:
logger.exception("Failed to get message id: %d", id)
continue
def archive_topics(yga, start=None, alsoDownloadingEmail = False, getRaw=False):
logger = logging.getLogger('archive_topics')
# Grab messages for initial counts and permissions check
logger.info("Initializing messages.")
try:
init_messages = yga.messages()
except yahoogroupsapi.AuthenticationError:
logger.error("Couldn't access Messages functionality for this group")
return
expectedTopics = init_messages['numTopics']
totalRecords = init_messages['totalRecords']
lastRecordId = init_messages['lastRecordId']
if lastRecordId == 0:
logger.error("ERROR: no messages available.")
return
# There may be fewer than totalRecords messages, likely due to deleted messages.
# We also found a group where expectedTopics was 1 less than the actual number of topics available, but the script still downloaded everything.
logger.info("Expecting %d topics and up to %d messages.",expectedTopics,totalRecords)
unretrievableTopicIds = set()
unretrievableMessageIds = set()
retrievedTopicIds = set()
retrievedMessageIds = set()
potentialMessageIds = set(range(1,lastRecordId+1))
# We need to find a valid topic ID to start the process, which we'll get by downloading a message. lastRecordId should be available.
startingTopicId = None
logger.info("Checking message ID %d to find topic.",lastRecordId)
try:
html_json = yga.messages(lastRecordId)
startingTopicId = html_json.get("topicId")
logger.info("The message is part of topic ID %d", startingTopicId)
except:
logger.exception("HTML grab failed for message %d", lastRecordId)
potentialMessageIds.remove(lastRecordId)
unretrievableMessageIds.add(lastRecordId)
# We couldn't get lastRecordId for some reason, so try to find a topic using other messages.
if startingTopicId is None:
startingTopicId = find_topic_id(unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds)
# No valid messages for identifying topic.
if startingTopicId is None:
logger.error("ERROR: Couldn't retrieve any messages.")
return
# Download the starting topic and everything surrounding it (unless we hit a failure).
logger.info("Starting topic archiving with topic ID %d.",startingTopicId)
process_surrounding_topics(startingTopicId,unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds,expectedTopics)
# If we got through the first run of process_surrounding_topics with no failures, assume we got everything.
if not unretrievableTopicIds:
logger.info("Topic archiving completed with no topic failures.")
# Otherwise, continue trying to grab topics and messages until all potential messages are retrieved or found to be unretrievable.
else:
logger.info("Recovering from topic failures.")
while potentialMessageIds:
startingTopicId = find_topic_id(unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds)
if startingTopicId is not None:
process_surrounding_topics(startingTopicId,unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds,expectedTopics)
logger.info("There are %d retrieved topic(s).",len(retrievedTopicIds))
logger.info("There are %d retrieved message(s).",len(retrievedMessageIds))
logger.info("There are %d unretrievable topic(s).",len(unretrievableTopicIds))
logger.info("There are %d unretrievable message(s).",len(unretrievableMessageIds))
# Save the tracking sets.
with open("retrievedTopicIds.json", 'wb') as f:
json.dump(list(retrievedTopicIds), codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
with open("retrievedMessageIds.json", 'wb') as f:
json.dump(list(retrievedMessageIds), codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
with open("unretrievableTopicIds.json", 'wb') as f:
json.dump(list(unretrievableTopicIds), codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
with open("unretrievableMessageIds.json", 'wb') as f:
json.dump(list(unretrievableMessageIds), codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
# If requested, get the raw versions of every available message one at a time. There doesn't appear to be a raw view of an entire topic, so this is slower than the topic download.
if getRaw is True and alsoDownloadingEmail is False:
logger.info("Downloading raw versions of %d messages.",len(retrievedMessageIds))
with Mkchdir('email'):
archive_email(yga,retrievedMessageIds,skipHTML=True)
# Find a topic ID from among potentialMessageIds to start topic archiving with.
# Also save messages from unretrievable topics when possible.
def find_topic_id(unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds):
logger = logging.getLogger('find_topic_id')
    # Keep looking as long as the set of potential message IDs is not empty.
while potentialMessageIds:
# Check an arbitrary message.
msgId = potentialMessageIds.pop()
logger.info("Checking message ID %d to find topic.",msgId)
try:
html_json = yga.messages(msgId)
topicId = html_json.get("topicId")
logger.info("The message is part of topic ID %d", topicId)
# We've already retrieved this topic. This could indicate a bug.
if topicId in retrievedTopicIds:
logger.error("ERROR: This topic has already been archived.")
retrievedTopicIds.add(topicId)
continue
# We've previously tried getting this topic, and it's no good.
# But sometimes Yahoo will give you a message in an unretrievable topic through the messages API.
# Since this is the only way to get the message, go ahead and save it.
if topicId in unretrievableTopicIds:
logger.info("This topic is known to be unretrievable. Saving individual message.")
retrievedMessageIds.add(msgId)
with Mkchdir('email'):
with open("%s.json" % (msgId,), 'wb') as f:
json.dump(html_json, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
if 'attachmentsInfo' in html_json and len(html_json['attachmentsInfo']) > 0:
with Mkchdir("%d_attachments" % msgId):
process_single_attachment(yga, html_json['attachmentsInfo'])
logger.info("%d total messages downloaded.",len(retrievedMessageIds))
continue
# We found a valid topic. Put msgId back in potentialMessageIds since it should be archived with the topic.
potentialMessageIds.add(msgId)
return topicId
except:
logger.exception("HTML grab failed for message %d", msgId)
unretrievableMessageIds.add(msgId)
# Ran out of messages to check.
return None
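# Starting from one known topic, walk the topic list in both directions by
# following the prevTopicId/nextTopicId links returned with each topic.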
def process_surrounding_topics(startingTopicId,unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds,expectedTopics):
logger = logging.getLogger(name="process_surrounding_topics")
topicResults = process_single_topic(startingTopicId,unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds,expectedTopics)
if topicResults["gotTopic"] is False:
return
nextTopicId = topicResults["nextTopicId"]
prevTopicId = topicResults["prevTopicId"]
if nextTopicId > 0:
logger.info("The next topic ID is %d.",nextTopicId)
else:
logger.info("There are no later topics.")
if prevTopicId > 0:
logger.info("The previous topic ID is %d.",prevTopicId)
else:
logger.info("There are no previous topics.")
# Grab all previous topics from the starting topic back.
logger.info("Retrieving previous topics.")
while prevTopicId > 0:
if prevTopicId in unretrievableTopicIds:
logger.info("Reached known unretrievable topic ID %d",prevTopicId)
break
topicResults = process_single_topic(prevTopicId,unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds,expectedTopics)
prevTopicId = topicResults["prevTopicId"]
# Grab all later topics from the starting topic forward.
logger.info("Retrieving later topics.")
while nextTopicId > 0:
if nextTopicId in unretrievableTopicIds:
logger.info("Reached known unretrievable topic ID %d",nextTopicId)
break
topicResults = process_single_topic(nextTopicId,unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds,expectedTopics)
nextTopicId = topicResults["nextTopicId"]
def process_single_topic(topicId,unretrievableTopicIds,unretrievableMessageIds,retrievedTopicIds,retrievedMessageIds,potentialMessageIds,expectedTopics):
logger = logging.getLogger(name="process_single_topic")
topicResults = {
"gotTopic": False,
"nextTopicId": 0,
"prevTopicId": 0
}
# Grab the topic.
try:
logger.info("Fetching topic id %d", topicId)
topic_json = yga.topics(topicId,maxResults=999999)
with open("%s.json" % (topicId,), 'wb') as f:
json.dump(topic_json, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
retrievedTopicIds.add(topicId)
topicResults["gotTopic"] = True
topicResults["nextTopicId"] = topic_json.get("nextTopicId")
topicResults["prevTopicId"] = topic_json.get("prevTopicId")
messages = topic_json.get("messages")
for message in messages:
# Track what messages we've gotten.
msgId = message.get("msgId")
retrievedMessageIds.add(msgId)
try:
potentialMessageIds.remove(msgId)
except:
logger.exception("ERROR: Tried to remove msgId %d from potentialMessageIds when it wasn't there.",msgId)
            # Download message attachments if there are any.
if 'attachmentsInfo' in message and len(message['attachmentsInfo']) > 0:
with Mkchdir("%d_attachments" % msgId):
process_single_attachment(yga, message['attachmentsInfo'])
logger.info("Fetched topic ID %d with message count %d (topic %d of %d). %d total messages downloaded.",topicId,topic_json.get("totalMsgInTopic"),len(retrievedTopicIds),expectedTopics,len(retrievedMessageIds))
except:
logger.exception("ERROR downloading topic ID %d", topicId)
unretrievableTopicIds.add(topicId)
return topicResults
def process_single_attachment(yga, attach):
logger = logging.getLogger(name="process_single_attachment")
for frec in attach:
logger.info("Fetching attachment '%s'", frec['filename'])
fname = "%s-%s" % (frec['fileId'], frec['filename'])
with open(sanitise_file_name(fname), 'wb') as f:
if 'link' in frec:
# try and download the attachment
# (sometimes yahoo doesn't keep them)
try:
yga.download_file(frec['link'], f=f)
except requests.exceptions.HTTPError as err:
logger.error("ERROR downloading attachment '%s': %s", frec['link'], err)
continue
elif 'photoInfo' in frec:
process_single_photo(frec['photoInfo'],f)
def process_single_photo(photoinfo,f):
logger = logging.getLogger(name="process_single_photo")
# keep retrying until we find the largest image size we can download
# (sometimes yahoo doesn't keep the originals)
exclude = []
ok = False
while not ok:
# find best photoinfo (largest size)
bestPhotoinfo = get_best_photoinfo(photoinfo, exclude)
if bestPhotoinfo is None:
logger.error("Can't find a viable copy of this photo")
break
# try and download it
try:
yga.download_file(bestPhotoinfo['displayURL'], f=f)
ok = True
except requests.exceptions.HTTPError as err:
# yahoo says no. exclude this size and try for another.
logger.error("ERROR downloading '%s' variant %s: %s", bestPhotoinfo['displayURL'],
bestPhotoinfo['photoType'], err)
exclude.append(bestPhotoinfo['photoType'])
def archive_files(yga, subdir=None):
logger = logging.getLogger(name="archive_files")
try:
if subdir:
file_json = yga.files(sfpath=subdir)
else:
file_json = yga.files()
except Exception:
logger.error("Couldn't access Files functionality for this group")
return
with open('fileinfo.json', 'wb') as f:
json.dump(file_json['dirEntries'], codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
n = 0
sz = len(file_json['dirEntries'])
for path in file_json['dirEntries']:
n += 1
if path['type'] == 0:
# Regular file
name = html_unescape(path['fileName'])
new_name = sanitise_file_name("%d_%s" % (n, name))
logger.info("Fetching file '%s' as '%s' (%d/%d)", name, new_name, n, sz)
with open(new_name, 'wb') as f:
yga.download_file(path['downloadURL'], f)
elif path['type'] == 1:
# Directory
name = html_unescape(path['fileName'])
new_name = "%d_%s" % (n, name)
logger.info("Fetching directory '%s' as '%s' (%d/%d)", name, sanitise_folder_name(new_name), n, sz)
with Mkchdir(new_name): # (new_name sanitised again by Mkchdir)
pathURI = unquote(path['pathURI'])
archive_files(yga, subdir=pathURI)
def archive_attachments(yga):
logger = logging.getLogger(name="archive_attachments")
try:
attachments_json = yga.attachments(count=999999)
except Exception:
logger.error("Couldn't access Attachments functionality for this group")
return
with open('allattachmentinfo.json', 'wb') as f:
json.dump(attachments_json['attachments'], codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
n = 0
for a in attachments_json['attachments']:
n += 1
with Mkchdir(a['attachmentId']):
try:
a_json = yga.attachments(a['attachmentId'])
except Exception:
logger.error("Attachment id %d inaccessible.", a['attachmentId'])
continue
with open('attachmentinfo.json', 'wb') as f:
json.dump(a_json, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
process_single_attachment(yga, a_json['files'])
def archive_photos(yga):
logger = logging.getLogger(name="archive_photos")
try:
nb_albums = yga.albums(count=5)['total'] + 1
except Exception:
logger.error("Couldn't access Photos functionality for this group")
return
albums = yga.albums(count=nb_albums)
n = 0
with open('albums.json', 'wb') as f:
json.dump(albums['albums'], codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
for a in albums['albums']:
n += 1
name = html_unescape(a['albumName'])
# Yahoo sometimes has an off-by-one error in the album count...
logger.info("Fetching album '%s' (%d/%d)", name, n, albums['total'])
folder = "%d-%s" % (a['albumId'], name)
with Mkchdir(folder):
photos = yga.albums(a['albumId'])
pages = int(photos['total'] / 100 + 1)
p = 0
for page in range(pages):
photos = yga.albums(a['albumId'], start=page*100, count=100)
with open('photos-%d.json' % page, 'wb') as f:
json.dump(photos['photos'], codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
for photo in photos['photos']:
p += 1
pname = html_unescape(photo['photoName'])
logger.info("Fetching photo '%s' (%d/%d)", pname, p, photos['total'])
fname = "%d-%s.jpg" % (photo['photoId'], pname)
with open(sanitise_file_name(fname), 'wb') as f:
process_single_photo(photo['photoInfo'],f)
def archive_db(yga):
logger = logging.getLogger(name="archive_db")
try:
db_json = yga.database()
except yahoogroupsapi.AuthenticationError:
db_json = None
# 401 or 403 error means Permission Denied; 307 means redirect to login. Retrying won't help.
logger.error("Couldn't access Database functionality for this group")
return
with open('databases.json', 'wb') as f:
json.dump(db_json, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
n = 0
nts = len(db_json['tables'])
for table in db_json['tables']:
n += 1
logger.info("Downloading database table '%s' (%d/%d)", table['name'], n, nts)
name = "%s_%s.csv" % (table['tableId'], table['name'])
uri = "https://groups.yahoo.com/neo/groups/%s/database/%s/records/export?format=csv" % (yga.group, table['tableId'])
with open(sanitise_file_name(name), 'wb') as f:
yga.download_file(uri, f)
records_json = yga.database(table['tableId'], 'records')
with open('%s_records.json' % table['tableId'], 'wb') as f:
json.dump(records_json, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
def archive_links(yga, subdir=''):
logger = logging.getLogger(name="archive_links")
try:
links = yga.links(linkdir=subdir)
except yahoogroupsapi.AuthenticationError:
logger.error("Couldn't access Links functionality for this group")
return
with open('links.json', 'wb') as f:
json.dump(links, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
logger.info("Written %d links from %s folder", links['numLink'], subdir)
n = 0
for a in links['dirs']:
n += 1
logger.info("Fetching links folder '%s' (%d/%d)", a['folder'], n, links['numDir'])
with Mkchdir(a['folder']):
archive_links(yga, "%s/%s" % (subdir, a['folder']))
def archive_calendar(yga):
logger = logging.getLogger(name="archive_calendar")
groupinfo = yga.HackGroupInfo()
if 'entityId' not in groupinfo:
logger.error("Couldn't download calendar/events: missing entityId")
return
entityId = groupinfo['entityId']
api_root = "https://calendar.yahoo.com/ws/v3"
# We get the wssid
tmpUri = "%s/users/%s/calendars/events/?format=json&dtstart=20000101dtend=20000201&wssid=Dummy" % (api_root, entityId)
logger.info("Getting wssid. Expecting 401 or 403 response.")
try:
yga.download_file(tmpUri) # We expect a 403 or 401 here
logger.error("Attempt to get wssid returned HTTP 200, which is unexpected!") # we should never hit this
return
except requests.exceptions.HTTPError as e:
if e.response.status_code == 403 or e.response.status_code == 401:
try:
tmpJson = json.loads(e.response.content)['calendarError']
            except:
                logger.exception("ERROR: Couldn't load wssid exception to get calendarError.")
                return
else:
logger.error("Attempt to get wssid returned an unexpected response status %d" % e.response.status_code)
return
if 'wssid' not in tmpJson:
logger.error("Couldn't download calendar/events: missing wssid")
return
wssid = tmpJson['wssid']
# Getting everything since the launch of Yahoo! Groups (January 30, 2001)
archiveDate = datetime.datetime(2001, 1, 30)
endDate = datetime.datetime(2025, 1, 1)
while archiveDate < endDate:
jsonStart = archiveDate.strftime("%Y%m%d")
jsonEnd = (archiveDate + datetime.timedelta(days=1000)).strftime("%Y%m%d")
calURL = "%s/users/%s/calendars/events/?format=json&dtstart=%s&dtend=%s&wssid=%s" % \
(api_root, entityId, jsonStart, jsonEnd, wssid)
        try:
            logger.info("Trying to get events between %s and %s", jsonStart, jsonEnd)
            calContentRaw = yga.download_file(calURL)
        except requests.exceptions.HTTPError:
            # Skip this window on a hard failure; without this guard the
            # json.loads() below would run with no (or stale) calContentRaw.
            logger.error("Unrecoverable error getting events between %s and %s: URL %s", jsonStart, jsonEnd, calURL)
        else:
            calContent = json.loads(calContentRaw)
            if calContent['events']['count'] > 0:
                filename = jsonStart + "-" + jsonEnd + ".json"
                with open(filename, 'wb') as f:
                    logger.info("Got %d event(s)", calContent['events']['count'])
                    json.dump(calContent, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
        archiveDate += datetime.timedelta(days=1000)
def archive_about(yga):
logger = logging.getLogger(name="archive_about")
groupinfo = yga.HackGroupInfo()
logger.info("Downloading group description data")
with open('about.json', 'wb') as f:
json.dump(groupinfo, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
statistics = yga.statistics()
with open('statistics.json', 'wb') as f:
json.dump(statistics, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
exclude = []
# Check if we really have a photo in the group description
if ('photoInfo' in statistics['groupHomePage'] and statistics['groupHomePage']['photoInfo']):
# Base filename on largest photo size.
bestphotoinfo = get_best_photoinfo(statistics['groupHomePage']['photoInfo'], exclude)
fname = 'GroupPhoto-%s' % basename(bestphotoinfo['displayURL']).split('?')[0]
logger.info("Downloading the photo in group description as %s", fname)
with open(sanitise_file_name(fname), 'wb') as f:
process_single_photo(statistics['groupHomePage']['photoInfo'],f)
if statistics['groupCoverPhoto']['hasCoverImage']:
# Base filename on largest photo size.
bestphotoinfo = get_best_photoinfo(statistics['groupCoverPhoto']['photoInfo'], exclude)
fname = 'GroupCover-%s' % basename(bestphotoinfo['displayURL']).split('?')[0]
logger.info("Downloading the group cover as %s", fname)
with open(sanitise_file_name(fname), 'wb') as f:
process_single_photo(statistics['groupCoverPhoto']['photoInfo'],f)
def archive_polls(yga):
logger = logging.getLogger(name="archive_polls")
try:
pollsList = yga.polls(count=100, sort='DESC')
except yahoogroupsapi.AuthenticationError:
logger.error("Couldn't access Polls functionality for this group")
return
if len(pollsList) == 100:
logger.info("Got 100 polls, checking if there are more ...")
endoflist = False
offset = 99
while not endoflist:
tmpList = yga.polls(count=100, sort='DESC', start=offset)
tmpCount = len(tmpList)
logger.info("Got %d more polls", tmpCount)
# Trivial case first
if tmpCount < 100:
endoflist = True
# Again we got 100 polls, increase the offset
if tmpCount == 100:
offset += 99
# Last survey
if pollsList[len(pollsList)-1]['surveyId'] == tmpList[len(tmpList)-1]['surveyId']:
logger.info("No new polls found with offset %d", offset)
endoflist = True
break
pollsList += tmpList
totalPolls = len(pollsList)
logger.info("Found %d polls to grab", totalPolls)
n = 1
for p in pollsList:
logger.info("Downloading poll %d [%d/%d]", p['surveyId'], n, totalPolls)
pollInfo = yga.polls(p['surveyId'])
fname = '%s-%s.json' % (n, p['surveyId'])
with open(fname, 'wb') as f:
json.dump(pollInfo, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
n += 1
def archive_members(yga):
logger = logging.getLogger(name="archive_members")
try:
confirmed_json = yga.members('confirmed')
except yahoogroupsapi.AuthenticationError:
logger.error("Couldn't access Members list functionality for this group")
return
n_members = confirmed_json['total']
# we can dump 100 member records at a time
all_members = []
for i in range(int(math.ceil(n_members)/100 + 1)):
confirmed_json = yga.members('confirmed', start=100*i, count=100)
all_members = all_members + confirmed_json['members']
with open('memberinfo_%d.json' % i, 'wb') as f:
json.dump(confirmed_json, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
all_json_data = {"total": n_members, "members": all_members}
with open('allmemberinfo.json', 'wb') as f:
json.dump(all_json_data, codecs.getwriter('utf-8')(f), ensure_ascii=False, indent=4)
logger.info("Saved members: Expected: %d, Actual: %d", n_members, len(all_members))
####
# Utility Functions
####
def sanitise_file_name(value):
"""
Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, periods or hyphens.
Also strip leading and trailing whitespace and periods.
"""
value = text(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s.-]', '', value).strip().strip('.')
return re.sub(r'[-\s]+', '-', value)
def sanitise_folder_name(name):
return sanitise_file_name(name).replace('.', '_')
class Mkchdir:
d = ""
def __init__(self, d, sanitize=True):
self.d = sanitise_folder_name(d) if sanitize else d
def __enter__(self):
try:
os.mkdir(self.d)
except OSError:
pass
os.chdir(self.d)
def __exit__(self, exc_type, exc_value, traceback):
os.chdir('..')
class CustomFormatter(logging.Formatter):
def formatTime(self, record, datefmt=None):
if '%f' in datefmt:
datefmt = datefmt.replace('%f', '%03d' % record.msecs)
return logging.Formatter.formatTime(self, record, datefmt)
def init_cookie_jar(cookie_file=None, cookie_t=None, cookie_y=None, cookie_euconsent=None):
cookie_jar = LWPCookieJar(cookie_file) if cookie_file else RequestsCookieJar()
if cookie_file and os.path.exists(cookie_file):
cookie_jar.load(ignore_discard=True)
    if cookie_t:
cookie_jar.set_cookie(create_cookie('T', cookie_t))
if cookie_y:
cookie_jar.set_cookie(create_cookie('Y', cookie_y))
if cookie_euconsent:
cookie_jar.set_cookie(create_cookie('EuConsent', cookie_euconsent))
if cookie_file:
cookie_jar.save(ignore_discard=True)
return cookie_jar
if __name__ == "__main__":
p = argparse.ArgumentParser()
pa = p.add_argument_group(title='Authentication Options')
pa.add_argument('-ct', '--cookie_t', type=str,
help='T authentication cookie from yahoo.com')
pa.add_argument('-cy', '--cookie_y', type=str,
help='Y authentication cookie from yahoo.com')
pa.add_argument('-ce', '--cookie_e', type=str, default='',
help='Additional EuConsent cookie is required in EU')
pa.add_argument('-cf', '--cookie-file', type=str,
help='File to store authentication cookies to. Cookies passed on the command line will overwrite '
'any already in the file.')
po = p.add_argument_group(title='What to archive', description='By default, all the below.')
po.add_argument('-e', '--email', action='store_true',
help='Only archive email and attachments (from email)')
po.add_argument('-at', '--attachments', action='store_true',
help='Only archive attachments (from attachments list)')
po.add_argument('-f', '--files', action='store_true',
help='Only archive files')
po.add_argument('-i', '--photos', action='store_true',
help='Only archive photo galleries')
po.add_argument('-t', '--topics', action='store_true',
help='Only archive HTML email and attachments through the topics API')
po.add_argument('-tr', '--topicsWithRaw', action='store_true',
help='Only archive both HTML and raw email and attachments through the topics API')
po.add_argument('-d', '--database', action='store_true',
help='Only archive database')
po.add_argument('-l', '--links', action='store_true',
help='Only archive links')
po.add_argument('-c', '--calendar', action='store_true',
help='Only archive events')
po.add_argument('-p', '--polls', action='store_true',
help='Only archive polls')
po.add_argument('-a', '--about', action='store_true',
help='Only archive general info about the group')
po.add_argument('-m', '--members', action='store_true',
help='Only archive members')
pr = p.add_argument_group(title='Request Options')
pr.add_argument('--user-agent', type=str,
help='Override the default user agent used to make requests')
pc = p.add_argument_group(title='Message Range Options',
description='Options to specify which messages to download. Use of multiple options will '
'be combined. Note: These options will also try to fetch message IDs that may not exist '
'in the group.')
pc.add_argument('--start', type=int,
help='Email message id to start from (specifying this will cause only specified message contents to'
' be downloaded, and not message indexes). Default to 1, if end option provided.')
pc.add_argument('--stop', type=int,
help='Email message id to stop at (inclusive), defaults to last message ID available, if start '
'option provided.')
pc.add_argument('--ids', nargs='+', type=int,
help='Get email message by ID(s). Space separated, terminated by another flag or --')
pf = p.add_argument_group(title='Output Options')
pf.add_argument('-w', '--warc', action='store_true',
help='Output WARC file of raw network requests. [Requires warcio package installed]')
p.add_argument('-v', '--verbose', action='store_true')
p.add_argument('--colour', '--color', action='store_true', help='Colour log output to terminal')
p.add_argument('--delay', type=float, default=0.2, help='Minimum delay between requests (default 0.2s)')
p.add_argument('group', type=str)
args = p.parse_args()
# Setup logging
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
log_format = {'fmt': '%(asctime)s %(levelname)s %(name)s %(message)s', 'datefmt': '%Y-%m-%d %H:%M:%S.%f %Z'}
log_formatter = CustomFormatter(**log_format)
log_level = logging.DEBUG if args.verbose else logging.INFO
if args.colour:
try:
import coloredlogs
except ImportError as e:
print("Coloured logging output requires the 'coloredlogs' package to be installed.")
raise e
coloredlogs.install(level=log_level, **log_format)
else:
log_stdout_handler = logging.StreamHandler(sys.stdout)
log_stdout_handler.setLevel(log_level)
log_stdout_handler.setFormatter(log_formatter)
root_logger.addHandler(log_stdout_handler)
cookie_jar = init_cookie_jar(args.cookie_file, args.cookie_t, args.cookie_y, args.cookie_e)
headers = {}
if args.user_agent:
headers['User-Agent'] = args.user_agent
yga = YahooGroupsAPI(args.group, cookie_jar, headers, min_delay=args.delay)
if not (args.email or args.files or args.photos or args.database or args.links or args.calendar or args.about or
args.polls or args.attachments or args.members or args.topics or args.topicsWithRaw):
args.email = args.files = args.photos = args.database = args.links = args.calendar = args.about = \
args.polls = args.attachments = args.members = args.topics = args.topicsWithRaw = True
with Mkchdir(args.group, sanitize=False):
log_file_handler = logging.FileHandler('archive.log')
log_file_handler.setFormatter(log_formatter)
root_logger.addHandler(log_file_handler)
if args.warc:
try:
from warcio import WARCWriter
except ImportError:
logging.error('WARC output requires the warcio package to be installed.')
exit(1)
fhwarc = open('data.warc.gz', 'ab')
warc_writer = WARCWriter(fhwarc)
warcmeta = warc_writer.create_warcinfo_record(fhwarc.name, WARC_META_PARAMS)
warc_writer.write_record(warcmeta)
yga.set_warc_writer(warc_writer)
if args.email:
with Mkchdir('email'):
archive_email(yga, message_subset=args.ids, start=args.start, stop=args.stop)
if args.files:
with Mkchdir('files'):
archive_files(yga)
if args.photos:
with Mkchdir('photos'):
archive_photos(yga)
if args.topics:
with Mkchdir('topics'):
archive_topics(yga,start=args.start,alsoDownloadingEmail = args.email)
if args.topicsWithRaw:
with Mkchdir('topics'):
archive_topics(yga,start=args.start,alsoDownloadingEmail = args.email,getRaw=True)
if args.database:
with Mkchdir('databases'):
archive_db(yga)
if args.links:
with Mkchdir('links'):
archive_links(yga)
if args.calendar:
with Mkchdir('calendar'):
archive_calendar(yga)
if args.about:
with Mkchdir('about'):
archive_about(yga)
if args.polls:
with Mkchdir('polls'):
archive_polls(yga)
if args.attachments:
with Mkchdir('attachments'):
archive_attachments(yga)
if args.members:
with Mkchdir('members'):
archive_members(yga)
if args.warc:
fhwarc.close()
|
the-stack_106_24913
|
# Natural Language Toolkit: Logic
#
# Author: Dan Garrette <[email protected]>
#
# Copyright (C) 2001-2018 NLTK Project
# URL: <http://nltk.org>
# For license information, see LICENSE.TXT
"""
A version of first order predicate logic, built on
top of the typed lambda calculus.
"""
from __future__ import print_function, unicode_literals
import re
import operator
from collections import defaultdict
from functools import reduce, total_ordering
from six import string_types
from nltk.util import Trie
from nltk.internals import Counter
from nltk.compat import python_2_unicode_compatible
APP = 'APP'
_counter = Counter()
class Tokens(object):
LAMBDA = '\\'; LAMBDA_LIST = ['\\']
#Quantifiers
EXISTS = 'exists'; EXISTS_LIST = ['some', 'exists', 'exist']
ALL = 'all'; ALL_LIST = ['all', 'forall']
#Punctuation
DOT = '.'
OPEN = '('
CLOSE = ')'
COMMA = ','
#Operations
NOT = '-'; NOT_LIST = ['not', '-', '!']
AND = '&'; AND_LIST = ['and', '&', '^']
OR = '|'; OR_LIST = ['or', '|']
IMP = '->'; IMP_LIST = ['implies', '->', '=>']
IFF = '<->'; IFF_LIST = ['iff', '<->', '<=>']
EQ = '='; EQ_LIST = ['=', '==']
NEQ = '!='; NEQ_LIST = ['!=']
#Collections of tokens
BINOPS = AND_LIST + OR_LIST + IMP_LIST + IFF_LIST
QUANTS = EXISTS_LIST + ALL_LIST
PUNCT = [DOT, OPEN, CLOSE, COMMA]
TOKENS = BINOPS + EQ_LIST + NEQ_LIST + QUANTS + LAMBDA_LIST + PUNCT + NOT_LIST
#Special
SYMBOLS = [x for x in TOKENS if re.match(r'^[-\\.(),!&^|>=<]*$', x)]
def boolean_ops():
"""
Boolean operators
"""
names = ["negation", "conjunction", "disjunction", "implication", "equivalence"]
for pair in zip(names, [Tokens.NOT, Tokens.AND, Tokens.OR, Tokens.IMP, Tokens.IFF]):
print("%-15s\t%s" % pair)
def equality_preds():
"""
Equality predicates
"""
names = ["equality", "inequality"]
for pair in zip(names, [Tokens.EQ, Tokens.NEQ]):
print("%-15s\t%s" % pair)
def binding_ops():
"""
Binding operators
"""
names = ["existential", "universal", "lambda"]
for pair in zip(names, [Tokens.EXISTS, Tokens.ALL, Tokens.LAMBDA]):
print("%-15s\t%s" % pair)
@python_2_unicode_compatible
class LogicParser(object):
"""A lambda calculus expression parser."""
def __init__(self, type_check=False):
"""
:param type_check: bool should type checking be performed?
to their types.
"""
assert isinstance(type_check, bool)
self._currentIndex = 0
self._buffer = []
self.type_check = type_check
"""A list of tuples of quote characters. The 4-tuple is comprised
of the start character, the end character, the escape character, and
a boolean indicating whether the quotes should be included in the
result. Quotes are used to signify that a token should be treated as
atomic, ignoring any special characters within the token. The escape
character allows the quote end character to be used within the quote.
If True, the boolean indicates that the final token should contain the
quote and escape characters.
This method exists to be overridden"""
self.quote_chars = []
self.operator_precedence = dict(
[(x,1) for x in Tokens.LAMBDA_LIST] + \
[(x,2) for x in Tokens.NOT_LIST] + \
[(APP,3)] + \
[(x,4) for x in Tokens.EQ_LIST+Tokens.NEQ_LIST] + \
[(x,5) for x in Tokens.QUANTS] + \
[(x,6) for x in Tokens.AND_LIST] + \
[(x,7) for x in Tokens.OR_LIST] + \
[(x,8) for x in Tokens.IMP_LIST] + \
[(x,9) for x in Tokens.IFF_LIST] + \
[(None,10)])
self.right_associated_operations = [APP]
def parse(self, data, signature=None):
"""
Parse the expression.
:param data: str for the input to be parsed
:param signature: ``dict<str, str>`` that maps variable names to type
strings
:returns: a parsed Expression
"""
data = data.rstrip()
self._currentIndex = 0
self._buffer, mapping = self.process(data)
try:
result = self.process_next_expression(None)
if self.inRange(0):
raise UnexpectedTokenException(self._currentIndex+1, self.token(0))
except LogicalExpressionException as e:
msg = '%s\n%s\n%s^' % (e, data, ' '*mapping[e.index-1])
raise LogicalExpressionException(None, msg)
if self.type_check:
result.typecheck(signature)
return result
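    # A sketch of typical usage (this mirrors the standard NLTK API for this
    # class):
    #     lp = LogicParser()
    #     e1 = lp.parse(r'all x.(dog(x) -> bark(x))')
    #     e2 = lp.parse(r'\x.P(x)(john)')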
def process(self, data):
"""Split the data into tokens"""
out = []
mapping = {}
tokenTrie = Trie(self.get_all_symbols())
token = ''
data_idx = 0
token_start_idx = data_idx
while data_idx < len(data):
cur_data_idx = data_idx
quoted_token, data_idx = self.process_quoted_token(data_idx, data)
if quoted_token:
if not token:
token_start_idx = cur_data_idx
token += quoted_token
continue
st = tokenTrie
c = data[data_idx]
symbol = ''
while c in st:
symbol += c
st = st[c]
if len(data)-data_idx > len(symbol):
c = data[data_idx+len(symbol)]
else:
break
if Trie.LEAF in st:
#token is a complete symbol
if token:
mapping[len(out)] = token_start_idx
out.append(token)
token = ''
mapping[len(out)] = data_idx
out.append(symbol)
data_idx += len(symbol)
else:
if data[data_idx] in ' \t\n': #any whitespace
if token:
mapping[len(out)] = token_start_idx
out.append(token)
token = ''
else:
if not token:
token_start_idx = data_idx
token += data[data_idx]
data_idx += 1
if token:
mapping[len(out)] = token_start_idx
out.append(token)
mapping[len(out)] = len(data)
mapping[len(out)+1] = len(data)+1
return out, mapping
def process_quoted_token(self, data_idx, data):
token = ''
c = data[data_idx]
i = data_idx
for start, end, escape, incl_quotes in self.quote_chars:
if c == start:
if incl_quotes:
token += c
i += 1
while data[i] != end:
if data[i] == escape:
if incl_quotes:
token += data[i]
i += 1
if len(data) == i: #if there are no more chars
raise LogicalExpressionException(None, "End of input reached. "
"Escape character [%s] found at end."
% escape)
token += data[i]
else:
token += data[i]
i += 1
if len(data) == i:
raise LogicalExpressionException(None, "End of input reached. "
"Expected: [%s]" % end)
if incl_quotes:
token += data[i]
i += 1
if not token:
raise LogicalExpressionException(None, 'Empty quoted token found')
break
return token, i
def get_all_symbols(self):
"""This method exists to be overridden"""
return Tokens.SYMBOLS
def inRange(self, location):
"""Return TRUE if the given location is within the buffer"""
return self._currentIndex+location < len(self._buffer)
def token(self, location=None):
"""Get the next waiting token. If a location is given, then
return the token at currentIndex+location without advancing
currentIndex; setting it gives lookahead/lookback capability."""
try:
if location is None:
tok = self._buffer[self._currentIndex]
self._currentIndex += 1
else:
tok = self._buffer[self._currentIndex+location]
return tok
except IndexError:
raise ExpectedMoreTokensException(self._currentIndex+1)
def isvariable(self, tok):
return tok not in Tokens.TOKENS
def process_next_expression(self, context):
"""Parse the next complete expression from the stream and return it."""
try:
tok = self.token()
except ExpectedMoreTokensException:
raise ExpectedMoreTokensException(self._currentIndex+1, message='Expression expected.')
accum = self.handle(tok, context)
if not accum:
raise UnexpectedTokenException(self._currentIndex, tok, message='Expression expected.')
return self.attempt_adjuncts(accum, context)
def handle(self, tok, context):
"""This method is intended to be overridden for logics that
use different operators or expressions"""
if self.isvariable(tok):
return self.handle_variable(tok, context)
elif tok in Tokens.NOT_LIST:
return self.handle_negation(tok, context)
elif tok in Tokens.LAMBDA_LIST:
return self.handle_lambda(tok, context)
elif tok in Tokens.QUANTS:
return self.handle_quant(tok, context)
elif tok == Tokens.OPEN:
return self.handle_open(tok, context)
def attempt_adjuncts(self, expression, context):
cur_idx = None
while cur_idx != self._currentIndex: #while adjuncts are added
cur_idx = self._currentIndex
expression = self.attempt_EqualityExpression(expression, context)
expression = self.attempt_ApplicationExpression(expression, context)
expression = self.attempt_BooleanExpression(expression, context)
return expression
def handle_negation(self, tok, context):
return self.make_NegatedExpression(self.process_next_expression(Tokens.NOT))
def make_NegatedExpression(self, expression):
return NegatedExpression(expression)
def handle_variable(self, tok, context):
#It's either: 1) a predicate expression: sees(x,y)
# 2) an application expression: P(x)
# 3) a solo variable: john OR x
accum = self.make_VariableExpression(tok)
if self.inRange(0) and self.token(0) == Tokens.OPEN:
#The predicate has arguments
if not isinstance(accum, FunctionVariableExpression) and \
not isinstance(accum, ConstantExpression):
raise LogicalExpressionException(self._currentIndex,
"'%s' is an illegal predicate name. "
"Individual variables may not be used as "
"predicates." % tok)
self.token() #swallow the Open Paren
#curry the arguments
accum = self.make_ApplicationExpression(accum, self.process_next_expression(APP))
while self.inRange(0) and self.token(0) == Tokens.COMMA:
self.token() #swallow the comma
accum = self.make_ApplicationExpression(accum, self.process_next_expression(APP))
self.assertNextToken(Tokens.CLOSE)
return accum
def get_next_token_variable(self, description):
try:
tok = self.token()
except ExpectedMoreTokensException as e:
raise ExpectedMoreTokensException(e.index, 'Variable expected.')
if isinstance(self.make_VariableExpression(tok), ConstantExpression):
raise LogicalExpressionException(self._currentIndex,
"'%s' is an illegal variable name. "
"Constants may not be %s." % (tok, description))
return Variable(tok)
def handle_lambda(self, tok, context):
# Expression is a lambda expression
if not self.inRange(0):
raise ExpectedMoreTokensException(self._currentIndex+2,
message="Variable and Expression expected following lambda operator.")
vars = [self.get_next_token_variable('abstracted')]
while True:
if not self.inRange(0) or (self.token(0) == Tokens.DOT and not self.inRange(1)):
raise ExpectedMoreTokensException(self._currentIndex+2, message="Expression expected.")
if not self.isvariable(self.token(0)):
break
# Support expressions like: \x y.M == \x.\y.M
vars.append(self.get_next_token_variable('abstracted'))
if self.inRange(0) and self.token(0) == Tokens.DOT:
self.token() #swallow the dot
accum = self.process_next_expression(tok)
while vars:
accum = self.make_LambdaExpression(vars.pop(), accum)
return accum
def handle_quant(self, tok, context):
# Expression is a quantified expression: some x.M
factory = self.get_QuantifiedExpression_factory(tok)
if not self.inRange(0):
raise ExpectedMoreTokensException(self._currentIndex+2,
message="Variable and Expression expected following quantifier '%s'." % tok)
vars = [self.get_next_token_variable('quantified')]
while True:
if not self.inRange(0) or (self.token(0) == Tokens.DOT and not self.inRange(1)):
raise ExpectedMoreTokensException(self._currentIndex+2, message="Expression expected.")
if not self.isvariable(self.token(0)):
break
# Support expressions like: some x y.M == some x.some y.M
vars.append(self.get_next_token_variable('quantified'))
if self.inRange(0) and self.token(0) == Tokens.DOT:
self.token() #swallow the dot
accum = self.process_next_expression(tok)
while vars:
accum = self.make_QuanifiedExpression(factory, vars.pop(), accum)
return accum
def get_QuantifiedExpression_factory(self, tok):
"""This method serves as a hook for other logic parsers that
have different quantifiers"""
if tok in Tokens.EXISTS_LIST:
return ExistsExpression
elif tok in Tokens.ALL_LIST:
return AllExpression
else:
self.assertToken(tok, Tokens.QUANTS)
def make_QuanifiedExpression(self, factory, variable, term):
return factory(variable, term)
def handle_open(self, tok, context):
#Expression is in parens
accum = self.process_next_expression(None)
self.assertNextToken(Tokens.CLOSE)
return accum
def attempt_EqualityExpression(self, expression, context):
"""Attempt to make an equality expression. If the next token is an
equality operator, then an EqualityExpression will be returned.
Otherwise, the parameter will be returned."""
if self.inRange(0):
tok = self.token(0)
if tok in Tokens.EQ_LIST + Tokens.NEQ_LIST and self.has_priority(tok, context):
self.token() #swallow the "=" or "!="
expression = self.make_EqualityExpression(expression, self.process_next_expression(tok))
if tok in Tokens.NEQ_LIST:
expression = self.make_NegatedExpression(expression)
return expression
def make_EqualityExpression(self, first, second):
"""This method serves as a hook for other logic parsers that
have different equality expression classes"""
return EqualityExpression(first, second)
def attempt_BooleanExpression(self, expression, context):
"""Attempt to make a boolean expression. If the next token is a boolean
operator, then a BooleanExpression will be returned. Otherwise, the
parameter will be returned."""
while self.inRange(0):
tok = self.token(0)
factory = self.get_BooleanExpression_factory(tok)
if factory and self.has_priority(tok, context):
self.token() #swallow the operator
expression = self.make_BooleanExpression(factory, expression,
self.process_next_expression(tok))
else:
break
return expression
def get_BooleanExpression_factory(self, tok):
"""This method serves as a hook for other logic parsers that
have different boolean operators"""
if tok in Tokens.AND_LIST:
return AndExpression
elif tok in Tokens.OR_LIST:
return OrExpression
elif tok in Tokens.IMP_LIST:
return ImpExpression
elif tok in Tokens.IFF_LIST:
return IffExpression
else:
return None
def make_BooleanExpression(self, factory, first, second):
return factory(first, second)
def attempt_ApplicationExpression(self, expression, context):
"""Attempt to make an application expression. The next tokens are
a list of arguments in parens, then the argument expression is a
function being applied to the arguments. Otherwise, return the
argument expression."""
if self.has_priority(APP, context):
if self.inRange(0) and self.token(0) == Tokens.OPEN:
if not isinstance(expression, LambdaExpression) and \
not isinstance(expression, ApplicationExpression) and \
not isinstance(expression, FunctionVariableExpression) and \
not isinstance(expression, ConstantExpression):
raise LogicalExpressionException(self._currentIndex,
("The function '%s" % expression) +
"' is not a Lambda Expression, an "
"Application Expression, or a "
"functional predicate, so it may "
"not take arguments.")
self.token() #swallow then open paren
#curry the arguments
accum = self.make_ApplicationExpression(expression, self.process_next_expression(APP))
while self.inRange(0) and self.token(0) == Tokens.COMMA:
self.token() #swallow the comma
accum = self.make_ApplicationExpression(accum, self.process_next_expression(APP))
self.assertNextToken(Tokens.CLOSE)
return accum
return expression
def make_ApplicationExpression(self, function, argument):
return ApplicationExpression(function, argument)
def make_VariableExpression(self, name):
return VariableExpression(Variable(name))
def make_LambdaExpression(self, variable, term):
return LambdaExpression(variable, term)
def has_priority(self, operation, context):
return self.operator_precedence[operation] < self.operator_precedence[context] or \
(operation in self.right_associated_operations and \
self.operator_precedence[operation] == self.operator_precedence[context])
def assertNextToken(self, expected):
try:
tok = self.token()
except ExpectedMoreTokensException as e:
raise ExpectedMoreTokensException(e.index, message="Expected token '%s'." % expected)
if isinstance(expected, list):
if tok not in expected:
raise UnexpectedTokenException(self._currentIndex, tok, expected)
else:
if tok != expected:
raise UnexpectedTokenException(self._currentIndex, tok, expected)
def assertToken(self, tok, expected):
if isinstance(expected, list):
if tok not in expected:
raise UnexpectedTokenException(self._currentIndex, tok, expected)
else:
if tok != expected:
raise UnexpectedTokenException(self._currentIndex, tok, expected)
def __repr__(self):
if self.inRange(0):
msg = 'Next token: ' + self.token(0)
else:
msg = 'No more tokens'
return '<' + self.__class__.__name__ + ': ' + msg + '>'
def read_logic(s, logic_parser=None, encoding=None):
"""
Convert a file of First Order Formulas into a list of {Expression}s.
:param s: the contents of the file
:type s: str
:param logic_parser: The parser to be used to parse the logical expression
:type logic_parser: LogicParser
:param encoding: the encoding of the input string, if it is binary
:type encoding: str
:return: a list of parsed formulas.
:rtype: list(Expression)
"""
if encoding is not None:
s = s.decode(encoding)
if logic_parser is None:
logic_parser = LogicParser()
statements = []
for linenum, line in enumerate(s.splitlines()):
line = line.strip()
if line.startswith('#') or line=='': continue
try:
statements.append(logic_parser.parse(line))
except LogicalExpressionException:
raise ValueError('Unable to parse line %s: %s' % (linenum, line))
return statements
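# Hedged usage sketch for read_logic (hypothetical helper, not part of the
# original module; the formulas below are illustrative only): blank lines and
# lines starting with '#' are skipped, every other line is parsed into an
# Expression using the default LogicParser.
def _demo_read_logic():
    source = ("# a tiny knowledge base\n"
              "man(socrates)\n"
              "all x.(man(x) -> mortal(x))\n")
    for formula in read_logic(source):
        print(formula)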
@total_ordering
@python_2_unicode_compatible
class Variable(object):
def __init__(self, name):
"""
:param name: the name of the variable
"""
assert isinstance(name, string_types), "%s is not a string" % name
self.name = name
def __eq__(self, other):
return isinstance(other, Variable) and self.name == other.name
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if not isinstance(other, Variable):
raise TypeError
return self.name < other.name
def substitute_bindings(self, bindings):
return bindings.get(self, self)
def __hash__(self):
return hash(self.name)
def __str__(self):
return self.name
def __repr__(self):
return "Variable('%s')" % self.name
def unique_variable(pattern=None, ignore=None):
"""
Return a new, unique variable.
:param pattern: ``Variable`` that is being replaced. The new variable must
be the same type.
:param term: a set of ``Variable`` objects that should not be returned from
this function.
:rtype: Variable
"""
if pattern is not None:
if is_indvar(pattern.name):
prefix = 'z'
elif is_funcvar(pattern.name):
prefix = 'F'
elif is_eventvar(pattern.name):
prefix = 'e0'
else:
assert False, "Cannot generate a unique constant"
else:
prefix = 'z'
v = Variable("%s%s" % (prefix, _counter.get()))
while ignore is not None and v in ignore:
v = Variable("%s%s" % (prefix, _counter.get()))
return v
def skolem_function(univ_scope=None):
"""
    Return a skolem function over the variables in univ_scope.
    :param univ_scope: set of ``Variable`` objects in the universal scope
"""
skolem = VariableExpression(Variable('F%s' % _counter.get()))
if univ_scope:
for v in list(univ_scope):
skolem = skolem(VariableExpression(v))
return skolem
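# Hedged sketch (hypothetical helper, not part of the original module): build a
# Skolem term over two universally scoped variables. The exact function name in
# the output (e.g. 'F1') depends on how many times the counter has been used.
def _demo_skolem_function():
    sk = skolem_function([Variable('x'), Variable('y')])
    print(sk)   # e.g. F1(x,y); argument order follows the given sequence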
@python_2_unicode_compatible
class Type(object):
def __repr__(self):
return "%s" % self
def __hash__(self):
return hash("%s" % self)
@classmethod
def fromstring(cls, s):
return read_type(s)
@python_2_unicode_compatible
class ComplexType(Type):
def __init__(self, first, second):
assert(isinstance(first, Type)), "%s is not a Type" % first
assert(isinstance(second, Type)), "%s is not a Type" % second
self.first = first
self.second = second
def __eq__(self, other):
return isinstance(other, ComplexType) and \
self.first == other.first and \
self.second == other.second
def __ne__(self, other):
return not self == other
__hash__ = Type.__hash__
def matches(self, other):
if isinstance(other, ComplexType):
return self.first.matches(other.first) and \
self.second.matches(other.second)
else:
return self == ANY_TYPE
def resolve(self, other):
if other == ANY_TYPE:
return self
elif isinstance(other, ComplexType):
f = self.first.resolve(other.first)
s = self.second.resolve(other.second)
if f and s:
return ComplexType(f,s)
else:
return None
elif self == ANY_TYPE:
return other
else:
return None
def __str__(self):
if self == ANY_TYPE:
return "%s" % ANY_TYPE
else:
return '<%s,%s>' % (self.first, self.second)
def str(self):
if self == ANY_TYPE:
return ANY_TYPE.str()
else:
return '(%s -> %s)' % (self.first.str(), self.second.str())
class BasicType(Type):
def __eq__(self, other):
return isinstance(other, BasicType) and ("%s" % self) == ("%s" % other)
def __ne__(self, other):
return not self == other
__hash__ = Type.__hash__
def matches(self, other):
return other == ANY_TYPE or self == other
def resolve(self, other):
if self.matches(other):
return self
else:
return None
@python_2_unicode_compatible
class EntityType(BasicType):
def __str__(self):
return 'e'
def str(self):
return 'IND'
@python_2_unicode_compatible
class TruthValueType(BasicType):
def __str__(self):
return 't'
def str(self):
return 'BOOL'
@python_2_unicode_compatible
class EventType(BasicType):
def __str__(self):
return 'v'
def str(self):
return 'EVENT'
@python_2_unicode_compatible
class AnyType(BasicType, ComplexType):
def __init__(self):
pass
@property
def first(self): return self
@property
def second(self): return self
def __eq__(self, other):
return isinstance(other, AnyType) or other.__eq__(self)
def __ne__(self, other):
return not self == other
__hash__ = Type.__hash__
def matches(self, other):
return True
def resolve(self, other):
return other
def __str__(self):
return '?'
def str(self):
return 'ANY'
TRUTH_TYPE = TruthValueType()
ENTITY_TYPE = EntityType()
EVENT_TYPE = EventType()
ANY_TYPE = AnyType()
def read_type(type_string):
assert isinstance(type_string, string_types)
type_string = type_string.replace(' ', '') #remove spaces
if type_string[0] == '<':
assert type_string[-1] == '>'
paren_count = 0
for i,char in enumerate(type_string):
if char == '<':
paren_count += 1
elif char == '>':
paren_count -= 1
assert paren_count > 0
elif char == ',':
if paren_count == 1:
break
return ComplexType(read_type(type_string[1 :i ]),
read_type(type_string[i+1:-1]))
elif type_string[0] == "%s" % ENTITY_TYPE:
return ENTITY_TYPE
elif type_string[0] == "%s" % TRUTH_TYPE:
return TRUTH_TYPE
elif type_string[0] == "%s" % ANY_TYPE:
return ANY_TYPE
else:
raise LogicalExpressionException("Unexpected character: '%s'." % type_string[0])
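# Hedged sketch (hypothetical helper, not part of the original module):
# read_type turns the angle-bracket notation used throughout this module into
# Type objects, e.g. '<e,t>' is a function from entities to truth values.
def _demo_read_type():
    assert read_type('e') == ENTITY_TYPE
    assert read_type('t') == TRUTH_TYPE
    assert read_type('<e,t>') == ComplexType(ENTITY_TYPE, TRUTH_TYPE)
    # nesting is supported: the type of a curried binary predicate
    assert read_type('<e,<e,t>>') == ComplexType(ENTITY_TYPE,
                                                 ComplexType(ENTITY_TYPE, TRUTH_TYPE))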
class TypeException(Exception):
def __init__(self, msg):
super(TypeException, self).__init__(msg)
class InconsistentTypeHierarchyException(TypeException):
def __init__(self, variable, expression=None):
if expression:
msg = "The variable '%s' was found in multiple places with different"\
" types in '%s'." % (variable, expression)
else:
msg = "The variable '%s' was found in multiple places with different"\
" types." % (variable)
super(InconsistentTypeHierarchyException, self).__init__(msg)
class TypeResolutionException(TypeException):
def __init__(self, expression, other_type):
super(TypeResolutionException, self).__init__(
"The type of '%s', '%s', cannot be resolved with type '%s'" %
(expression, expression.type, other_type))
class IllegalTypeException(TypeException):
def __init__(self, expression, other_type, allowed_type):
super(IllegalTypeException, self).__init__(
"Cannot set type of %s '%s' to '%s'; must match type '%s'." %
(expression.__class__.__name__, expression, other_type,
allowed_type))
def typecheck(expressions, signature=None):
"""
Ensure correct typing across a collection of ``Expression`` objects.
:param expressions: a collection of expressions
:param signature: dict that maps variable names to types (or string
representations of types)
"""
#typecheck and create master signature
for expression in expressions:
signature = expression.typecheck(signature)
#apply master signature to all expressions
for expression in expressions[:-1]:
expression.typecheck(signature)
return signature
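# Hedged sketch (hypothetical helper; it can only be called once the whole
# module has been loaded, since ``Expression`` is defined further below):
# infer one consistent signature across formulas that share the predicate
# 'see', seeding it with a string type representation.
def _demo_typecheck():
    a = Expression.fromstring('see(john,mary)')
    b = Expression.fromstring('all x.(see(x,mary) -> tall(x))')
    signature = typecheck([a, b], signature={'see': '<e,<e,t>>'})
    print(signature)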
class SubstituteBindingsI(object):
"""
An interface for classes that can perform substitutions for
variables.
"""
def substitute_bindings(self, bindings):
"""
:return: The object that is obtained by replacing
each variable bound by ``bindings`` with its values.
Aliases are already resolved. (maybe?)
:rtype: (any)
"""
raise NotImplementedError()
def variables(self):
"""
:return: A list of all variables in this object.
"""
raise NotImplementedError()
@python_2_unicode_compatible
class Expression(SubstituteBindingsI):
"""This is the base abstract object for all logical expressions"""
_logic_parser = LogicParser()
_type_checking_logic_parser = LogicParser(type_check=True)
@classmethod
def fromstring(cls, s, type_check=False, signature=None):
if type_check:
return cls._type_checking_logic_parser.parse(s, signature)
else:
return cls._logic_parser.parse(s, signature)
def __call__(self, other, *additional):
accum = self.applyto(other)
for a in additional:
accum = accum(a)
return accum
def applyto(self, other):
assert isinstance(other, Expression), "%s is not an Expression" % other
return ApplicationExpression(self, other)
def __neg__(self):
return NegatedExpression(self)
def negate(self):
"""If this is a negated expression, remove the negation.
Otherwise add a negation."""
return -self
def __and__(self, other):
if not isinstance(other, Expression):
raise TypeError("%s is not an Expression" % other)
return AndExpression(self, other)
def __or__(self, other):
if not isinstance(other, Expression):
raise TypeError("%s is not an Expression" % other)
return OrExpression(self, other)
def __gt__(self, other):
if not isinstance(other, Expression):
raise TypeError("%s is not an Expression" % other)
return ImpExpression(self, other)
def __lt__(self, other):
if not isinstance(other, Expression):
raise TypeError("%s is not an Expression" % other)
return IffExpression(self, other)
def __eq__(self, other):
raise NotImplementedError()
def __ne__(self, other):
return not self == other
def equiv(self, other, prover=None):
"""
Check for logical equivalence.
Pass the expression (self <-> other) to the theorem prover.
If the prover says it is valid, then the self and other are equal.
:param other: an ``Expression`` to check equality against
:param prover: a ``nltk.inference.api.Prover``
"""
assert isinstance(other, Expression), "%s is not an Expression" % other
if prover is None:
from nltk.inference import Prover9
prover = Prover9()
bicond = IffExpression(self.simplify(), other.simplify())
return prover.prove(bicond)
def __hash__(self):
return hash(repr(self))
def substitute_bindings(self, bindings):
expr = self
for var in expr.variables():
if var in bindings:
val = bindings[var]
if isinstance(val, Variable):
val = self.make_VariableExpression(val)
elif not isinstance(val, Expression):
raise ValueError('Can not substitute a non-expression '
'value into an expression: %r' % (val,))
# Substitute bindings in the target value.
val = val.substitute_bindings(bindings)
# Replace var w/ the target value.
expr = expr.replace(var, val)
return expr.simplify()
def typecheck(self, signature=None):
"""
Infer and check types. Raise exceptions if necessary.
:param signature: dict that maps variable names to types (or string
representations of types)
:return: the signature, plus any additional type mappings
"""
sig = defaultdict(list)
if signature:
for key in signature:
val = signature[key]
varEx = VariableExpression(Variable(key))
if isinstance(val, Type):
varEx.type = val
else:
varEx.type = read_type(val)
sig[key].append(varEx)
self._set_type(signature=sig)
return dict((key, sig[key][0].type) for key in sig)
def findtype(self, variable):
"""
Find the type of the given variable as it is used in this expression.
For example, finding the type of "P" in "P(x) & Q(x,y)" yields "<e,t>"
:param variable: Variable
"""
raise NotImplementedError()
def _set_type(self, other_type=ANY_TYPE, signature=None):
"""
Set the type of this expression to be the given type. Raise type
exceptions where applicable.
:param other_type: Type
:param signature: dict(str -> list(AbstractVariableExpression))
"""
raise NotImplementedError()
def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
"""
Replace every instance of 'variable' with 'expression'
:param variable: ``Variable`` The variable to replace
:param expression: ``Expression`` The expression with which to replace it
:param replace_bound: bool Should bound variables be replaced?
:param alpha_convert: bool Alpha convert automatically to avoid name clashes?
"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
assert isinstance(expression, Expression), "%s is not an Expression" % expression
return self.visit_structured(lambda e: e.replace(variable, expression,
replace_bound, alpha_convert),
self.__class__)
def normalize(self, newvars=None):
"""Rename auto-generated unique variables"""
def get_indiv_vars(e):
if isinstance(e, IndividualVariableExpression):
return set([e])
elif isinstance(e, AbstractVariableExpression):
return set()
else:
return e.visit(get_indiv_vars,
lambda parts: reduce(operator.or_, parts, set()))
result = self
for i,e in enumerate(sorted(get_indiv_vars(self), key=lambda e: e.variable)):
if isinstance(e,EventVariableExpression):
newVar = e.__class__(Variable('e0%s' % (i+1)))
elif isinstance(e,IndividualVariableExpression):
newVar = e.__class__(Variable('z%s' % (i+1)))
else:
newVar = e
result = result.replace(e.variable, newVar, True)
return result
def visit(self, function, combinator):
"""
Recursively visit subexpressions. Apply 'function' to each
subexpression and pass the result of each function application
to the 'combinator' for aggregation:
return combinator(map(function, self.subexpressions))
Bound variables are neither applied upon by the function nor given to
the combinator.
:param function: ``Function<Expression,T>`` to call on each subexpression
:param combinator: ``Function<list<T>,R>`` to combine the results of the
function calls
:return: result of combination ``R``
"""
raise NotImplementedError()
def visit_structured(self, function, combinator):
"""
Recursively visit subexpressions. Apply 'function' to each
subexpression and pass the result of each function application
to the 'combinator' for aggregation. The combinator must have
the same signature as the constructor. The function is not
applied to bound variables, but they are passed to the
combinator.
:param function: ``Function`` to call on each subexpression
:param combinator: ``Function`` with the same signature as the
constructor, to combine the results of the function calls
:return: result of combination
"""
return self.visit(function, lambda parts: combinator(*parts))
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self)
def __str__(self):
return self.str()
def variables(self):
"""
Return a set of all the variables for binding substitution.
The variables returned include all free (non-bound) individual
variables and any variable starting with '?' or '@'.
:return: set of ``Variable`` objects
"""
return self.free() | set(p for p in self.predicates()|self.constants()
if re.match('^[?@]', p.name))
def free(self):
"""
Return a set of all the free (non-bound) variables. This includes
both individual and predicate variables, but not constants.
:return: set of ``Variable`` objects
"""
return self.visit(lambda e: e.free(),
lambda parts: reduce(operator.or_, parts, set()))
def constants(self):
"""
Return a set of individual constants (non-predicates).
:return: set of ``Variable`` objects
"""
return self.visit(lambda e: e.constants(),
lambda parts: reduce(operator.or_, parts, set()))
def predicates(self):
"""
Return a set of predicates (constants, not variables).
:return: set of ``Variable`` objects
"""
return self.visit(lambda e: e.predicates(),
lambda parts: reduce(operator.or_, parts, set()))
def simplify(self):
"""
:return: beta-converted version of this expression
"""
return self.visit_structured(lambda e: e.simplify(), self.__class__)
def make_VariableExpression(self, variable):
return VariableExpression(variable)
@python_2_unicode_compatible
class ApplicationExpression(Expression):
r"""
This class is used to represent two related types of logical expressions.
The first is a Predicate Expression, such as "P(x,y)". A predicate
expression is comprised of a ``FunctionVariableExpression`` or
``ConstantExpression`` as the predicate and a list of Expressions as the
arguments.
    The second is an application of one expression to another, such as
"(\x.dog(x))(fido)".
The reason Predicate Expressions are treated as Application Expressions is
that the Variable Expression predicate of the expression may be replaced
with another Expression, such as a LambdaExpression, which would mean that
the Predicate should be thought of as being applied to the arguments.
    The logical expression reader will always curry arguments in an application expression.
So, "\x y.see(x,y)(john,mary)" will be represented internally as
"((\x y.(see(x))(y))(john))(mary)". This simplifies the internals since
there will always be exactly one argument in an application.
The str() method will usually print the curried forms of application
    expressions. The one exception is when the application expression is
    really a predicate expression (i.e., the underlying function is an
``AbstractVariableExpression``). This means that the example from above
will be returned as "(\x y.see(x,y)(john))(mary)".
"""
def __init__(self, function, argument):
"""
:param function: ``Expression``, for the function expression
:param argument: ``Expression``, for the argument
"""
assert isinstance(function, Expression), "%s is not an Expression" % function
assert isinstance(argument, Expression), "%s is not an Expression" % argument
self.function = function
self.argument = argument
def simplify(self):
function = self.function.simplify()
argument = self.argument.simplify()
if isinstance(function, LambdaExpression):
return function.term.replace(function.variable, argument).simplify()
else:
return self.__class__(function, argument)
@property
def type(self):
if isinstance(self.function.type, ComplexType):
return self.function.type.second
else:
return ANY_TYPE
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
self.argument._set_type(ANY_TYPE, signature)
try:
self.function._set_type(ComplexType(self.argument.type, other_type), signature)
except TypeResolutionException:
raise TypeException(
"The function '%s' is of type '%s' and cannot be applied "
"to '%s' of type '%s'. Its argument must match type '%s'."
% (self.function, self.function.type, self.argument,
self.argument.type, self.function.type.first))
def findtype(self, variable):
""":see Expression.findtype()"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
if self.is_atom():
function, args = self.uncurry()
else:
#It's not a predicate expression ("P(x,y)"), so leave args curried
function = self.function
args = [self.argument]
found = [arg.findtype(variable) for arg in [function]+args]
unique = []
for f in found:
if f != ANY_TYPE:
if unique:
for u in unique:
if f.matches(u):
break
else:
unique.append(f)
if len(unique) == 1:
return list(unique)[0]
else:
return ANY_TYPE
def constants(self):
""":see: Expression.constants()"""
if isinstance(self.function, AbstractVariableExpression):
function_constants = set()
else:
function_constants = self.function.constants()
return function_constants | self.argument.constants()
def predicates(self):
""":see: Expression.predicates()"""
if isinstance(self.function, ConstantExpression):
function_preds = set([self.function.variable])
else:
function_preds = self.function.predicates()
return function_preds | self.argument.predicates()
def visit(self, function, combinator):
""":see: Expression.visit()"""
return combinator([function(self.function), function(self.argument)])
def __eq__(self, other):
return isinstance(other, ApplicationExpression) and \
self.function == other.function and \
self.argument == other.argument
def __ne__(self, other):
return not self == other
__hash__ = Expression.__hash__
def __str__(self):
# uncurry the arguments and find the base function
if self.is_atom():
function, args = self.uncurry()
arg_str = ','.join("%s" % arg for arg in args)
else:
#Leave arguments curried
function = self.function
arg_str = "%s" % self.argument
function_str = "%s" % function
parenthesize_function = False
if isinstance(function, LambdaExpression):
if isinstance(function.term, ApplicationExpression):
if not isinstance(function.term.function,
AbstractVariableExpression):
parenthesize_function = True
elif not isinstance(function.term, BooleanExpression):
parenthesize_function = True
elif isinstance(function, ApplicationExpression):
parenthesize_function = True
if parenthesize_function:
function_str = Tokens.OPEN + function_str + Tokens.CLOSE
return function_str + Tokens.OPEN + arg_str + Tokens.CLOSE
def uncurry(self):
"""
        Uncurry this application expression.
        :return: A tuple (base-function, arg-list)
"""
function = self.function
args = [self.argument]
while isinstance(function, ApplicationExpression):
#(\x.\y.sees(x,y)(john))(mary)
args.insert(0, function.argument)
function = function.function
return (function, args)
@property
def pred(self):
"""
Return uncurried base-function.
If this is an atom, then the result will be a variable expression.
Otherwise, it will be a lambda expression.
"""
return self.uncurry()[0]
@property
def args(self):
"""
Return uncurried arg-list
"""
return self.uncurry()[1]
def is_atom(self):
"""
Is this expression an atom (as opposed to a lambda expression applied
to a term)?
"""
return isinstance(self.pred, AbstractVariableExpression)
@total_ordering
@python_2_unicode_compatible
class AbstractVariableExpression(Expression):
"""This class represents a variable to be used as a predicate or entity"""
def __init__(self, variable):
"""
:param variable: ``Variable``, for the variable
"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
self.variable = variable
def simplify(self):
return self
def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
""":see: Expression.replace()"""
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
assert isinstance(expression, Expression), "%s is not an Expression" % expression
if self.variable == variable:
return expression
else:
return self
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
resolution = other_type
for varEx in signature[self.variable.name]:
resolution = varEx.type.resolve(resolution)
if not resolution:
raise InconsistentTypeHierarchyException(self)
signature[self.variable.name].append(self)
for varEx in signature[self.variable.name]:
varEx.type = resolution
def findtype(self, variable):
""":see Expression.findtype()"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
if self.variable == variable:
return self.type
else:
return ANY_TYPE
def predicates(self):
""":see: Expression.predicates()"""
return set()
def __eq__(self, other):
"""Allow equality between instances of ``AbstractVariableExpression``
subtypes."""
return isinstance(other, AbstractVariableExpression) and \
self.variable == other.variable
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if not isinstance(other, AbstractVariableExpression):
raise TypeError
return self.variable < other.variable
__hash__ = Expression.__hash__
def __str__(self):
return "%s" % self.variable
class IndividualVariableExpression(AbstractVariableExpression):
"""This class represents variables that take the form of a single lowercase
character (other than 'e') followed by zero or more digits."""
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
if not other_type.matches(ENTITY_TYPE):
raise IllegalTypeException(self, other_type, ENTITY_TYPE)
signature[self.variable.name].append(self)
def _get_type(self): return ENTITY_TYPE
type = property(_get_type, _set_type)
def free(self):
""":see: Expression.free()"""
return set([self.variable])
def constants(self):
""":see: Expression.constants()"""
return set()
class FunctionVariableExpression(AbstractVariableExpression):
"""This class represents variables that take the form of a single uppercase
character followed by zero or more digits."""
type = ANY_TYPE
def free(self):
""":see: Expression.free()"""
return set([self.variable])
def constants(self):
""":see: Expression.constants()"""
return set()
class EventVariableExpression(IndividualVariableExpression):
"""This class represents variables that take the form of a single lowercase
'e' character followed by zero or more digits."""
type = EVENT_TYPE
class ConstantExpression(AbstractVariableExpression):
"""This class represents variables that do not take the form of a single
character followed by zero or more digits."""
type = ENTITY_TYPE
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
if other_type == ANY_TYPE:
#entity type by default, for individuals
resolution = ENTITY_TYPE
else:
resolution = other_type
if self.type != ENTITY_TYPE:
resolution = resolution.resolve(self.type)
for varEx in signature[self.variable.name]:
resolution = varEx.type.resolve(resolution)
if not resolution:
raise InconsistentTypeHierarchyException(self)
signature[self.variable.name].append(self)
for varEx in signature[self.variable.name]:
varEx.type = resolution
def free(self):
""":see: Expression.free()"""
return set()
def constants(self):
""":see: Expression.constants()"""
return set([self.variable])
def VariableExpression(variable):
"""
This is a factory method that instantiates and returns a subtype of
``AbstractVariableExpression`` appropriate for the given variable.
"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
if is_indvar(variable.name):
return IndividualVariableExpression(variable)
elif is_funcvar(variable.name):
return FunctionVariableExpression(variable)
elif is_eventvar(variable.name):
return EventVariableExpression(variable)
else:
return ConstantExpression(variable)
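# Hedged sketch (hypothetical helper; the is_indvar/is_funcvar/is_eventvar
# predicates used by the factory are defined further below, so this only runs
# after the module has been fully loaded): the factory dispatches purely on the
# shape of the variable's name.
def _demo_variable_expression_factory():
    assert isinstance(VariableExpression(Variable('x')), IndividualVariableExpression)
    assert isinstance(VariableExpression(Variable('P')), FunctionVariableExpression)
    assert isinstance(VariableExpression(Variable('e01')), EventVariableExpression)
    assert isinstance(VariableExpression(Variable('john')), ConstantExpression)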
class VariableBinderExpression(Expression):
"""This an abstract class for any Expression that binds a variable in an
Expression. This includes LambdaExpressions and Quantified Expressions"""
def __init__(self, variable, term):
"""
:param variable: ``Variable``, for the variable
:param term: ``Expression``, for the term
"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
assert isinstance(term, Expression), "%s is not an Expression" % term
self.variable = variable
self.term = term
def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
""":see: Expression.replace()"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
assert isinstance(expression, Expression), "%s is not an Expression" % expression
#if the bound variable is the thing being replaced
if self.variable == variable:
if replace_bound:
assert isinstance(expression, AbstractVariableExpression),\
"%s is not a AbstractVariableExpression" % expression
return self.__class__(expression.variable,
self.term.replace(variable, expression, True, alpha_convert))
else:
return self
else:
# if the bound variable appears in the expression, then it must
# be alpha converted to avoid a conflict
if alpha_convert and self.variable in expression.free():
self = self.alpha_convert(unique_variable(pattern=self.variable))
#replace in the term
return self.__class__(self.variable,
self.term.replace(variable, expression, replace_bound, alpha_convert))
def alpha_convert(self, newvar):
"""Rename all occurrences of the variable introduced by this variable
binder in the expression to ``newvar``.
:param newvar: ``Variable``, for the new variable
"""
assert isinstance(newvar, Variable), "%s is not a Variable" % newvar
return self.__class__(newvar,
self.term.replace(self.variable,
VariableExpression(newvar),
True))
def free(self):
""":see: Expression.free()"""
return self.term.free() - set([self.variable])
def findtype(self, variable):
""":see Expression.findtype()"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
if variable == self.variable:
return ANY_TYPE
else:
return self.term.findtype(variable)
def visit(self, function, combinator):
""":see: Expression.visit()"""
return combinator([function(self.term)])
def visit_structured(self, function, combinator):
""":see: Expression.visit_structured()"""
return combinator(self.variable, function(self.term))
def __eq__(self, other):
r"""Defines equality modulo alphabetic variance. If we are comparing
\x.M and \y.N, then check equality of M and N[x/y]."""
if isinstance(self, other.__class__) or \
isinstance(other, self.__class__):
if self.variable == other.variable:
return self.term == other.term
else:
# Comparing \x.M and \y.N. Relabel y in N with x and continue.
varex = VariableExpression(self.variable)
return self.term == other.term.replace(other.variable, varex)
else:
return False
def __ne__(self, other):
return not self == other
__hash__ = Expression.__hash__
@python_2_unicode_compatible
class LambdaExpression(VariableBinderExpression):
@property
def type(self):
return ComplexType(self.term.findtype(self.variable),
self.term.type)
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
self.term._set_type(other_type.second, signature)
if not self.type.resolve(other_type):
raise TypeResolutionException(self, other_type)
def __str__(self):
variables = [self.variable]
term = self.term
while term.__class__ == self.__class__:
variables.append(term.variable)
term = term.term
return Tokens.LAMBDA + ' '.join("%s" % v for v in variables) + \
Tokens.DOT + "%s" % term
@python_2_unicode_compatible
class QuantifiedExpression(VariableBinderExpression):
@property
def type(self): return TRUTH_TYPE
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
if not other_type.matches(TRUTH_TYPE):
raise IllegalTypeException(self, other_type, TRUTH_TYPE)
self.term._set_type(TRUTH_TYPE, signature)
def __str__(self):
variables = [self.variable]
term = self.term
while term.__class__ == self.__class__:
variables.append(term.variable)
term = term.term
return self.getQuantifier() + ' ' + ' '.join("%s" % v for v in variables) + \
Tokens.DOT + "%s" % term
class ExistsExpression(QuantifiedExpression):
def getQuantifier(self):
return Tokens.EXISTS
class AllExpression(QuantifiedExpression):
def getQuantifier(self):
return Tokens.ALL
@python_2_unicode_compatible
class NegatedExpression(Expression):
def __init__(self, term):
assert isinstance(term, Expression), "%s is not an Expression" % term
self.term = term
@property
def type(self): return TRUTH_TYPE
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
if not other_type.matches(TRUTH_TYPE):
raise IllegalTypeException(self, other_type, TRUTH_TYPE)
self.term._set_type(TRUTH_TYPE, signature)
def findtype(self, variable):
assert isinstance(variable, Variable), "%s is not a Variable" % variable
return self.term.findtype(variable)
def visit(self, function, combinator):
""":see: Expression.visit()"""
return combinator([function(self.term)])
def negate(self):
""":see: Expression.negate()"""
return self.term
def __eq__(self, other):
return isinstance(other, NegatedExpression) and self.term == other.term
def __ne__(self, other):
return not self == other
__hash__ = Expression.__hash__
def __str__(self):
return Tokens.NOT + "%s" % self.term
@python_2_unicode_compatible
class BinaryExpression(Expression):
def __init__(self, first, second):
assert isinstance(first, Expression), "%s is not an Expression" % first
assert isinstance(second, Expression), "%s is not an Expression" % second
self.first = first
self.second = second
@property
def type(self): return TRUTH_TYPE
def findtype(self, variable):
""":see Expression.findtype()"""
assert isinstance(variable, Variable), "%s is not a Variable" % variable
f = self.first.findtype(variable)
s = self.second.findtype(variable)
if f == s or s == ANY_TYPE:
return f
elif f == ANY_TYPE:
return s
else:
return ANY_TYPE
def visit(self, function, combinator):
""":see: Expression.visit()"""
return combinator([function(self.first), function(self.second)])
def __eq__(self, other):
return (isinstance(self, other.__class__) or \
isinstance(other, self.__class__)) and \
self.first == other.first and self.second == other.second
def __ne__(self, other):
return not self == other
__hash__ = Expression.__hash__
def __str__(self):
first = self._str_subex(self.first)
second = self._str_subex(self.second)
return Tokens.OPEN + first + ' ' + self.getOp() \
+ ' ' + second + Tokens.CLOSE
def _str_subex(self, subex):
return "%s" % subex
class BooleanExpression(BinaryExpression):
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
if not other_type.matches(TRUTH_TYPE):
raise IllegalTypeException(self, other_type, TRUTH_TYPE)
self.first._set_type(TRUTH_TYPE, signature)
self.second._set_type(TRUTH_TYPE, signature)
class AndExpression(BooleanExpression):
"""This class represents conjunctions"""
def getOp(self):
return Tokens.AND
def _str_subex(self, subex):
s = "%s" % subex
if isinstance(subex, AndExpression):
return s[1:-1]
return s
class OrExpression(BooleanExpression):
"""This class represents disjunctions"""
def getOp(self):
return Tokens.OR
def _str_subex(self, subex):
s = "%s" % subex
if isinstance(subex, OrExpression):
return s[1:-1]
return s
class ImpExpression(BooleanExpression):
"""This class represents implications"""
def getOp(self):
return Tokens.IMP
class IffExpression(BooleanExpression):
"""This class represents biconditionals"""
def getOp(self):
return Tokens.IFF
class EqualityExpression(BinaryExpression):
"""This class represents equality expressions like "(x = y)"."""
def _set_type(self, other_type=ANY_TYPE, signature=None):
""":see Expression._set_type()"""
assert isinstance(other_type, Type)
if signature is None:
signature = defaultdict(list)
if not other_type.matches(TRUTH_TYPE):
raise IllegalTypeException(self, other_type, TRUTH_TYPE)
self.first._set_type(ENTITY_TYPE, signature)
self.second._set_type(ENTITY_TYPE, signature)
def getOp(self):
return Tokens.EQ
### Utilities
class LogicalExpressionException(Exception):
def __init__(self, index, message):
self.index = index
Exception.__init__(self, message)
class UnexpectedTokenException(LogicalExpressionException):
def __init__(self, index, unexpected=None, expected=None, message=None):
if unexpected and expected:
msg = "Unexpected token: '%s'. " \
"Expected token '%s'." % (unexpected, expected)
elif unexpected:
msg = "Unexpected token: '%s'." % unexpected
if message:
msg += ' '+message
else:
msg = "Expected token '%s'." % expected
LogicalExpressionException.__init__(self, index, msg)
class ExpectedMoreTokensException(LogicalExpressionException):
def __init__(self, index, message=None):
if not message:
message = 'More tokens expected.'
LogicalExpressionException.__init__(self, index, 'End of input found. ' + message)
def is_indvar(expr):
"""
An individual variable must be a single lowercase character other than 'e',
followed by zero or more digits.
:param expr: str
:return: bool True if expr is of the correct form
"""
assert isinstance(expr, string_types), "%s is not a string" % expr
return re.match(r'^[a-df-z]\d*$', expr) is not None
def is_funcvar(expr):
"""
A function variable must be a single uppercase character followed by
zero or more digits.
:param expr: str
:return: bool True if expr is of the correct form
"""
assert isinstance(expr, string_types), "%s is not a string" % expr
return re.match(r'^[A-Z]\d*$', expr) is not None
def is_eventvar(expr):
"""
An event variable must be a single lowercase 'e' character followed by
zero or more digits.
:param expr: str
:return: bool True if expr is of the correct form
"""
assert isinstance(expr, string_types), "%s is not a string" % expr
return re.match(r'^e\d*$', expr) is not None
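# Hedged sketch (hypothetical helper) of the three naming conventions checked
# by the predicates above.
def _demo_variable_name_conventions():
    assert is_indvar('x') and is_indvar('z12') and not is_indvar('e1')
    assert is_funcvar('P') and is_funcvar('F3') and not is_funcvar('Px')
    assert is_eventvar('e') and is_eventvar('e01') and not is_eventvar('x1')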
def demo():
lexpr = Expression.fromstring
print('='*20 + 'Test reader' + '='*20)
print(lexpr(r'john'))
print(lexpr(r'man(x)'))
print(lexpr(r'-man(x)'))
print(lexpr(r'(man(x) & tall(x) & walks(x))'))
print(lexpr(r'exists x.(man(x) & tall(x) & walks(x))'))
print(lexpr(r'\x.man(x)'))
print(lexpr(r'\x.man(x)(john)'))
print(lexpr(r'\x y.sees(x,y)'))
print(lexpr(r'\x y.sees(x,y)(a,b)'))
print(lexpr(r'(\x.exists y.walks(x,y))(x)'))
print(lexpr(r'exists x.x = y'))
print(lexpr(r'exists x.(x = y)'))
print(lexpr('P(x) & x=y & P(y)'))
print(lexpr(r'\P Q.exists x.(P(x) & Q(x))'))
print(lexpr(r'man(x) <-> tall(x)'))
print('='*20 + 'Test simplify' + '='*20)
print(lexpr(r'\x.\y.sees(x,y)(john)(mary)').simplify())
print(lexpr(r'\x.\y.sees(x,y)(john, mary)').simplify())
print(lexpr(r'all x.(man(x) & (\x.exists y.walks(x,y))(x))').simplify())
print(lexpr(r'(\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x))(\x.bark(x))').simplify())
print('='*20 + 'Test alpha conversion and binder expression equality' + '='*20)
e1 = lexpr('exists x.P(x)')
print(e1)
e2 = e1.alpha_convert(Variable('z'))
print(e2)
print(e1 == e2)
def demo_errors():
print('='*20 + 'Test reader errors' + '='*20)
demoException('(P(x) & Q(x)')
demoException('((P(x) &) & Q(x))')
demoException('P(x) -> ')
demoException('P(x')
demoException('P(x,')
demoException('P(x,)')
demoException('exists')
demoException('exists x.')
demoException('\\')
demoException('\\ x y.')
demoException('P(x)Q(x)')
demoException('(P(x)Q(x)')
demoException('exists x -> y')
def demoException(s):
try:
Expression.fromstring(s)
except LogicalExpressionException as e:
print("%s: %s" % (e.__class__.__name__, e))
def printtype(ex):
print("%s : %s" % (ex.str(), ex.type))
if __name__ == '__main__':
demo()
# demo_errors()
the-stack_106_24914
from sympy import Add, cos, sin, sqrt # noqa
import numpy as np
import pytest
from cached_property import cached_property
from conftest import skipif, EVAL # noqa
from devito import (NODE, Eq, Inc, Constant, Function, TimeFunction, SparseTimeFunction, # noqa
Dimension, SubDimension, Grid, Operator, norm, grad, div, dimensions,
switchconfig, configuration, centered, first_derivative, transpose)
from devito.exceptions import InvalidOperator
from devito.finite_differences.differentiable import diffify
from devito.ir import (DummyEq, Expression, Iteration, FindNodes, FindSymbols,
ParallelBlock, ParallelIteration, retrieve_iteration_tree)
from devito.passes.clusters.aliases import collect
from devito.passes.clusters.cse import _cse
from devito.symbolics import estimate_cost, pow_to_mul, indexify
from devito.tools import generator
from devito.types import Scalar, Array
from examples.seismic.acoustic import AcousticWaveSolver
from examples.seismic import demo_model, AcquisitionGeometry
from examples.seismic.tti import AnisotropicWaveSolver
def test_scheduling_after_rewrite():
"""Tests loop scheduling after expression hoisting."""
grid = Grid((10, 10))
u1 = TimeFunction(name="u1", grid=grid, save=10, time_order=2)
u2 = TimeFunction(name="u2", grid=grid, time_order=2)
sf1 = SparseTimeFunction(name='sf1', grid=grid, npoint=1, nt=10)
const = Function(name="const", grid=grid, space_order=2)
# Deliberately inject into u1, rather than u1.forward, to create a WAR
eqn1 = Eq(u1.forward, u1 + sin(const))
eqn2 = sf1.inject(u1.forward, expr=sf1)
eqn3 = Eq(u2.forward, u2 - u1.dt2 + sin(const))
op = Operator([eqn1] + eqn2 + [eqn3])
trees = retrieve_iteration_tree(op)
# Check loop nest structure
assert all(i.dim is j for i, j in zip(trees[0], grid.dimensions)) # time invariant
assert trees[1].root.dim is grid.time_dim
assert all(trees[1].root.dim is tree.root.dim for tree in trees[1:])
@pytest.mark.parametrize('exprs,expected', [
# Simple cases
(['Eq(tu, 2/(t0 + t1))', 'Eq(ti0, t0 + t1)', 'Eq(ti1, t0 + t1)'],
['t0 + t1', '2/r0', 'r0', 'r0']),
(['Eq(tu, 2/(t0 + t1))', 'Eq(ti0, 2/(t0 + t1) + 1)', 'Eq(ti1, 2/(t0 + t1) + 1)'],
['1/(t0 + t1)', '2*r5', 'r3 + 1', 'r3', 'r2', 'r2']),
(['Eq(tu, (tv + tw + 5.)*(ti0 + ti1) + (t0 + t1)*(ti0 + ti1))'],
['ti0[x, y, z] + ti1[x, y, z]',
'r0*(t0 + t1) + r0*(tv[t, x, y, z] + tw[t, x, y, z] + 5.0)']),
(['Eq(tu, t0/t1)', 'Eq(ti0, 2 + t0/t1)', 'Eq(ti1, 2 + t0/t1)'],
['t0/t1', 'r2 + 2', 'r2', 'r1', 'r1']),
# Across expressions
(['Eq(tu, tv*4 + tw*5 + tw*5*t0)', 'Eq(tv, tw*5)'],
['5*tw[t, x, y, z]', 'r0 + 5*t0*tw[t, x, y, z] + 4*tv[t, x, y, z]', 'r0']),
# Intersecting
pytest.param(['Eq(tu, ti0*ti1 + ti0*ti1*t0 + ti0*ti1*t0*t1)'],
['ti0*ti1', 'r0', 'r0*t0', 'r0*t0*t1'],
marks=pytest.mark.xfail),
    # Divisions (== powers with negative exponent) are always captured
(['Eq(tu, tv**-1*(tw*5 + tw*5*t0))', 'Eq(ti0, tv**-1*t0)'],
['1/tv[t, x, y, z]', 'r0*(5*t0*tw[t, x, y, z] + 5*tw[t, x, y, z])', 'r0*t0']),
# `compact_temporaries` must detect chains of isolated temporaries
(['Eq(t0, tv)', 'Eq(t1, t0)', 'Eq(t2, t1)', 'Eq(tu, t2)'],
['tv[t, x, y, z]']),
# Dimension-independent data dependences should be a stopper for CSE
(['Eq(tu.forward, tu.dx + 1)', 'Eq(tv.forward, tv.dx + 1)',
'Eq(tw.forward, tv.dt + 1)', 'Eq(tz.forward, tv.dt + 2)'],
['1/h_x', '1/dt', '-r1*tv[t, x, y, z]',
'-r2*tu[t, x, y, z] + r2*tu[t, x + 1, y, z] + 1',
'-r2*tv[t, x, y, z] + r2*tv[t, x + 1, y, z] + 1',
'r0 + r1*tv[t + 1, x, y, z] + 1',
'r0 + r1*tv[t + 1, x, y, z] + 2']),
# Fancy use case with lots of temporaries
(['Eq(tu.forward, tu.dx + 1)', 'Eq(tv.forward, tv.dx + 1)',
'Eq(tw.forward, tv.dt.dx2.dy2 + 1)', 'Eq(tz.forward, tv.dt.dy2.dx2 + 2)'],
['h_x**(-2)', 'h_y**(-2)', '1/h_x', '1/dt', '-r9*tv[t, x, y, z]',
'-r9*tv[t, x + 1, y, z] + r9*tv[t + 1, x + 1, y, z]',
'-r9*tv[t, x - 1, y, z] + r9*tv[t + 1, x - 1, y, z]',
'-r9*tv[t, x, y + 1, z] + r9*tv[t + 1, x, y + 1, z]',
'-r9*tv[t, x + 1, y + 1, z] + r9*tv[t + 1, x + 1, y + 1, z]',
'-r9*tv[t, x - 1, y + 1, z] + r9*tv[t + 1, x - 1, y + 1, z]',
'-r9*tv[t, x, y - 1, z] + r9*tv[t + 1, x, y - 1, z]',
'-r9*tv[t, x + 1, y - 1, z] + r9*tv[t + 1, x + 1, y - 1, z]',
'-r9*tv[t, x - 1, y - 1, z] + r9*tv[t + 1, x - 1, y - 1, z]',
'-r10*tu[t, x, y, z] + r10*tu[t, x + 1, y, z] + 1',
'-r10*tv[t, x, y, z] + r10*tv[t, x + 1, y, z] + 1',
'r11*(r0*r12 + r1*r12 - 2.0*r12*r2) + r11*(r12*r3 + r12*r4 - 2.0*r12*r5) - '
'2.0*r11*(r12*r6 + r12*r7 - 2.0*r12*(r8 + r9*tv[t + 1, x, y, z])) + 1',
'r12*(r0*r11 + r11*r3 - 2.0*r11*r6) + r12*(r1*r11 + r11*r4 - 2.0*r11*r7) - '
'2.0*r12*(r11*r2 + r11*r5 - 2.0*r11*(r8 + r9*tv[t + 1, x, y, z])) + 2']),
])
def test_cse(exprs, expected):
"""Test common subexpressions elimination."""
grid = Grid((3, 3, 3))
dims = grid.dimensions
tu = TimeFunction(name="tu", grid=grid, space_order=2) # noqa
tv = TimeFunction(name="tv", grid=grid, space_order=2) # noqa
tw = TimeFunction(name="tw", grid=grid, space_order=2) # noqa
tz = TimeFunction(name="tz", grid=grid, space_order=2) # noqa
ti0 = Array(name='ti0', shape=(3, 5, 7), dimensions=dims).indexify() # noqa
ti1 = Array(name='ti1', shape=(3, 5, 7), dimensions=dims).indexify() # noqa
t0 = Scalar(name='t0') # noqa
t1 = Scalar(name='t1') # noqa
t2 = Scalar(name='t2') # noqa
# List comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = DummyEq(indexify(diffify(eval(e).evaluate)))
counter = generator()
make = lambda: Scalar(name='r%d' % counter()).indexify()
processed = _cse(exprs, make)
assert len(processed) == len(expected)
assert all(str(i.rhs) == j for i, j in zip(processed, expected))
@pytest.mark.parametrize('expr,expected', [
('2*fa[x] + fb[x]', '2*fa[x] + fb[x]'),
('fa[x]**2', 'fa[x]*fa[x]'),
('fa[x]**2 + fb[x]**3', 'fa[x]*fa[x] + fb[x]*fb[x]*fb[x]'),
('3*fa[x]**4', '3*(fa[x]*fa[x]*fa[x]*fa[x])'),
('fa[x]**2', 'fa[x]*fa[x]'),
('1/(fa[x]**2)', '1/(fa[x]*fa[x])'),
('1/(fb[x]**2 + 1)', '1/(fb[x]*fb[x] + 1)'),
('1/(fa[x] + fb[x])', '1/(fa[x] + fb[x])'),
('3*sin(fa[x])**2', '3*(sin(fa[x])*sin(fa[x]))'),
('fa[x]/(fb[x]**2)', 'fa[x]/((fb[x]*fb[x]))'),
('(fa[x]**0.5)**2', 'fa[x]'),
])
def test_pow_to_mul(expr, expected):
grid = Grid((4, 5))
x, y = grid.dimensions
s = Scalar(name='s') # noqa
fa = Function(name='fa', grid=grid, dimensions=(x,), shape=(4,)) # noqa
fb = Function(name='fb', grid=grid, dimensions=(x,), shape=(4,)) # noqa
assert str(pow_to_mul(eval(expr))) == expected
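def _demo_pow_to_mul():
    # Hedged sketch (hypothetical helper, not part of the parametrized test
    # above): pow_to_mul rewrites small integer powers into explicit
    # multiplications, mirroring the 'fa[x]**2' -> 'fa[x]*fa[x]' cases above.
    grid = Grid((4, 5))
    x, y = grid.dimensions  # noqa
    fa = Function(name='fa', grid=grid, dimensions=(x,), shape=(4,))
    print(pow_to_mul(fa[x]**3))  # expected to print fa[x]*fa[x]*fa[x]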
@pytest.mark.parametrize('expr,expected,estimate', [
('Eq(t0, t1)', 0, False),
('Eq(t0, -t1)', 0, False),
('Eq(t0, -t1)', 0, True),
('Eq(t0, fa[x] + fb[x])', 1, False),
('Eq(t0, fa[x + 1] + fb[x - 1])', 1, False),
('Eq(t0, fa[fb[x+1]] + fa[x])', 1, False),
('Eq(t0, fa[fb[x+1]] + fc[x+2, y+1])', 1, False),
('Eq(t0, t1*t2)', 1, False),
('Eq(t0, 2.*t0*t1*t2)', 3, False),
('Eq(t0, cos(t1*t2))', 2, False),
('Eq(t0, (t1*t2)**0)', 0, False),
('Eq(t0, (t1*t2)**t1)', 2, False),
('Eq(t0, (t1*t2)**2)', 3, False), # SymPy distributes integer exponents in a Mul
('Eq(t0, 2.*t0*t1*t2 + t0*fa[x+1])', 5, False),
('Eq(t0, (2.*t0*t1*t2 + t0*fa[x+1])*3. - t0)', 7, False),
('[Eq(t0, (2.*t0*t1*t2 + t0*fa[x+1])*3. - t0), Eq(t0, cos(t1*t2))]', 9, False),
('Eq(t0, cos(fa*fb))', 51, True),
('Eq(t0, cos(fa[x]*fb[x]))', 51, True),
('Eq(t0, cos(t1*t2))', 51, True),
('Eq(t0, cos(c*c))', 2, True), # `cos(...constants...)` counts as 1
('Eq(t0, t1**3)', 2, True),
('Eq(t0, t1**4)', 3, True),
('Eq(t0, t2*t1**-1)', 26, True),
('Eq(t0, t1**t2)', 50, True),
('Eq(t0, 3.2/h_x)', 2, True), # seen as `3.2*(1/h_x)`, so counts as 2
('Eq(t0, 3.2/h_x*fa + 2.4/h_x*fb)', 7, True), # `pow(...constants...)` counts as 1
])
def test_estimate_cost(expr, expected, estimate):
# Note: integer arithmetic isn't counted
grid = Grid(shape=(4, 4))
x, y = grid.dimensions # noqa
h_x = x.spacing # noqa
c = Constant(name='c') # noqa
t0 = Scalar(name='t0') # noqa
t1 = Scalar(name='t1') # noqa
t2 = Scalar(name='t2') # noqa
fa = Function(name='fa', grid=grid, shape=(4,), dimensions=(x,)) # noqa
fb = Function(name='fb', grid=grid, shape=(4,), dimensions=(x,)) # noqa
fc = Function(name='fc', grid=grid) # noqa
assert estimate_cost(eval(expr), estimate) == expected
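def _demo_estimate_cost():
    # Hedged sketch (hypothetical helper, not part of the parametrized test
    # above): estimate_cost counts floating-point operations; with
    # estimate=True, transcendental functions get a fixed, larger weight.
    t0 = Scalar(name='t0')
    t1 = Scalar(name='t1')
    t2 = Scalar(name='t2')
    print(estimate_cost(Eq(t0, t1*t2), False))      # 1, as in the cases above
    print(estimate_cost(Eq(t0, cos(t1*t2)), True))  # 51, as in the cases above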
@pytest.mark.parametrize('exprs,exp_u,exp_v', [
(['Eq(s, 0, implicit_dims=(x, y))', 'Eq(s, s + 4, implicit_dims=(x, y))',
'Eq(u, s)'], 4, 0),
(['Eq(s, 0, implicit_dims=(x, y))', 'Eq(s, s + s + 4, implicit_dims=(x, y))',
'Eq(s, s + 4, implicit_dims=(x, y))', 'Eq(u, s)'], 8, 0),
(['Eq(s, 0, implicit_dims=(x, y))', 'Inc(s, 4, implicit_dims=(x, y))',
'Eq(u, s)'], 4, 0),
(['Eq(s, 0, implicit_dims=(x, y))', 'Inc(s, 4, implicit_dims=(x, y))', 'Eq(v, s)',
'Eq(u, s)'], 4, 4),
(['Eq(s, 0, implicit_dims=(x, y))', 'Inc(s, 4, implicit_dims=(x, y))', 'Eq(v, s)',
'Eq(s, s + 4, implicit_dims=(x, y))', 'Eq(u, s)'], 8, 4),
(['Eq(s, 0, implicit_dims=(x, y))', 'Inc(s, 4, implicit_dims=(x, y))', 'Eq(v, s)',
'Inc(s, 4, implicit_dims=(x, y))', 'Eq(u, s)'], 8, 4),
(['Eq(u, 0)', 'Inc(u, 4)', 'Eq(v, u)', 'Inc(u, 4)'], 8, 4),
(['Eq(u, 1)', 'Eq(v, 4)', 'Inc(u, v)', 'Inc(v, u)'], 5, 9),
])
def test_makeit_ssa(exprs, exp_u, exp_v):
"""
A test building Operators with non-trivial sequences of input expressions
that push hard on the `makeit_ssa` utility function.
"""
grid = Grid(shape=(4, 4))
x, y = grid.dimensions # noqa
u = Function(name='u', grid=grid) # noqa
v = Function(name='v', grid=grid) # noqa
s = Scalar(name='s') # noqa
# List comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = eval(e)
op = Operator(exprs)
op.apply()
assert np.all(u.data == exp_u)
assert np.all(v.data == exp_v)
@pytest.mark.parametrize('opt', ['noop', 'advanced'])
def test_time_dependent_split(opt):
grid = Grid(shape=(10, 10))
u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2, save=3)
v = TimeFunction(name='v', grid=grid, time_order=2, space_order=0, save=3)
# The second equation needs a full loop over x/y for u then
    # a full one over x/y for v
eq = [Eq(u.forward, 2 + grid.time_dim),
Eq(v.forward, u.forward.dx + u.forward.dy + 1)]
op = Operator(eq, opt=opt)
trees = retrieve_iteration_tree(op)
assert len(trees) == 2
op()
assert np.allclose(u.data[2, :, :], 3.0)
assert np.allclose(v.data[1, 1:-1, 1:-1], 1.0)
class TestLifting(object):
@pytest.mark.parametrize('exprs,expected', [
# none (different distance)
(['Eq(y.symbolic_max, g[0, x], implicit_dims=(t, x))',
'Inc(h1[0, 0], 1, implicit_dims=(t, x, y))'],
[6., 0., 0.]),
(['Eq(y.symbolic_max, g[0, x], implicit_dims=(t, x))',
'Eq(h1[0, 0], y, implicit_dims=(t, x, y))'],
[2., 0., 0.]),
(['Eq(y.symbolic_max, g[0, x], implicit_dims=(t, x))',
'Eq(h1[0, y], y, implicit_dims=(t, x, y))'],
[0., 1., 2.]),
(['Eq(y.symbolic_min, g[0, x], implicit_dims=(t, x))',
'Eq(h1[0, y], 3 - y, implicit_dims=(t, x, y))'],
[3., 2., 1.]),
(['Eq(y.symbolic_min, g[0, x], implicit_dims=(t, x))',
'Eq(y.symbolic_max, g[0, x], implicit_dims=(t, x))',
'Eq(h1[0, y], y, implicit_dims=(t, x, y))'],
[0., 1., 2.]),
(['Eq(y.symbolic_min, g[0, 0], implicit_dims=(t, x))',
'Eq(y.symbolic_max, g[0, 2], implicit_dims=(t, x))',
'Eq(h1[0, y], y, implicit_dims=(t, x, y))'],
[0., 1., 2.]),
(['Eq(y.symbolic_min, g[0, x], implicit_dims=(t, x))',
'Eq(y.symbolic_max, g[0, 2], implicit_dims=(t, x))',
'Inc(h1[0, y], y, implicit_dims=(t, x, y))'],
[0., 2., 6.]),
(['Eq(y.symbolic_min, g[0, x], implicit_dims=(t, x))',
'Eq(y.symbolic_max, g[0, 2], implicit_dims=(t, x))',
'Inc(h1[0, x], y, implicit_dims=(t, x, y))'],
[3., 3., 2.]),
(['Eq(y.symbolic_min, g[0, 0], implicit_dims=(t, x))',
'Inc(h1[0, y], x, implicit_dims=(t, x, y))'],
[3., 3., 3.]),
(['Eq(y.symbolic_min, g[0, 2], implicit_dims=(t, x))',
'Inc(h1[0, x], y.symbolic_min, implicit_dims=(t, x))'],
[2., 2., 2.]),
(['Eq(y.symbolic_min, g[0, 2], implicit_dims=(t, x))',
'Inc(h1[0, x], y.symbolic_min, implicit_dims=(t, x, y))'],
[2., 2., 2.]),
(['Eq(y.symbolic_min, g[0, 2], implicit_dims=(t, x))',
'Eq(h1[0, x], y.symbolic_min, implicit_dims=(t, x))'],
[2., 2., 2.]),
(['Eq(y.symbolic_min, g[0, x], implicit_dims=(t, x))',
'Eq(y.symbolic_max, g[0, x]-1, implicit_dims=(t, x))',
'Eq(h1[0, y], y, implicit_dims=(t, x, y))'],
[0., 0., 0.])
])
def test_edge_cases(self, exprs, expected):
t, x, y = dimensions('t x y')
g = TimeFunction(name='g', shape=(1, 3), dimensions=(t, x),
time_order=0, dtype=np.int32)
g.data[0, :] = [0, 1, 2]
h1 = TimeFunction(name='h1', shape=(1, 3), dimensions=(t, y), time_order=0)
h1.data[0, :] = 0
# List comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = eval(e)
op = Operator(exprs)
op.apply()
assert np.all(h1.data == expected)
@pytest.mark.parametrize('exprs,expected,visit', [
(['Eq(f, f + g*2, implicit_dims=(time, x, y))',
'Eq(u, (f + f[y+1])*g)'],
['txy', 'txy'], 'txyy'),
])
def test_contracted(self, exprs, expected, visit):
"""
Test that in situations such as
for i
for x
r = f(a[x])
the `r` statement isn't lifted outside of `i`, since we're not recording
        each of the computed `x` values (IOW, we're writing to `r` rather than `r[x]`).
"""
grid = Grid(shape=(3, 3), dtype=np.int32)
x, y = grid.dimensions
time = grid.time_dim # noqa
f = Function(name='f', grid=grid, shape=(3,), dimensions=(y,)) # noqa
g = Function(name='g', grid=grid) # noqa
u = TimeFunction(name='u', grid=grid, time_order=0) # noqa
# List comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = eval(e)
op = Operator(exprs)
trees = retrieve_iteration_tree(op)
iters = FindNodes(Iteration).visit(op)
assert len(trees) == len(expected)
# mapper just makes it quicker to write out the test parametrization
mapper = {'time': 't'}
assert ["".join(mapper.get(i.dim.name, i.dim.name) for i in j)
for j in trees] == expected
assert "".join(mapper.get(i.dim.name, i.dim.name) for i in iters) == visit
class TestAliases(object):
@pytest.mark.parametrize('exprs,expected', [
# none (different distance)
(['Eq(t0, fa[x] + fb[x])', 'Eq(t1, fa[x+1] + fb[x])'],
['fa[x] + fb[x]', 'fa[x+1] + fb[x]']),
# none (different dimension)
(['Eq(t0, fa[x] + fb[x])', 'Eq(t1, fa[x] + fb[y])'],
['fa[x] + fb[x]']),
# none (different operation)
(['Eq(t0, fa[x] + fb[x])', 'Eq(t1, fa[x] - fb[x])'],
['fa[x] + fb[x]', 'fa[x] - fb[x]']),
# simple
(['Eq(t0, fa[x] + fb[x])', 'Eq(t1, fa[x+1] + fb[x+1])',
'Eq(t2, fa[x+2] + fb[x+2])'],
['fa[x+1] + fb[x+1]']),
# 2D simple
(['Eq(t0, fc[x,y] + fd[x,y])', 'Eq(t1, fc[x+1,y+1] + fd[x+1,y+1])'],
['fc[x+1,y+1] + fd[x+1,y+1]']),
# 2D with stride
(['Eq(t0, fc[x,y] + fd[x+1,y+2])', 'Eq(t1, fc[x+1,y+1] + fd[x+2,y+3])'],
['fc[x+1,y+1] + fd[x+2,y+3]']),
# 2D with subdimensions
(['Eq(t0, fc[xi,yi] + fd[xi+1,yi+2])',
'Eq(t1, fc[xi+1,yi+1] + fd[xi+2,yi+3])'],
['fc[xi+1,yi+1] + fd[xi+2,yi+3]']),
# 2D with constant access
(['Eq(t0, fc[x,y]*fc[x,0] + fd[x,y])',
'Eq(t1, fc[x+1,y+1]*fc[x+1,0] + fd[x+1,y+1])'],
['fc[x+1,y+1]*fc[x+1,0] + fd[x+1,y+1]']),
# 2D with multiple, non-zero, constant accesses
(['Eq(t0, fc[x,y]*fc[x,0] + fd[x,y]*fc[x,1])',
'Eq(t1, fc[x+1,y+1]*fc[x+1,0] + fd[x+1,y+1]*fc[x+1,1])'],
['fc[x+1,0]*fc[x+1,y+1] + fc[x+1,1]*fd[x+1,y+1]']),
# 2D with different shapes
(['Eq(t0, fc[x,y]*fa[x] + fd[x,y])',
'Eq(t1, fc[x+1,y+1]*fa[x+1] + fd[x+1,y+1])'],
['fc[x+1,y+1]*fa[x+1] + fd[x+1,y+1]']),
# complex (two 2D aliases with stride inducing relaxation)
(['Eq(t0, fc[x,y] + fd[x+1,y+2])',
'Eq(t1, fc[x+1,y+1] + fd[x+2,y+3])',
'Eq(t2, fc[x+1,y+1]*3. + fd[x+2,y+2])',
'Eq(t3, fc[x+2,y+2]*3. + fd[x+3,y+3])'],
['fc[x+1,y+1] + fd[x+2,y+3]', '3.*fc[x+2,y+2] + fd[x+3,y+3]']),
])
def test_collection(self, exprs, expected):
"""
Unit test for the detection and collection of aliases out of a series
of input expressions.
"""
grid = Grid(shape=(4, 4))
x, y = grid.dimensions # noqa
xi, yi = grid.interior.dimensions # noqa
t0 = Scalar(name='t0') # noqa
t1 = Scalar(name='t1') # noqa
t2 = Scalar(name='t2') # noqa
t3 = Scalar(name='t3') # noqa
fa = Function(name='fa', grid=grid, shape=(4,), dimensions=(x,), space_order=4) # noqa
fb = Function(name='fb', grid=grid, shape=(4,), dimensions=(x,), space_order=4) # noqa
fc = Function(name='fc', grid=grid, space_order=4) # noqa
fd = Function(name='fd', grid=grid, space_order=4) # noqa
# List/dict comprehension would need explicit locals/globals mappings to eval
for i, e in enumerate(list(exprs)):
exprs[i] = DummyEq(indexify(eval(e).evaluate))
for i, e in enumerate(list(expected)):
expected[i] = eval(e)
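        # Detect aliasing expressions among the processed inputs (min-storage disabled)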
aliases = collect(exprs, lambda i: False, {'min-storage': False})
assert len(aliases) == len(expected)
assert all(i in expected for i in aliases)
def get_params(self, op, *names):
ret = []
for i in names:
for p in op.parameters:
if i == p.name:
ret.append(p)
return tuple(ret)
def check_array(self, array, exp_halo, exp_shape, rotate=False):
assert len(array.dimensions) == len(exp_halo)
shape = []
for i in array.symbolic_shape:
if i.is_Number or i.is_Symbol:
shape.append(i)
else:
assert i.is_Add
shape.append(Add(*i.args))
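        # With cire-rotate, the outermost dimension is reduced to a sliding window
        # of sum(halo)+1 points and loses its halo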
if rotate:
exp_shape = (sum(exp_halo[0]) + 1,) + tuple(exp_shape[1:])
exp_halo = ((0, 0),) + tuple(exp_halo[1:])
assert tuple(array.halo) == exp_halo
assert tuple(shape) == tuple(exp_shape)
@pytest.mark.parametrize('rotate', [False, True])
def test_full_shape(self, rotate):
"""
Check the shape of the Array used to store an aliasing expression.
The shape is impacted by loop blocking, which reduces the required
write-to space.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name="u1", grid=grid, space_order=3)
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 0.5
u1.data_with_halo[:] = 0.5
# Leads to 3D aliases
eqn = Eq(u.forward, ((u[t, x, y, z] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x+2, y+2, z+2] + u[t, x+3, y+3, z+3])*3*f + 1))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check code generation
xs, ys, zs = self.get_params(op1, 'x0_blk0_size', 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 1
self.check_array(arrays[0], ((1, 1), (1, 1), (1, 1)), (xs+2, ys+2, zs+2), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parametrize('rotate', [False, True])
def test_contracted_shape(self, rotate):
"""
Conceptually like `test_full_shape`, but the Operator used in this
test leads to contracted Arrays (2D instead of 3D).
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 0.5
u1.data_with_halo[:] = 0.5
# Leads to 2D aliases
eqn = Eq(u.forward, ((u[t, x, y, z] + u[t, x, y+1, z+1])*3*f +
(u[t, x, y+2, z+2] + u[t, x, y+3, z+3])*3*f + 1))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check code generation
ys, zs = self.get_params(op1, 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 1
self.check_array(arrays[0], ((1, 1), (1, 1)), (ys+2, zs+2), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parametrize('rotate', [False, True])
def test_uncontracted_shape(self, rotate):
"""
Like `test_contracted_shape`, but the potential contraction is
now along the innermost Dimension, which causes falling back to
3D Arrays.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 0.5
u1.data_with_halo[:] = 0.5
# Leads to 3D aliases
eqn = Eq(u.forward, ((u[t, x, y, z] + u[t, x+1, y+1, z])*3*f +
(u[t, x+2, y+2, z] + u[t, x+3, y+3, z])*3*f + 1))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check code generation
xs, ys, zs = self.get_params(op1, 'x0_blk0_size', 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 1
self.check_array(arrays[0], ((1, 1), (1, 1), (0, 0)), (xs+2, ys+2, zs), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
def test_uncontracted_shape_invariants(self):
"""
Like `test_uncontracted_shape`, but now with some (outer-)Dimension-invariant
aliasing expressions.
"""
grid = Grid(shape=(6, 6, 6))
x, y, z = grid.dimensions
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
f.data_with_halo[:] =\
np.linspace(-1, 1, f.data_with_halo.size).reshape(*f.shape_with_halo)
u.data_with_halo[:] = 0.5
u1.data_with_halo[:] = 0.5
def func(f):
return sqrt(f**2 + 1.)
# Leads to 3D aliases despite the potential contraction along x and y
eqn = Eq(u.forward, u*(func(f) + func(f[x, y, z-1])))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True}))
# Check code generation
xs, ys, zs = self.get_params(op1, 'x_size', 'y_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(arrays) == 1
self.check_array(arrays[0], ((0, 0), (0, 0), (1, 0)), (xs, ys, zs+1))
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.allclose(u.data, u1.data, rtol=10e-7)
@pytest.mark.parametrize('rotate', [False, True])
def test_full_shape_w_subdims(self, rotate):
"""
Like `test_full_shape`, but SubDomains (and therefore SubDimensions) are used.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 0.5
u1.data_with_halo[:] = 0.5
# Leads to 3D aliases
eqn = Eq(u.forward, ((u[t, x, y, z] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x+2, y+2, z+2] + u[t, x+3, y+3, z+3])*3*f + 1),
subdomain=grid.interior)
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check code generation
xs, ys, zs = self.get_params(op1, 'i0x0_blk0_size', 'i0y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 1
self.check_array(arrays[0], ((1, 1), (1, 1), (1, 1)), (xs+2, ys+2, zs+2), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parametrize('rotate', [False, True])
def test_mixed_shapes(self, rotate):
"""
Test that if running with ``opt=(..., {'min-storage': True})``, then,
when possible, aliasing expressions are assigned to (n-k)D Arrays (k>0)
rather than nD Arrays.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
d = Dimension(name='d')
c = Function(name='c', grid=grid, shape=(2, 3), dimensions=(d, z))
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u', grid=grid, space_order=3)
c.data_with_halo[:] = 1.
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 1.5
u1.data_with_halo[:] = 1.5
# Leads to 2D and 3D aliases
eqn = Eq(u.forward,
((c[0, z]*u[t, x+1, y+1, z] + c[1, z+1]*u[t, x+1, y+1, z+1])*f +
(c[0, z]*u[t, x+2, y+2, z] + c[1, z+1]*u[t, x+2, y+2, z+1])*f +
(u[t, x, y+1, z+1] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x, y+3, z+1] + u[t, x+1, y+3, z+1])*3*f))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced',
{'openmp': True, 'min-storage': True,
'cire-mincost-sops': 1, 'cire-rotate': rotate}))
# Check code generation
assert len(op1._func_table) == 1
xs, ys, zs = self.get_params(op1, 'x0_blk0_size', 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 2
self.check_array(arrays[0], ((1, 1), (0, 0)), (ys+2, zs), rotate)
self.check_array(arrays[1], ((1, 0), (1, 0), (0, 0)), (xs+1, ys+1, zs), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
def test_min_storage_in_isolation(self):
"""
        Test that if running with ``opt=('cire-sops', {'min-storage': True})``,
then, when possible, aliasing expressions are assigned to (n-k)D Arrays (k>0)
rather than nD Arrays.
"""
grid = Grid(shape=(8, 8, 8))
x, y, z = grid.dimensions
u = TimeFunction(name='u', grid=grid, space_order=2)
u1 = TimeFunction(name="u1", grid=grid, space_order=2)
u2 = TimeFunction(name="u2", grid=grid, space_order=2)
u.data_with_halo[:] = 1.42
u1.data_with_halo[:] = 1.42
u2.data_with_halo[:] = 1.42
eqn = Eq(u.forward, u.dy.dy + u.dx.dx)
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('cire-sops', {'openmp': True, 'min-storage': True}))
op2 = Operator(eqn, opt=('advanced-fsg', {'openmp': True}))
# Check code generation
# `min-storage` leads to one 2D and one 3D Arrays
xs, ys, zs = self.get_params(op1, 'x_size', 'y_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(arrays) == 2
assert len([i for i in arrays if i._mem_shared]) == 1
assert len([i for i in arrays if i._mem_local]) == 1
self.check_array(arrays[1], ((1, 0), (0, 0), (0, 0)), (xs+1, ys, zs))
self.check_array(arrays[0], ((1, 0), (0, 0)), (ys+1, zs))
# Check that `advanced-fsg` + `min-storage` is incompatible
try:
Operator(eqn, opt=('advanced-fsg', {'openmp': True, 'min-storage': True}))
except InvalidOperator:
assert True
except:
assert False
        # Check that `cire-rotate=True` has no effect in this code as there's
# no blocking
op3 = Operator(eqn, opt=('cire-sops', {'openmp': True, 'min-storage': True,
'cire-rotate': True}))
assert str(op3) == str(op1)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
op2(time_M=1, u=u2)
expected = norm(u)
assert np.isclose(expected, norm(u1), rtol=1e-5)
assert np.isclose(expected, norm(u2), rtol=1e-5)
def test_min_storage_issue_1506(self):
grid = Grid(shape=(10, 10))
u1 = TimeFunction(name='u1', grid=grid, time_order=2, space_order=4, save=10)
u2 = TimeFunction(name='u2', grid=grid, time_order=2, space_order=4, save=10)
v1 = TimeFunction(name='v1', grid=grid, time_order=2, space_order=4, save=None)
v2 = TimeFunction(name='v2', grid=grid, time_order=2, space_order=4, save=None)
eqns = [Eq(u1.forward, (u1+u2).laplace),
Eq(u2.forward, (u1-u2).laplace),
Eq(v1.forward, (v1+v2).laplace + u1.dt2),
Eq(v2.forward, (v1-v2).laplace + u2.dt2)]
op0 = Operator(eqns, opt=('advanced', {'min-storage': False,
'cire-mincost-sops': 1}))
op1 = Operator(eqns, opt=('advanced', {'min-storage': True,
'cire-mincost-sops': 1}))
# Check code generation
# min-storage has no effect in this example
assert str(op0) == str(op1)
@pytest.mark.parametrize('rotate', [False, True])
def test_mixed_shapes_v2_w_subdims(self, rotate):
"""
Analogous `test_mixed_shapes`, but with different sets of aliasing expressions.
Also, uses SubDimensions.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
d = Dimension(name='d')
c = Function(name='c', grid=grid, shape=(2, 3), dimensions=(d, z))
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
c.data_with_halo[:] = 1.
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 1.5
u1.data_with_halo[:] = 1.5
# Leads to 2D and 3D aliases
eqn = Eq(u.forward,
((c[0, z]*u[t, x+1, y-1, z] + c[1, z+1]*u[t, x+1, y-1, z+1])*f +
(c[0, z]*u[t, x+2, y-2, z] + c[1, z+1]*u[t, x+2, y-2, z+1])*f +
(u[t, x, y+1, z+1] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x, y+3, z+2] + u[t, x+1, y+3, z+2])*3*f),
subdomain=grid.interior)
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced',
{'openmp': True, 'min-storage': True,
'cire-mincost-sops': 1, 'cire-rotate': rotate}))
# Check code generation
assert len(op1._func_table) == 1
xs, ys, zs = self.get_params(op1, 'i0x0_blk0_size', 'i0y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 2
self.check_array(arrays[0], ((1, 1), (1, 0)), (ys+2, zs+1), rotate)
self.check_array(arrays[1], ((1, 0), (1, 0), (0, 0)), (xs+1, ys+1, zs), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parametrize('rotate', [False, True])
def test_in_bounds_w_shift(self, rotate):
"""
Make sure the iteration space and indexing of the aliasing expressions
are shifted such that no out-of-bounds accesses are generated.
"""
grid = Grid(shape=(5, 5, 5))
x, y, z = grid.dimensions
t = grid.stepping_dim
d = Dimension(name='d')
c = Function(name='c', grid=grid, shape=(2, 5), dimensions=(d, z))
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=4)
u1 = TimeFunction(name='u1', grid=grid, space_order=4)
c.data_with_halo[:] = 1.
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 1.5
u1.data_with_halo[:] = 1.5
# Leads to 3D aliases
eqn = Eq(u.forward,
((c[0, z]*u[t, x+1, y, z] + c[1, z+1]*u[t, x+1, y, z+1])*f +
(c[0, z]*u[t, x+2, y+2, z] + c[1, z+1]*u[t, x+2, y+2, z+1])*f +
(u[t, x, y-4, z+1] + u[t, x+1, y-4, z+1])*3*f +
(u[t, x-1, y-3, z+1] + u[t, x, y-3, z+1])*3*f))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check code generation
assert len(op1._func_table) == 1
xs, ys, zs = self.get_params(op1, 'x0_blk0_size', 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 2
self.check_array(arrays[0], ((1, 0), (1, 1), (0, 0)), (xs+1, ys+2, zs), rotate)
self.check_array(arrays[1], ((1, 0), (1, 1), (0, 0)), (xs+1, ys+2, zs), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parametrize('rotate', [False, True])
def test_constant_symbolic_distance(self, rotate):
"""
Test the detection of aliasing expressions in the case of a
constant symbolic distance, such as `a[t, x_m+2, y, z]` when the
Dimensions are `(t, x, y, z)`; here, `x_m + 2` is a constant
symbolic access.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
x_m = x.symbolic_min
y_m = y.symbolic_min
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 0.5
u1.data_with_halo[:] = 0.5
# Leads to 2D aliases
eqn = Eq(u.forward,
((u[t, x_m+2, y, z] + u[t, x_m+3, y+1, z+1])*3*f +
(u[t, x_m+2, y+2, z+2] + u[t, x_m+3, y+3, z+3])*3*f + 1 +
(u[t, x+2, y+2, z+2] + u[t, x+3, y+3, z+3])*3*f + # Not an alias
(u[t, x_m+1, y+2, z+2] + u[t, x_m+1, y+3, z+3])*3*f + # Not an alias
(u[t, x+2, y_m+3, z+2] + u[t, x+3, y_m+3, z+3])*3*f +
(u[t, x+1, y_m+3, z+1] + u[t, x+2, y_m+3, z+2])*3*f))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check code generation
xs, ys, zs = self.get_params(op1, 'x0_blk0_size', 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 2
self.check_array(arrays[0], ((1, 0), (1, 0)), (xs+1, zs+1), rotate)
self.check_array(arrays[1], ((1, 1), (1, 1)), (ys+2, zs+2), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parametrize('rotate', [False, True])
def test_outlier_with_long_diameter(self, rotate):
"""
Test that if there is a potentially aliasing expression, say A, with
excessively long diameter (that is, such that it cannot safely be
computed in a loop with other aliasing expressions), then A is ignored
and the other aliasing expressions are captured correctly.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
f.data_with_halo[:] = 2.
u.data_with_halo[:] = 1.5
u1.data_with_halo[:] = 1.5
# Leads to 3D aliases
# Note: the outlier already touches the halo extremes, so it cannot
# be computed in a loop with extra y-iterations, hence it must be ignored
# while not compromising the detection of the two aliasing sub-expressions
eqn = Eq(u.forward, ((u[t, x, y+1, z+1] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x, y-3, z+1] + u[t, x+1, y+3, z+1])*3*f + # outlier
(u[t, x, y+3, z+2] + u[t, x+1, y+3, z+2])*3*f))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
assert len(op1._func_table) == 1
# Check code generation
ys, zs = self.get_params(op1, 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 1
self.check_array(arrays[0], ((1, 1), (1, 0)), (ys+2, zs+1), rotate)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
def test_composite(self):
"""
        Check that composite aliases are optimized away through "smaller" aliases.
Examples
--------
Instead of the following:
t0 = a[x, y]
t1 = b[x, y]
t2 = a[x+1, y+1]*b[x, y]
out = t0 + t1 + t2 # pseudocode
We should get:
t0 = a[x, y]
t1 = b[x, y]
out = t0 + t1 + t0[x+1,y+1]*t1[x, y] # pseudocode
"""
grid = Grid(shape=(3, 3))
x, y = grid.dimensions # noqa
g = Function(name='g', grid=grid)
u = TimeFunction(name='u', grid=grid)
u1 = TimeFunction(name='u1', grid=grid)
g.data[:] = 2.
u.data[:] = 1.
u1.data[:] = 1.
expr = (cos(g)*cos(g) +
sin(g)*sin(g) +
sin(g)*cos(g) +
sin(g[x + 1, y + 1])*cos(g[x + 1, y + 1]))*u
op0 = Operator(Eq(u.forward, expr), opt='noop')
op1 = Operator(Eq(u.forward, expr), opt=('advanced', {'cire-mincost-sops': 1}))
# Check code generation
# We expect two temporary Arrays, one for `cos(g)` and one for `sin(g)`
arrays = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(arrays) == 2
assert all(i._mem_heap and not i._mem_external for i in arrays)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.allclose(u.data, u1.data, rtol=10e-7)
@pytest.mark.xfail(reason="Cannot deal with nested aliases yet")
def test_nested_invariants(self):
"""
Check that nested aliases are optimized away through "smaller" aliases.
Examples
--------
Given the expression
sqrt(cos(a[x, y]))
We should get
t0 = cos(a[x,y])
t1 = sqrt(t0)
out = t1 # pseudocode
"""
grid = Grid(shape=(3, 3))
x, y = grid.dimensions # noqa
u = TimeFunction(name='u', grid=grid)
g = Function(name='g', grid=grid)
op = Operator(Eq(u.forward, u + sin(cos(g)) + sin(cos(g[x+1, y+1]))))
        # We expect two temporary Arrays: `r1 = cos(g)` and `r2 = sin(r1)`
arrays = [i for i in FindSymbols().visit(op) if i.is_Array and i._mem_local]
assert len(arrays) == 2
assert all(i._mem_heap and not i._mem_external for i in arrays)
@switchconfig(profiling='advanced')
def test_twin_sops(self):
"""
Check that identical sum-of-product aliases are caught via CSE thus
reducing the operation count (but not the working set size).
"""
grid = Grid(shape=(10, 10, 10), dtype=np.float64)
x, y, z = grid.dimensions
space_order = 2
u = TimeFunction(name='u', grid=grid, space_order=space_order)
v = TimeFunction(name='v', grid=grid, space_order=space_order)
u1 = TimeFunction(name='u', grid=grid, space_order=space_order)
v1 = TimeFunction(name='v', grid=grid, space_order=space_order)
u2 = TimeFunction(name='u', grid=grid, space_order=space_order)
v2 = TimeFunction(name='v', grid=grid, space_order=space_order)
f = Function(name='f', grid=grid, space_order=space_order)
e = Function(name='e', grid=grid, space_order=space_order)
p0 = Function(name='p0', grid=grid, space_order=space_order)
p1 = Function(name='p1', grid=grid, space_order=space_order)
f.data[:] = 1.2
e.data[:] = 0.3
p0.data[:] = 0.4
p1.data[:] = 0.7
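        # Directional-derivative helpers: d0 weights the forward-staggered derivatives
        # by the trig factors, d1 differentiates the trig-weighted field on the
        # backward-staggered grid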
def d0(field):
return (sin(p0) * cos(p1) * field.dx(x0=x+x.spacing/2) +
sin(p0) * sin(p1) * field.dy(x0=y+y.spacing/2) +
cos(p0) * field.dz(x0=z+z.spacing/2))
def d1(field):
return ((sin(p0) * cos(p1) * field).dx(x0=x-x.spacing/2) +
(sin(p0) * sin(p1) * field).dy(x0=y-y.spacing/2) +
(cos(p0) * field).dz(x0=z-z.spacing/2))
eqns = [Eq(u.forward, d1((1 - f * e**2) + f * e * sqrt(1 - e**2) * d0(v))),
Eq(v.forward, d1((1 - f + f * e**2) * d0(v) + f * e * sqrt(1 - e**2)))]
op0 = Operator(eqns, opt='noop')
op1 = Operator(eqns, opt='advanced')
op2 = Operator(eqns, opt=('advanced', {'cire-maxalias': True}))
# Check code generation
# We expect two temporary Arrays which have in common a sub-expression
# stemming from `d0(v, p0, p1)`
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 2
assert all(i._mem_heap and not i._mem_external for i in arrays)
trees = retrieve_iteration_tree(op1._func_table['bf0'].root)
assert len(trees) == 2
exprs = FindNodes(Expression).visit(trees[0][2])
assert exprs[-1].write is arrays[-1]
assert arrays[0] not in exprs[-1].reads
# Check numerical output
op0(time_M=2)
summary1 = op1(time_M=2, u=u1, v=v1)
expected_u = norm(u)
expected_v = norm(v)
assert np.isclose(expected_u, norm(u1), rtol=10e-16)
assert np.isclose(expected_v, norm(v1), rtol=10e-16)
summary2 = op2(time_M=2, u=u2, v=v2)
assert np.isclose(expected_u, norm(u2), rtol=10e-16)
assert np.isclose(expected_v, norm(v2), rtol=10e-16)
# Also check against expected operation count to make sure
# all redundancies have been detected correctly
assert sum(i.ops for i in summary1.values()) == 73
assert sum(i.ops for i in summary2.values()) == 60
@pytest.mark.parametrize('rotate', [False, True])
def test_from_different_nests(self, rotate):
"""
Check that aliases arising from two sets of equations A and B,
characterized by a flow dependence, are scheduled within A's and B's
loop nests respectively.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
i = Dimension(name='i')
f = Function(name='f', grid=grid)
g = Function(name='g', shape=(3,), dimensions=(i,))
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
v = TimeFunction(name='v', grid=grid, space_order=3)
v1 = TimeFunction(name='v1', grid=grid, space_order=3)
f.data_with_halo[:] = 1.
g.data[:] = 2.
# Leads to 3D aliases
eqns = [Eq(u.forward, ((u[t, x, y, z] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x+2, y+2, z+2] + u[t, x+3, y+3, z+3])*3*f + 1)),
Inc(u[t+1, i, i, i], g + 1),
Eq(v.forward, ((v[t, x, y, z] + v[t, x+1, y+1, z+1])*3*u.forward +
(v[t, x+2, y+2, z+2] + v[t, x+3, y+3, z+3])*3*u.forward +
1))]
op0 = Operator(eqns, opt=('noop', {'openmp': True}))
op1 = Operator(eqns, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check code generation
assert 'bf0' in op1._func_table
assert 'bf1' in op1._func_table
trees = retrieve_iteration_tree(op1._func_table['bf0'].root)
assert len(trees) == 2
assert trees[0][-1].nodes[0].body[0].write.is_Array
assert trees[1][-1].nodes[0].body[0].write is u
trees = retrieve_iteration_tree(op1._func_table['bf1'].root)
assert len(trees) == 2
assert trees[0][-1].nodes[0].body[0].write.is_Array
assert trees[1][-1].nodes[0].body[0].write is v
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1, v=v1)
assert np.all(u.data == u1.data)
assert np.all(v.data == v1.data)
@pytest.mark.parametrize('rotate', [False, True])
@switchconfig(autopadding=True, platform='knl7210') # Platform is to fix pad value
def test_minimize_remainders_due_to_autopadding(self, rotate):
"""
Check that the bounds of the Iteration computing an aliasing expression are
relaxed (i.e., slightly larger) so that backend-compiler-generated remainder
loops are avoided.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 0.5
u1.data_with_halo[:] = 0.5
# Leads to 3D aliases
eqn = Eq(u.forward, ((u[t, x, y, z] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x+2, y+2, z+2] + u[t, x+3, y+3, z+3])*3*f + 1))
op0 = Operator(eqn, opt=('noop', {'openmp': False}))
op1 = Operator(eqn, opt=('advanced', {'openmp': False, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check code generation
xs, ys, zs = self.get_params(op1, 'x0_blk0_size', 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 1
assert arrays[0].padding == ((0, 0), (0, 0), (0, 30))
self.check_array(arrays[0], ((1, 1), (1, 1), (1, 1)), (xs+2, ys+2, zs+32), rotate)
# Check loop bounds
trees = retrieve_iteration_tree(op1._func_table['bf0'].root)
assert len(trees) == 2
expected_rounded = trees[0].inner
assert expected_rounded.symbolic_max ==\
z.symbolic_max + (z.symbolic_max - z.symbolic_min + 3) % 16 + 1
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
def test_catch_largest_invariant(self):
"""
Make sure the largest time-invariant sub-expressions are extracted
        provided that their operation count exceeds a certain threshold.
"""
grid = Grid((10, 10))
a = Function(name="a", grid=grid, space_order=4)
b = Function(name="b", grid=grid, space_order=4)
c = Function(name="c", grid=grid, space_order=4)
d = Function(name="d", grid=grid, space_order=4)
e = TimeFunction(name="e", grid=grid, space_order=4)
deriv = (sqrt((a - 2*b)/c) * e.dx).dy + (sqrt((d - 2*c)/a) * e.dy).dx
op = Operator(Eq(e.forward, deriv + e))
# We expect four temporary Arrays, two of which for the `sqrt` subexpr
arrays = [i for i in FindSymbols().visit(op) if i.is_Array]
assert len(arrays) == 4
exprs = FindNodes(Expression).visit(op)
sqrt_exprs = exprs[:2]
assert all(e.write in arrays for e in sqrt_exprs)
assert all(e.expr.rhs.is_Pow for e in sqrt_exprs)
assert all(e.write._mem_heap and not e.write._mem_external for e in sqrt_exprs)
tmp_exprs = exprs[2:4]
assert all(e.write in arrays for e in tmp_exprs)
assert all(e.write._mem_heap and not e.write._mem_external for e in tmp_exprs)
def test_catch_duplicate_from_different_clusters(self):
"""
Check that the compiler is able to detect redundant aliases when these
stem from different Clusters.
"""
grid = Grid((10, 10))
a = Function(name="a", grid=grid, space_order=4)
b = Function(name="b", grid=grid, space_order=4)
c = Function(name="c", grid=grid, space_order=4)
d = Function(name="d", grid=grid, space_order=4)
s = SparseTimeFunction(name="s", grid=grid, npoint=1, nt=2)
e = TimeFunction(name="e", grid=grid, space_order=4)
f = TimeFunction(name="f", grid=grid, space_order=4)
deriv = (sqrt((a - 2*b)/c) * e.dx).dy + (sqrt((d - 2*c)/a) * e.dy).dx
deriv2 = (sqrt((c - 2*b)/c) * f.dy).dx + (sqrt((d - 2*c)/a) * f.dx).dy
eqns = ([Eq(e.forward, deriv + e)] +
s.inject(e.forward, expr=s) +
[Eq(f.forward, deriv2 + f + e.forward.dx)])
op = Operator(eqns, opt=('advanced', {'cire-mincost-sops': 1000}))
arrays = [i for i in FindSymbols().visit(op) if i.is_Array]
assert len(arrays) == 3
assert all(i._mem_heap and not i._mem_external for i in arrays)
def test_hoisting_if_coupled(self):
"""
Test that coupled aliases are successfully hoisted out of the time loop.
This test also checks the correct behaviour of the Operator opt-option
``cire-repeats-inv``.
"""
grid = Grid((10, 10))
a = Function(name="a", grid=grid, space_order=4)
b = Function(name="b", grid=grid, space_order=4)
e = TimeFunction(name="e", grid=grid, space_order=4)
f = TimeFunction(name="f", grid=grid, space_order=4)
subexpr0 = sqrt(1. + 1./a)
subexpr1 = 1/(8.*subexpr0 - 8./b)
eqns = [Eq(e.forward, e + 1),
Eq(f.forward, f*subexpr0 - f*subexpr1 + e.forward.dx)]
op = Operator(eqns, opt=('advanced', {'cire-repeats-inv': 2,
'cire-mincost-inv': 28}))
trees = retrieve_iteration_tree(op)
assert len(trees) == 3
arrays = [i for i in FindSymbols().visit(trees[0].root) if i.is_Array]
assert len(arrays) == 2
assert all(i._mem_heap and not i._mem_external for i in arrays)
@pytest.mark.parametrize('rotate', [False, True])
def test_drop_redundants_after_fusion(self, rotate):
"""
Test for detection of redundant aliases that get exposed after
Cluster fusion.
"""
grid = Grid(shape=(10, 10))
t = cos(Function(name="t", grid=grid))
p = sin(Function(name="p", grid=grid))
a = TimeFunction(name="a", grid=grid)
b = TimeFunction(name="b", grid=grid)
c = TimeFunction(name="c", grid=grid)
d = TimeFunction(name="d", grid=grid)
e = TimeFunction(name="e", grid=grid)
f = TimeFunction(name="f", grid=grid)
s1 = SparseTimeFunction(name="s1", grid=grid, npoint=1, nt=2)
eqns = [Eq(a.forward, t*a.dx + p*b.dy),
Eq(b.forward, p*b.dx + p*t*a.dy)]
eqns += s1.inject(field=a.forward, expr=s1)
eqns += s1.inject(field=b.forward, expr=s1)
eqns += [Eq(c.forward, t*p*a.forward.dx + b.forward.dy),
Eq(d.forward, t*d.dx + e.dy + p*a.dt),
Eq(e.forward, p*d.dx + e.dy + t*b.dt)]
eqns += [Eq(f.forward, t*p*e.forward.dx + p*d.forward.dy)]
op = Operator(eqns, opt=('advanced', {'cire-rotate': rotate}))
arrays = [i for i in FindSymbols().visit(op) if i.is_Array]
assert len(arrays) == 2
assert all(i._mem_heap and not i._mem_external for i in arrays)
def test_full_shape_big_temporaries(self):
"""
Test that if running with ``opt=advanced-fsg``, then the compiler uses
temporaries spanning the whole grid rather than blocks.
"""
grid = Grid(shape=(3, 3, 3))
x, y, z = grid.dimensions
t = grid.stepping_dim
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=3)
u1 = TimeFunction(name='u1', grid=grid, space_order=3)
f.data_with_halo[:] = 1.
u.data_with_halo[:] = 0.5
u1.data_with_halo[:] = 0.5
# Leads to 3D aliases
eqn = Eq(u.forward, ((u[t, x, y, z] + u[t, x+1, y+1, z+1])*3*f +
(u[t, x+2, y+2, z+2] + u[t, x+3, y+3, z+3])*3*f + 1))
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced-fsg', {'openmp': True,
'cire-mincost-sops': 1}))
# Check code generation
assert len(op1._func_table) == 2 # Expected two separate blocked loop nests
xs, ys, zs = self.get_params(op1, 'x_size', 'y_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array]
assert len(arrays) == 1
self.check_array(arrays[0], ((1, 1), (1, 1), (1, 1)), (xs+2, ys+2, zs+2))
        # Check that `cire-rotate=True` has no effect in this code as there's
# no cross-loop blocking
op2 = Operator(eqn, opt=('advanced-fsg', {'openmp': True, 'cire-rotate': True,
'cire-mincost-sops': 1}))
assert str(op2) == str(op1)
# Check numerical output
op0(time_M=1)
op1(time_M=1, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parametrize('rotate', [False, True])
@switchconfig(profiling='advanced')
def test_extraction_from_lifted_ispace(self, rotate):
"""
Test that the aliases are scheduled correctly when extracted from
Clusters whose iteration space is lifted (ie, stamp != 0).
"""
so = 8
grid = Grid(shape=(6, 6, 6))
f = Function(name='f', grid=grid, space_order=so, is_param=True)
v = TimeFunction(name="v", grid=grid, space_order=so)
v1 = TimeFunction(name="v1", grid=grid, space_order=so)
p = TimeFunction(name="p", grid=grid, space_order=so, staggered=NODE)
p1 = TimeFunction(name="p1", grid=grid, space_order=so, staggered=NODE)
v.data_with_halo[:] = 1.
v1.data_with_halo[:] = 1.
p.data_with_halo[:] = 0.5
p1.data_with_halo[:] = 0.5
f.data_with_halo[:] = 0.2
eqns = [Eq(v.forward, v - f*p),
Eq(p.forward, p - v.forward.dx + div(f*grad(p)))]
# Operator
op0 = Operator(eqns, opt=('noop', {'openmp': True}))
op1 = Operator(eqns, opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check numerical output
op0(time_M=1)
summary = op1(time_M=1, v=v1, p=p1)
assert np.isclose(norm(v), norm(v1), rtol=1e-5)
assert np.isclose(norm(p), norm(p1), atol=1e-5)
# Also check against expected operation count to make sure
# all redundancies have been detected correctly
assert summary[('section0', None)].ops == 115
@pytest.mark.parametrize('so_ops', [(4, 39), (8, 79)])
@pytest.mark.parametrize('rotate', [False, True])
@switchconfig(profiling='advanced')
def test_tti_adjoint_akin(self, so_ops, rotate):
"""
Extrapolated from TTI adjoint.
"""
so, exp_ops = so_ops
to = 2
soh = so // 2
T = transpose
grid = Grid(shape=(10, 10, 10), dtype=np.float64)
x, y, z = grid.dimensions
p = TimeFunction(name='p', grid=grid, space_order=so, time_order=to)
r = TimeFunction(name='r', grid=grid, space_order=so, time_order=to)
r1 = TimeFunction(name='r1', grid=grid, space_order=so, time_order=to)
delta = Function(name='delta', grid=grid, space_order=so)
theta = Function(name='theta', grid=grid, space_order=so)
phi = Function(name='phi', grid=grid, space_order=so)
p.data_with_halo[:] = 1.
r.data_with_halo[:] = 0.5
r1.data_with_halo[:] = 0.5
delta.data_with_halo[:] = 0.2
theta.data_with_halo[:] = 0.8
phi.data_with_halo[:] = 0.2
costheta = cos(theta)
sintheta = sin(theta)
cosphi = cos(phi)
sinphi = sin(phi)
delta = sqrt(delta)
field = delta*p + r
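        # Gz: first derivative of the wavefield combination along the tilted symmetry
        # axis; Gzz then applies the transposed (matvec=T) stencil to Gz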
Gz = -(sintheta * cosphi*first_derivative(field, dim=x, fd_order=soh) +
sintheta * sinphi*first_derivative(field, dim=y, fd_order=soh) +
costheta * first_derivative(field, dim=z, fd_order=soh))
Gzz = (first_derivative(Gz * sintheta * cosphi, dim=x, fd_order=soh, matvec=T) +
first_derivative(Gz * sintheta * sinphi, dim=y, fd_order=soh, matvec=T) +
first_derivative(Gz * costheta, dim=z, fd_order=soh, matvec=T))
# Equation
eqn = [Eq(r.backward, Gzz)]
op0 = Operator(eqn, subs=grid.spacing_map, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, subs=grid.spacing_map,
opt=('advanced', {'openmp': True, 'cire-mincost-sops': 1,
'cire-rotate': rotate}))
# Check numerical output
op0(time_M=1)
summary = op1(time_M=1, r=r1)
assert np.isclose(norm(r), norm(r1), rtol=1e-5)
# Also check against expected operation count to make sure
# all redundancies have been detected correctly
assert summary[('section1', None)].ops == exp_ops
@switchconfig(profiling='advanced')
def test_tti_adjoint_akin_v2(self):
"""
Yet another extrapolation from TTI adjoint which has caused headaches
in the past.
"""
so = 12
to = 2
fd_order = so // 2
grid = Grid(shape=(10, 10, 10), dtype=np.float64)
x, y, z = grid.dimensions
p = TimeFunction(name='p', grid=grid, space_order=so, time_order=to)
p1 = TimeFunction(name='p', grid=grid, space_order=so, time_order=to)
r = TimeFunction(name='r', grid=grid, space_order=so, time_order=to)
delta = Function(name='delta', grid=grid, space_order=so)
theta = Function(name='theta', grid=grid, space_order=so)
phi = Function(name='phi', grid=grid, space_order=so)
p.data_with_halo[:] = 1.1
p1.data_with_halo[:] = 1.1
r.data_with_halo[:] = 0.5
delta.data_with_halo[:] = 0.2
theta.data_with_halo[:] = 0.8
phi.data_with_halo[:] = 0.2
field = sqrt(1 + 2*delta)*p + r
Gz = sin(theta) * cos(phi) * field.dx(fd_order=fd_order)
Gzz = (Gz * cos(theta)).dz(fd_order=fd_order).T
H0 = field.laplace - Gzz
eqn = Eq(p.backward, H0)
op0 = Operator(eqn, subs=grid.spacing_map, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, subs=grid.spacing_map, opt=('advanced', {'openmp': True}))
# Check code generation
xs, ys, zs = self.get_params(op1, 'x0_blk0_size', 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 2
self.check_array(arrays[0], ((3, 3),), (zs+6,))
self.check_array(arrays[1], ((6, 6), (6, 6), (6, 6)), (xs+12, ys+12, zs+12))
# Check numerical output
op0(time_M=1)
summary = op1(time_M=1, p=p1)
assert np.isclose(norm(p), norm(p1), atol=1e-15)
# Also check against expected operation count to make sure
# all redundancies have been detected correctly
assert summary[('section1', None)].ops == 92
@pytest.mark.parametrize('rotate', [False, True])
@switchconfig(profiling='advanced')
def test_nested_first_derivatives(self, rotate):
"""
Test that aliasing sub-expressions from nested derivatives aren't split,
but rather they're captured together and scheduled to a single temporary.
"""
grid = Grid(shape=(10, 10, 10))
f = Function(name='f', grid=grid, space_order=4)
v = TimeFunction(name="v", grid=grid, space_order=4)
v1 = TimeFunction(name="v1", grid=grid, space_order=4)
v2 = TimeFunction(name="v2", grid=grid, space_order=4)
f.data_with_halo[:] = 0.5
v.data_with_halo[:] = 1.
v1.data_with_halo[:] = 1.
v2.data_with_halo[:] = 1.
eqn = Eq(v.forward, (v.dx * (1 + 2*f) * f).dx)
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-rotate': rotate}))
op2 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-rotate': rotate,
'cire-maxalias': True}))
# Check code generation
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == 1
# Check numerical output
op0(time_M=1)
summary1 = op1(time_M=1, v=v1)
expected_v = norm(v)
assert np.isclose(expected_v, norm(v1), rtol=1e-5)
summary2 = op2(time_M=1, v=v2)
assert np.isclose(expected_v, norm(v2), rtol=1e-5)
# Also check against expected operation count to make sure
# all redundancies have been detected correctly
assert summary1[('section0', None)].ops == 19
assert summary2[('section0', None)].ops == 15
@switchconfig(profiling='advanced')
@pytest.mark.parametrize('expr,exp_arrays,exp_ops', [
('f.dx.dx + g.dx.dx', (1, 1, 2, 1), (46, 40, 49, 17)),
('v.dx.dx + p.dx.dx', (2, 2, 2, 2), (61, 49, 49, 25)),
('(v.dx + v.dy).dx - (v.dx + v.dy).dy + 2*f.dx.dx + f*f.dy.dy + f.dx.dx(x0=1)',
(3, 3, 4, 3), (217, 199, 208, 94)),
('(g*(1 + f)*v.dx).dx + (2*g*f*v.dx).dx', (1, 1, 2, 1), (50, 44, 53, 19)),
('g*(f.dx.dx + g.dx.dx)', (1, 1, 2, 1), (47, 41, 50, 18)),
])
def test_sum_of_nested_derivatives(self, expr, exp_arrays, exp_ops):
"""
Test that aliasing sub-expressions from sums of nested derivatives
along `x` and `y` are scheduled to *two* different temporaries, not
three (one per unique derivative argument), thanks to FD linearity.
"""
grid = Grid(shape=(10, 10, 10), dtype=np.float64)
x, y, z = grid.dimensions # noqa
f = Function(name='f', grid=grid, space_order=4)
g = Function(name='g', grid=grid, space_order=4)
p = TimeFunction(name="p", grid=grid, space_order=4, staggered=x)
v = TimeFunction(name="v", grid=grid, space_order=4)
f.data_with_halo[:] =\
np.linspace(-10, 10, f.data_with_halo.size).reshape(*f.shape_with_halo)
g.data_with_halo[:] =\
np.linspace(-20, 20, g.data_with_halo.size).reshape(*g.shape_with_halo)
p.data_with_halo[:] = 0.7
v.data_with_halo[:] = 1.2
eqn = Eq(v.forward, eval(expr))
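        # op1-op4 exercise different pipelines: with/without derivative collection
        # ('collect-derivs'), with/without 'cire-maxalias', and the full 'advanced' mode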
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('collect-derivs', 'cire-sops', {'openmp': True}))
op2 = Operator(eqn, opt=('collect-derivs', 'cire-sops', {'openmp': True,
'cire-maxalias': True}))
op3 = Operator(eqn, opt=('cire-sops', {'openmp': True, 'cire-maxalias': True}))
op4 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-maxalias': True}))
# Check code generation
arrays = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(arrays) == exp_arrays[0]
arrays = [i for i in FindSymbols().visit(op2) if i.is_Array]
assert len(arrays) == exp_arrays[1]
arrays = [i for i in FindSymbols().visit(op3) if i.is_Array]
assert len(arrays) == exp_arrays[2]
arrays = [i for i in FindSymbols().visit(op4._func_table['bf0'].root)
if i.is_Array and i._mem_local]
assert len(arrays) == exp_arrays[3]
# Check numerical output
op0(time_M=1)
exp_v = norm(v)
for n, op in enumerate([op1, op2, op3, op4]):
v1 = TimeFunction(name="v", grid=grid, space_order=4)
v1.data_with_halo[:] = 1.2
summary = op(time_M=1, v=v1)
assert np.isclose(exp_v, norm(v1), atol=1e-11, rtol=1e-8)
# Also check against expected operation count to make sure
# all redundancies have been detected correctly
assert summary[('section0', None)].ops == exp_ops[n]
@pytest.mark.parametrize('rotate', [False, True])
def test_maxpar_option(self, rotate):
"""
Test the compiler option `cire-maxpar=True`.
"""
grid = Grid(shape=(10, 10, 10))
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=2)
u1 = TimeFunction(name="u", grid=grid, space_order=2)
f.data[:] = 0.0012
u.data[:] = 1.3
u1.data[:] = 1.3
eq = Eq(u.forward, f*u.dy.dy)
op0 = Operator(eq, opt='noop')
op1 = Operator(eq, opt=('advanced', {'cire-maxpar': True, 'cire-rotate': rotate}))
# Check code generation
trees = retrieve_iteration_tree(op1._func_table['bf0'].root)
assert len(trees) == 2
assert trees[0][1] is trees[1][1]
assert trees[0][2] is not trees[1][2]
# Check numerical output
op0.apply(time_M=2)
op1.apply(time_M=2, u=u1)
assert np.isclose(norm(u), norm(u1), rtol=1e-5)
@pytest.mark.parametrize('rotate', [False, True])
def test_blocking_options(self, rotate):
"""
Test CIRE with all compiler options impacting loop blocking, which in turn
impact the shape of the created temporaries as well as the surrounding loop
nests.
"""
grid = Grid(shape=(20, 20, 20))
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=2)
u1 = TimeFunction(name="u", grid=grid, space_order=2)
u2 = TimeFunction(name="u", grid=grid, space_order=2)
f.data_with_halo[:] =\
np.linspace(-10, 10, f.data_with_halo.size).reshape(*f.shape_with_halo)
u.data_with_halo[:] =\
np.linspace(-3, 3, u.data_with_halo.size).reshape(*u.shape_with_halo)
u1.data_with_halo[:] = u.data_with_halo[:]
u2.data_with_halo[:] = u.data_with_halo[:]
eq = Eq(u.forward, u.dx.dx + f*u.dy.dy)
op0 = Operator(eq, opt='noop')
op1 = Operator(eq, opt=('advanced', {'blocklevels': 2, 'cire-rotate': rotate}))
op2 = Operator(eq, opt=('advanced', {'blocklevels': 2, 'par-nested': 0,
'cire-rotate': rotate}))
# Check code generation
assert len([i for i in op1.dimensions if i.is_Incr]) == 6 + (2 if rotate else 0)
if configuration['language'] == 'openmp':
pariters = FindNodes(ParallelIteration).visit(op2._func_table['bf0'].root)
assert len(pariters) == 2
# Check numerical output
op0.apply(time_M=2)
op1.apply(time_M=2, u=u1, x0_blk1_size=2, y0_blk1_size=2)
op2.apply(time_M=2, u=u2, x0_blk1_size=2, y0_blk1_size=2)
expected = norm(u)
assert np.isclose(expected, norm(u1), rtol=1e-5)
assert np.isclose(expected, norm(u2), rtol=1e-5)
@pytest.mark.parametrize('rotate', [False, True])
def test_arrays_enforced_on_stack(self, rotate):
"""
Test enforcement of tensor temporaries on the stack.
"""
grid = Grid(shape=(10, 10, 10))
f = Function(name='f', grid=grid)
u = TimeFunction(name='u', grid=grid, space_order=(2, 4, 4))
u1 = TimeFunction(name="u", grid=grid, space_order=(2, 4, 4))
f.data[:] = 0.0012
u.data[:] = 1.3
u1.data[:] = 1.3
eq = Eq(u.forward, f*u.dx.dx + u.dy.dy)
op0 = Operator(eq, opt=('noop', {'openmp': True}))
op1 = Operator(eq, opt=('advanced', {'openmp': True, 'cire-onstack': True,
'cire-rotate': rotate}))
# Check code generation
pbs = FindNodes(ParallelBlock).visit(op1._func_table['bf0'].root)
assert len(pbs) == 1
pb = pbs[0]
if rotate:
assert 'r6[2][y0_blk0_size][z_size]' in str(pb.partree.prefix[0].header[0])
assert 'r3[2][z_size]' in str(pb.partree.prefix[0].header[1])
else:
assert 'r6[x0_blk0_size + 1][y0_blk0_size][z_size]'\
in str(pb.partree.prefix[0].header[0])
assert 'r3[y0_blk0_size + 1][z_size]' in str(pb.partree.prefix[0].header[1])
# Check numerical output
op0.apply(time_M=2)
op1.apply(time_M=2, u=u1)
assert np.isclose(norm(u), norm(u1), rtol=1e-7)
@pytest.mark.parametrize('rotate', [False, True])
def test_grouping_fallback(self, rotate):
"""
MFE for issue #1477.
"""
space_order = 8
grid = Grid(shape=(21, 21, 11))
eps = Function(name='eps', grid=grid, space_order=space_order)
p = TimeFunction(name='p', grid=grid, time_order=2, space_order=space_order)
p1 = TimeFunction(name='p0', grid=grid, time_order=2, space_order=space_order)
p.data[:] = 0.02
p1.data[:] = 0.02
eps.data_with_halo[:] =\
np.linspace(0.1, 0.3, eps.data_with_halo.size).reshape(*eps.shape_with_halo)
eqn = Eq(p.forward, ((1+sqrt(eps)) * p.dy).dy + (p.dz).dz)
op0 = Operator(eqn, opt=('noop', {'openmp': True}))
op1 = Operator(eqn, opt=('advanced', {'openmp': True, 'cire-rotate': rotate}))
# Check code generation
        # Expect one shared Array plus two local Arrays (one 1D, one 2D)
xs, ys, zs = self.get_params(op1, 'x0_blk0_size', 'y0_blk0_size', 'z_size')
arrays = [i for i in FindSymbols().visit(op1._func_table['bf0']) if i.is_Array]
assert len(arrays) == 3
assert len([i for i in arrays if i._mem_shared]) == 1
assert len([i for i in arrays if i._mem_local]) == 2
self.check_array(arrays[0], ((4, 4),), (zs+8,)) # On purpose w/o `rotate`
self.check_array(arrays[1], ((4, 4), (0, 0)), (ys+8, zs), rotate)
# Check numerical output
op0.apply(time_M=2)
op1.apply(time_M=2, p=p1)
assert np.isclose(norm(p), norm(p1), rtol=1e-7)
# Acoustic
class TestIsoAcoustic(object):
def run_acoustic_forward(self, opt=None):
shape = (50, 50, 50)
spacing = (10., 10., 10.)
nbl = 10
nrec = 101
t0 = 0.0
tn = 250.0
# Create two-layer model from preset
model = demo_model(preset='layers-isotropic', vp_top=3., vp_bottom=4.5,
spacing=spacing, shape=shape, nbl=nbl)
# Source and receiver geometries
src_coordinates = np.empty((1, len(spacing)))
src_coordinates[0, :] = np.array(model.domain_size) * .5
src_coordinates[0, -1] = model.origin[-1] + 2 * spacing[-1]
rec_coordinates = np.empty((nrec, len(spacing)))
rec_coordinates[:, 0] = np.linspace(0., model.domain_size[0], num=nrec)
rec_coordinates[:, 1:] = src_coordinates[0, 1:]
geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,
t0=t0, tn=tn, src_type='Ricker', f0=0.010)
solver = AcousticWaveSolver(model, geometry, opt=opt)
rec, u, summary = solver.forward(save=False)
op = solver.op_fwd(save=False)
return u, rec, summary, op
@switchconfig(profiling='advanced')
def test_fullopt(self):
u0, rec0, summary0, op0 = self.run_acoustic_forward(opt=None)
u1, rec1, summary1, op1 = self.run_acoustic_forward(opt='advanced')
assert len(op0._func_table) == 0
assert len(op1._func_table) == 1 # due to loop blocking
assert summary0[('section0', None)].ops == 50
assert summary0[('section1', None)].ops == 151
assert np.isclose(summary0[('section0', None)].oi, 2.851, atol=0.001)
assert summary1[('section0', None)].ops == 33
assert np.isclose(summary1[('section0', None)].oi, 1.882, atol=0.001)
assert np.allclose(u0.data, u1.data, atol=10e-5)
assert np.allclose(rec0.data, rec1.data, atol=10e-5)
class TestTTI(object):
@cached_property
def model(self):
        # TTI layered model for the tti test; no need for a smooth interface
        # between the two layers as the compilation passes are tested, not the
# physical prettiness of the result -- which ultimately saves time
return demo_model('layers-tti', nlayers=3, nbl=10, space_order=4,
shape=(50, 50, 50), spacing=(20., 20., 20.), smooth=False)
@cached_property
def geometry(self):
nrec = 101
t0 = 0.0
tn = 250.
# Source and receiver geometries
src_coordinates = np.empty((1, len(self.model.spacing)))
src_coordinates[0, :] = np.array(self.model.domain_size) * .5
src_coordinates[0, -1] = self.model.origin[-1] + 2 * self.model.spacing[-1]
rec_coordinates = np.empty((nrec, len(self.model.spacing)))
rec_coordinates[:, 0] = np.linspace(0., self.model.domain_size[0], num=nrec)
rec_coordinates[:, 1:] = src_coordinates[0, 1:]
geometry = AcquisitionGeometry(self.model, rec_coordinates, src_coordinates,
t0=t0, tn=tn, src_type='Gabor', f0=0.010)
return geometry
def tti_operator(self, opt, space_order=4):
return AnisotropicWaveSolver(self.model, self.geometry,
space_order=space_order, opt=opt)
@cached_property
def tti_noopt(self):
wavesolver = self.tti_operator(opt=None)
rec, u, v, summary = wavesolver.forward()
# Make sure no opts were applied
op = wavesolver.op_fwd('centered', False)
assert len(op._func_table) == 0
assert summary[('section0', None)].ops == 737
return v, rec
@switchconfig(profiling='advanced')
def test_fullopt(self):
wavesolver = self.tti_operator(opt='advanced')
rec, u, v, summary = wavesolver.forward(kernel='centered')
assert np.allclose(self.tti_noopt[0].data, v.data, atol=10e-1)
assert np.allclose(self.tti_noopt[1].data, rec.data, atol=10e-1)
# Check expected opcount/oi
assert summary[('section1', None)].ops == 102
assert np.isclose(summary[('section1', None)].oi, 1.610, atol=0.001)
# With optimizations enabled, there should be exactly four IncrDimensions
op = wavesolver.op_fwd(kernel='centered')
block_dims = [i for i in op.dimensions if i.is_Incr]
assert len(block_dims) == 4
x, x0_blk0, y, y0_blk0 = block_dims
assert x.parent is x0_blk0
assert y.parent is y0_blk0
assert not x._defines & y._defines
# Also, in this operator, we expect seven temporary Arrays:
# * all of the seven Arrays are allocated on the heap
# * with OpenMP, five Arrays are defined globally, and two additional
# Arrays are defined locally in bf0; otherwise, all of the seven
# Arrays are defined globally and passed as arguments to bf0
arrays = [i for i in FindSymbols().visit(op) if i.is_Array]
extra_arrays = 0 if configuration['language'] == 'openmp' else 2
assert len(arrays) == 5 + extra_arrays
assert all(i._mem_heap and not i._mem_external for i in arrays)
arrays = [i for i in FindSymbols().visit(op._func_table['bf0'].root)
if i.is_Array]
assert all(not i._mem_external for i in arrays)
assert len(arrays) == 7
assert len([i for i in arrays if i._mem_heap]) == 7
assert len([i for i in arrays if i._mem_shared]) == 5
assert len([i for i in arrays if i._mem_local]) == 2
@skipif(['nompi'])
@switchconfig(profiling='advanced')
@pytest.mark.parallel(mode=[(1, 'full')])
def test_fullopt_w_mpi(self):
tti_noopt = self.tti_operator(opt=None)
rec0, u0, v0, _ = tti_noopt.forward(kernel='centered')
tti_agg = self.tti_operator(opt='advanced')
rec1, u1, v1, _ = tti_agg.forward(kernel='centered')
assert np.allclose(v0.data, v1.data, atol=10e-1)
assert np.allclose(rec0.data, rec1.data, atol=10e-1)
# Run a quick check to be sure MPI-full-mode code was actually generated
op = tti_agg.op_fwd('centered', False)
assert len(op._func_table) == 8
assert 'pokempi0' in op._func_table
@switchconfig(profiling='advanced')
@pytest.mark.parametrize('space_order,expected', [
(8, 173), (16, 307)
])
def test_opcounts(self, space_order, expected):
op = self.tti_operator(opt='advanced', space_order=space_order)
sections = list(op.op_fwd(kernel='centered')._profiler._sections.values())
assert sections[1].sops == expected
class TestTTIv2(object):
@switchconfig(profiling='advanced')
@pytest.mark.parametrize('space_order,expected', [
(4, 203), (12, 395)
])
def test_opcounts(self, space_order, expected):
grid = Grid(shape=(3, 3, 3))
s = 0.00067
u = TimeFunction(name='u', grid=grid, space_order=space_order)
v = TimeFunction(name='v', grid=grid, space_order=space_order)
f = Function(name='f', grid=grid)
g = Function(name='g', grid=grid)
m = Function(name='m', grid=grid)
e = Function(name='e', grid=grid)
d = Function(name='d', grid=grid)
ang0 = cos(f)
ang1 = sin(f)
ang2 = cos(g)
ang3 = sin(g)
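        # H1: second derivative along the rotated symmetry axis (dip f, azimuth g);
        # H2: the remaining part of the Laplacian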
H1u = (ang1*ang1*ang2*ang2*u.dx2 +
ang1*ang1*ang3*ang3*u.dy2 +
ang0*ang0*u.dz2 +
2*ang1*ang1*ang3*ang2*u.dxdy +
2*ang0*ang1*ang3*u.dydz +
2*ang0*ang1*ang2*u.dxdz)
H2u = -H1u + u.laplace
H1v = (ang1*ang1*ang2*ang2*v.dx2 +
ang1*ang1*ang3*ang3*v.dy2 +
ang0*ang0*v.dz2 +
2*ang1*ang1*ang3*ang2*v.dxdy +
2*ang0*ang1*ang3*v.dydz +
2*ang0*ang1*ang2*v.dxdz)
H2v = -H1v + v.laplace
eqns = [Eq(u.forward, (2*u - u.backward) + s**2/m * (e * H2u + H1v)),
Eq(v.forward, (2*v - v.backward) + s**2/m * (d * H2v + H1v))]
op = Operator(eqns)
sections = list(op._profiler._sections.values())
assert len(sections) == 2
assert sections[0].sops == 4
assert sections[1].sops == expected
|
the-stack_106_24915
|
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check whether repo is clear of fixes - to be used in combination with scripts in the CI pipeline."""
import logging
import pathlib
import sys
import traceback
from typing import List
from git import Repo
from ilcli import Command
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
class FilesChanged(Command):
"""Check whether files have changed in a repository."""
def _init_arguments(self):
self.add_argument('exclude', help='Extensions to exclude.', nargs='*')
self.add_argument('-v', '--verbose', action='store_true')
self.add_argument('-C', help='Repository root', type=pathlib.Path, default=pathlib.Path.cwd())
self.add_argument('-u', '--untracked', help='Error on untracked files.', action='store_true')
def _run(self, args):
if args.verbose:
            logger.setLevel(logging.DEBUG)  # verbose: also surface debug output (e.g. tracebacks)
try:
pretty_exclude = args.exclude if len(args.exclude) > 0 else ''
if FilesChanged.has_changed(args.C, args.exclude):
                logger.info(f'Files have changed (excluding types: {pretty_exclude}).')
return 1
if args.untracked:
if FilesChanged.untracked(args.C):
logger.info('Untracked files in the repo.')
return 1
logger.info(f'No files have changed (excluding the following extensions: {pretty_exclude}).')
return 0
except Exception as e:
logger.error(f'Unexpected error {e}')
logger.debug(traceback.format_exc())
return 2
@staticmethod
def untracked(repo_root: pathlib.Path) -> bool:
"""Determine if there are untracked files in the repo, respecting .gitignore."""
repo = Repo(repo_root)
if len(repo.untracked_files) > 0:
for untracked in repo.untracked_files:
logger.info(f'Untracked: {untracked}')
return True
return False
@staticmethod
def has_changed(repo_root: pathlib.Path, excludes: List[str]) -> bool:
"""Determine if files have changed."""
# Ensure no periods are passed.
excludes = list(map(lambda x: x.lstrip('.'), excludes))
repo = Repo(repo_root)
if repo.bare:
raise Exception('Cannot operate on a bare git repository.')
if not repo.is_dirty():
logger.info('Repository is completely clean.')
return False
head_commit = repo.head.commit
# None is a reference to current working tree.
for diff in head_commit.diff(None):
path = pathlib.Path(diff.a_path)
if path.suffix.lstrip('.') not in excludes:
logger.info(f'The following path has changed {path}.')
return True
return False
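# Minimal usage sketch (illustrative only; the repo path and excluded extension
# below are placeholders, not values used by the CI pipeline):
#
#   repo_root = pathlib.Path('.')
#   FilesChanged.has_changed(repo_root, ['md'])  # True if any non-.md tracked file changed
#   FilesChanged.untracked(repo_root)            # True if untracked files exist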
if __name__ == '__main__':
sys.exit(FilesChanged().run())
|
the-stack_106_24916
|
'''
Created on Aug 9, 2017
@author: Hao Wu
'''
from ScopeFoundry import HardwareComponent
from VOTAScopeHW.arduino_odometer.arduino_odometer_dev import ArduinoOdometerDev
import time
from math import exp
class ArduinoOdometerHW(HardwareComponent):
'''
Hardware Component Class for reading odometer position and speed from an Arduino
'''
name='arduino_odometer'
def setup(self,port='COM4',baud_rate=250000):
'''
add settings for the odometer serial connection and position/speed readouts
'''
self.settings.New(name='port',initial=port,dtype=str,ro=False)
self.settings.New(name='baud_rate',initial=baud_rate,dtype=int,ro=False)
self.settings.New(name='x',initial=0,dtype=int,ro=True)
self.settings.New(name='y',initial=0,dtype=int,ro=True)
self.settings.New(name='vx',initial=0,dtype=int,ro=True)
self.settings.New(name='vy',initial=0,dtype=int,ro=True)
def read(self):
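# Poll the Arduino for the latest reading and mirror the (x, y) position and (vx, vy) speed into the settings.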
position, speed = self._dev.read()
self.settings.x.update_value(position[0])
self.settings.y.update_value(position[1])
self.settings.vx.update_value(speed[0])
self.settings.vy.update_value(speed[1])
def connect(self):
self._dev=ArduinoOdometerDev(self.settings.port.value(),
self.settings.baud_rate.value())
def start(self):
self._dev.open()
def stop(self):
self._dev.close()
def disconnect(self):
try:
self.stop()
del self._dev
except AttributeError:
pass
|
the-stack_106_24918
|
import datetime
from flask import Blueprint, render_template
from BOFS.util import *
from BOFS.globals import db
from BOFS.admin.util import verify_admin
# The name of this variable must match the folder's name.
unity_example = Blueprint('unity_example', __name__,
static_url_path='/unity_example',
template_folder='templates',
static_folder='static')
def handle_game_post():
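# Persist the posted game input against the current participant, then return an empty response.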
log = db.GameLog()
log.participantID = session['participantID']
log.input = request.form['input']
db.session.add(log)
db.session.commit()
return ""
@unity_example.route("/game_embed", methods=['POST', 'GET'])
@verify_correct_page
@verify_session_valid
def game_embed():
if request.method == 'POST':
return handle_game_post()
return render_template("game_embed.html")
@unity_example.route("/game_fullscreen", methods=['POST', 'GET'])
@verify_correct_page
@verify_session_valid
def game_fullscreen():
if request.method == 'POST':
return handle_game_post()
return render_template("game_fullscreen.html")
@unity_example.route("/game_custom", methods=['POST', 'GET'])
@verify_correct_page
@verify_session_valid
def game_custom():
if request.method == 'POST':
return handle_game_post()
return render_template("game_custom.html")
@unity_example.route("/fetch_condition")
@verify_session_valid
def fetch_condition():
return str(session['condition'])
|
the-stack_106_24920
|
from functools import wraps, partial
from itertools import product, chain
import itertools
import collections
import copy
import operator
import random
import numbers
import unittest
import torch
import numpy as np
from torch._six import inf
import collections.abc
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, Dict
from torch.testing import \
(make_non_contiguous, floating_types, floating_types_and, complex_types,
floating_and_complex_types, floating_and_complex_types_and,
all_types_and_complex_and, all_types_and, all_types_and_complex,
integral_types_and, all_types, double_types, make_tensor)
from .._core import _dispatch_dtypes
from torch.testing._internal.common_device_type import \
(onlyOnCPUAndCUDA, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfNoCusolver,
skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, precisionOverride, toleranceOverride, tol)
from torch.testing._internal.common_cuda import CUDA11OrLater, SM53OrLater, SM60OrLater
from torch.testing._internal.common_utils import \
(is_iterable_of_tensors,
random_symmetric_matrix, random_symmetric_psd_matrix,
make_fullrank_matrices_with_distinct_singular_values,
random_symmetric_pd_matrix, make_symmetric_matrices,
make_symmetric_pd_matrices, random_square_matrix_of_rank,
random_fullrank_matrix_distinct_singular_value,
TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY,
torch_to_numpy_dtype_dict, TEST_WITH_ASAN,
GRADCHECK_NONDET_TOL,)
import torch.testing._internal.opinfo_helper as opinfo_helper
from setuptools import distutils
if TEST_SCIPY:
import scipy.special
# Reasonable testing sizes for dimensions
L = 20
M = 10
S = 5
# Unique value to distinguish default from anything else
_NOTHING = object()
class DecorateInfo(object):
"""Describes which test, or type of tests, should be wrapped in the given
decorators when testing an operator. Any test that matches all provided
arguments will be decorated. The decorators will only be applied if the
active_if argument is True."""
__slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']
def __init__(self, decorators, cls_name=None, test_name=None, *,
device_type=None, dtypes=None, active_if=True):
self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]
self.cls_name = cls_name
self.test_name = test_name
self.device_type = device_type
self.dtypes = dtypes
self.active_if = active_if
def is_active(self, cls_name, test_name, device_type, dtype):
return (
self.active_if and
(self.cls_name is None or self.cls_name == cls_name) and
(self.test_name is None or self.test_name == test_name) and
(self.device_type is None or self.device_type == device_type) and
(self.dtypes is None or dtype in self.dtypes)
)
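# Example (illustrative only, not attached to any OpInfo here; the class/test
# names follow test_ops.py conventions): skip the test_out test on CUDA for
# float16 by adding an entry like this to an OpInfo's `decorators`/`skips` tuple:
#
#   DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
#                device_type='cuda', dtypes=(torch.float16,))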
class SkipInfo(DecorateInfo):
"""Describes which test, or type of tests, should be skipped when testing
an operator. Any test that matches all provided arguments will be skipped.
The skip will only be checked if the active_if argument is True."""
def __init__(
self, cls_name=None, test_name=None, *, device_type=None, dtypes=None, active_if=True,
expected_failure=False):
"""
Args:
cls_name: the name of the test class to skip
test_name: the name of the test within the test class to skip
device_type: the devices for which to skip the tests
dtypes: the dtypes for which to skip the tests
active_if: whether tests matching the above arguments should be skipped
expected_failure: whether to assert that skipped tests fail
"""
decorator = unittest.expectedFailure if expected_failure else unittest.skip("Skipped!")
super().__init__(decorators=decorator, cls_name=cls_name, test_name=test_name,
device_type=device_type, dtypes=dtypes, active_if=active_if)
class SampleInput(object):
"""Represents sample inputs to a function."""
__slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']
def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=""):
# input is the first input to the op and must be either a Tensor or TensorList (Sequence[Tensor]).
# This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
# op with TensorList inputs do not support method or inplace variants.
assert isinstance(input, torch.Tensor) or is_iterable_of_tensors(input)
self.input: Union[torch.Tensor, Sequence[torch.Tensor]] = input
self.args = args
self.kwargs = kwargs if kwargs is not None else {}
self.output_process_fn_grad = output_process_fn_grad
self.name = name
# Specifies if `self.input` is broadcasted or not,
# given that the operator supports broadcasting.
# This field is used to verify the behavior for inplace variant.
#
# If a SampleInput is marked with `broadcasts_input=True`,
# it is verified that we get a `RuntimeError` with this sample,
# and inplace variant. Also inplace grad{grad} tests are skipped,
# for such inputs (as they will error out otherwise).
self.broadcasts_input = broadcasts_input
def _repr_helper(self, formatter):
# Helper function to return the details of the SampleInput as `str`
# It consolidates all the fields of SampleInput and allows,
# formatting the fields like `input`, `args`, etc with `formatter`
# callable to customize the representation.
# Look at `summary` method for example.
arguments = [
f'input={formatter(self.input)}',
f'args={formatter(self.args)}',
f'kwargs={formatter(self.kwargs)}',
f'output_process_fn_grad={self.output_process_fn_grad}',
f'broadcasts_input={self.broadcasts_input}',
f'name={repr(self.name)}']
return f'SampleInput({", ".join(a for a in arguments if a is not None)})'
def __repr__(self):
return self._repr_helper(lambda x: x)
def summary(self):
# Returns the SampleInput details in a more
# friendly format.
# It formats `Tensor` and `TensorList`
# in a more condensed representation.
def formatter(arg):
# Format any instance of `Tensor` (standalone, in list, or in dict)
# by Tensor[TensorShape]
# Eg. Tensor with shape (3, 4) is formatted as Tensor[3, 4]
if isinstance(arg, torch.Tensor):
shape = str(tuple(arg.shape)).replace('(', '').replace(')', '')
return f"Tensor[{shape}]"
elif isinstance(arg, dict):
return {k: formatter(v) for k, v in arg.items()}
elif is_iterable_of_tensors(arg):
return "TensorList[" + ", ".join(map(formatter, arg)) + "]"
elif isinstance(arg, (list, tuple)): # Handle list, tuple
return "(" + ",".join(map(formatter, arg)) + ")"
return repr(arg)
return self._repr_helper(formatter)
# Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)
def numpy(self):
# Converts tensors to ndarrays by calling .detach().cpu().numpy() on them
# Numbers, strings, and bool are preserved as is
# Lists, tuples and dicts are handled by calling this function recursively
def to_numpy(x):
def _np(t):
return t.detach().cpu().numpy()
if isinstance(x, torch.Tensor):
return _np(x)
elif isinstance(x, list):
return list(map(to_numpy, x))
elif isinstance(x, tuple):
return tuple(map(to_numpy, x))
elif isinstance(x, dict):
return {k: to_numpy(v) for k, v in x.items()}
elif isinstance(x, (numbers.Number, bool, str)):
return x
raise ValueError("Unknown type {0}!".format(type(x)))
sample_np_input, np_args, np_kwargs = to_numpy(self.input), to_numpy(self.args), to_numpy(self.kwargs)
return (sample_np_input, np_args, np_kwargs)
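# Example (illustrative only): a SampleInput for a binary op such as torch.add,
# wrapping a 2x2 input tensor plus one positional tensor argument and the
# `alpha` keyword argument. `device`, `dtype` and `requires_grad` are the usual
# parameters of a sample inputs function:
#
#   t = make_tensor((2, 2), device, dtype, requires_grad=requires_grad)
#   other = make_tensor((2, 2), device, dtype)
#   SampleInput(t, args=(other,), kwargs={'alpha': 2})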
class AliasInfo(object):
"""Class holds alias information. For example, torch.abs ->
torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
"""
def __init__(self, alias_name):
self.name = alias_name
self.op = _getattr_qual(torch, alias_name)
self.method_variant = getattr(torch.Tensor, alias_name, None)
self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)
def __call__(self, *args, **kwargs):
return self.op(*args, **kwargs)
# Extension of getattr to support qualified names
# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
try:
for path in name.split('.'):
obj = getattr(obj, path)
return obj
except AttributeError:
if default is not _NOTHING:
return default
else:
raise
# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# This note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do two things:
#
# 1) to simplify testing an operator
# 2) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# Both these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests still have to be written manually.
#
# The utility of OpInfos can also be motivated from a different perspective.
# PyTorch is a complicated framework with many interrelated systems, too
# many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo understand how to test its forward
# mode AD or NNC support that's typically handled automatically just by
# defining an OpInfo. This is a helpful perspective to have, because it's often
# surprising to OpInfo writers that just implementing an OpInfo typically can't
# verify an operator is actually implemented correctly. "If an OpInfo doesn't
# validate my op works as expected, what's the point of it?" But the point of
# it is that it lets engineers focus on testing their operator logic instead
# of having to write tests for how the operator interacts with each of
# PyTorch's many systems. And, OK, sometimes it validates your op works
# the way you want and all you have to do is write an OpInfo and you're done
# testing... more on that below.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return a list of SampleInputs (see the class description above).
# Each SampleInput defines an "input", "args", "kwargs",
# an "output_process_fn_grad" function, the "broadcasts_input" bool and
# a "name".
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
#
# "broadcasts_input" is a bool indicated if the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
# in test_ops.py, but some system-specific tests are defined in those
# systems' test files, and subclass-specific tests are defined in the test
# file that corresponds to that subclass (see below).
# Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
# test_ops.py:
#
# - that its supported dtypes are specified correctly
# - that it supports the out= argument properly (if it allows out=),
# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# - that it works with the conjugate view bit properly
# - that its function, method, and inplace variants perform the same operation
# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
# do the same thing).
# - that its inplace variant preserves the input's storage
# - that its gradient formula is implemented correctly, and that it supports
# gradgrad and complex grad and gradgrad and forward mode AD properly for
# the op's function and inplace variants (method variants are skipped
# to reduce test time).
# - that the operation performs the same operation when traced or scripted
# using the jit
# - that the operation is autodifferentiated by the jit as expected
# - that the operator's aliases, if any, perform the same operation and that
# the jit understands the alias
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
# and test_fx.py. These tests validate that operators work with NNC and FX
# as expected.
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# very unique class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to the torch, torch.fft, torch.linalg,
# or torch.special namespaces then you should add an OpInfo for it. As
# mentioned a couple times above, implementing an OpInfo is not usually
# sufficient testing (unless the operator is a unary elementwise operator).
# The OpInfo will only test the properties described in the "WHAT'S TESTED"
# section. It DOES NOT verify that the operator is implemented correctly.
#
# We are currently reviewing if operators in the torch.nn.functional namespace
# will be added as OpInfos, but you are encouraged to add an OpInfo for
# such operators, too.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
# be consumed by a variety of systems it can be hard to understand how to
# deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
# function must be defined, and the operator's dtypes must be specified.
# Once that's done you should run the operator's tests in test_ops.py
# (these can be filtered using the "-k" argument in pytest). Tests that
# fail should provide an error message that describes what to change about
# your OpInfo. You don't need to worry about changing an OpInfo's default
# values unless a test yells at you.
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
# your test provides a clear error message describing what to do when it
# fails. You should not assume the OpInfo implementer is familiar with your
# system.
#
# If you see a confusing error message while developing an OpInfo then please
# file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
# but it's probably necessary as long as OpInfos don't require
# learning about all the systems that consume them. One thing that can help
# is the get_supported_dtypes() function defined in opinfo_helper.py. This
# function can be used to programmatically specify the dtypes an operator
# supports, and is especially useful if writing an OpInfo on a machine
# without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve, particularly for the
# torch, torch.fft, torch.linalg, and torch.special namespaces, and possibly
# for the torch.nn.functional namespace, too. In addition an analogous class,
# ModuleInfo, will be developed to improve module testing.
#
# We also expect at least two new OpInfo subclasses: BinaryUfuncInfo and
# ReductionInfo. Both will have new automated tests for correctness, too,
# which might make testing binary elementwise operations and reductions as
# simple as testing unary elementwise operations today.
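# A compressed, illustrative sketch of the pieces described above. The real
# entries live in the op_db sequence later in this file; 'foo' is a placeholder
# and a real entry must name an existing torch operator:
#
#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#       t = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
#       return [SampleInput(t), SampleInput(t, args=(2,))]
#
#   OpInfo('foo',
#          dtypes=floating_types(),
#          sample_inputs_func=sample_inputs_foo,
#          supports_out=False)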
# Classes and methods for the operator database
class OpInfo(object):
"""Operator information and helper functions for acquiring it."""
def __init__(self,
name, # the string name of the function
*,
ref=None, # An optional reference function that accepts ndarrays (AKA "NumPy arrays").
# If given, the op will be compared with its reference on each of its sample inputs.
# the following metadata describes the operator, its variants,
# and its aliases, if any
aliases=None, # iterable of aliases, e.g. ("absolute",) for torch.abs
variant_test_name='', # additional string to include in the test name
# this is useful when an op needs multiple OpInfos,
# like divide does, often because it's really several
# different ops behind the scenes
op=None, # the function variant of the operation, populated as torch.<name> if None
method_variant=_NOTHING, # explicitly specifies the method variant of the operator
# if _NOTHING (default), the method variant will be autopopulated
# if None, then the OpInfo specifies no method variant
inplace_variant=_NOTHING, # explicitly specifies the inplace variant of the operator
# if _NOTHING (default), the inplace variant will be autopopulated
# if None, then the OpInfo specifies no inplace variant
# the following metadata are test directives for skipping or
# modifying tests and a pointer to the op's sample inputs function
# this function lets the OpInfo generate valid inputs
skips=tuple(), # information about which tests to skip
decorators=tuple(), # decorators to apply to generated tests
sample_inputs_func=None, # function to generate sample inputs
# the following metadata relates to dtype support and is tested for correctness in test_ops.py
dtypes=floating_types(), # dtypes this function is expected to work with
# the following dtypesIf... options override the dtypes value
# on their respective device types
dtypesIfCPU=None, # dtypes this function is expected to work with on CPU
dtypesIfCUDA=None, # dtypes this function is expected to work with on CUDA
dtypesIfROCM=None, # dtypes this function is expected to work with on ROCM
backward_dtypes=None, # backward dtypes this function is expected to work with
backward_dtypesIfCPU=None, # backward dtypes this function is expected to work with on CPU
backward_dtypesIfCUDA=None, # backward dtypes this function is expected to work with on CUDA
backward_dtypesIfROCM=None, # backward dtypes this function is expected to work with on ROCM
default_test_dtypes=None, # dtypes to test with by default. Tests are instantiated with
# these dtypes for the op unless otherwise specified.
# This is helpful in reducing the test matrix.
# the following metadata describes the operators out= support
supports_out=True, # whether the op supports the out kwarg
# defaults to True, if the op does not allow the out kwarg or
# supports it incorrectly then test_out in test_ops.py should fail
safe_casts_outputs=False, # whether op allows safe casting when writing to out arguments
# the following metadata relates to autograd support
supports_autograd=True, # whether the operation supports gradient computations
# if true, gradient correctness is tested in test_ops.py
# using the op's sample inputs
supports_gradgrad=True, # whether the op supports second order gradients
# if true, gradgrad correctness is tested in test_ops.py
# (this value is ignored if supports_autograd=False)
supports_inplace_autograd=None, # whether the operation supports inplace autograd
# if true, tested in test_ops.py
# defaults to supports_autograd's value
supports_forward_ad=False, # Whether the operation support forward mode AD
# If the value is True, we check that the gradients are correct
# If the value is False, we test that forward grad is not implemented
gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs), # wrapper function for gradcheck
check_batched_grad=True, # whether to check batched grad when doing gradcheck
check_batched_gradgrad=True, # whether to check batched grad grad when doing gradgradcheck
gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck
gradcheck_fast_mode=None,  # Whether to use the fast implementation for gradcheck/gradgradcheck.
# When set to None, defers to the default value provided by the wrapper
# function around gradcheck (testing._internal.common_utils.gradcheck)
# the following metadata relates to JIT support and is tested for correctness in test_ops.py
aten_name=None, # name of the corresponding aten:: operator
assert_autodiffed=False, # if a op's aten::node is expected to be symbolically autodiffed
autodiff_nonfusible_nodes=None, # a list of strings with node names that are expected to be in a
# DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
# default is populated to be ['aten::(name of Python operator)']
autodiff_fusible_nodes=None, # a list of strings with node names that are expected to be in FusionGroups
# inside of DifferentiableGraphs when this operation is autodiffed.
# Ex: ['aten::add', 'aten::mm'], defaults to an empty list
# Note: currently no ops use fusible nodes
# the following metadata relates to sparse support and is used in test_sparse.py
supports_sparse=False, # whether the op supports sparse inputs
# the following metadata relates to complex support and is checked in test_ops.py
test_conjugated_samples=True,
test_neg_view=True,
assert_jit_shape_analysis=False, # assert that jit shape analysis fully propagates shape
):
dtypes_args = (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM)
# Validates the dtypes are generated from the dispatch-related functions
for dtype_list in dtypes_args:
assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))
self.name = name
self.ref = ref
self.aten_name = aten_name if aten_name is not None else name
self.variant_test_name = variant_test_name
# Attribute to verify dynamic_dtypes are used.
self.dynamic_dtypes = any(map(lambda dtypes: isinstance(
dtypes, opinfo_helper._dynamic_dispatch_dtypes), dtypes_args))
if self.dynamic_dtypes:
# Make sure `dtypesIfCUDA` is dynamic, if dynamic dispatch is used for CPU
# This is because, below we set dtypesIfCUDA to dtypes if they are None.
assert isinstance(dtypesIfCUDA, opinfo_helper._dynamic_dispatch_dtypes), \
(f"To use dynamic dypes for operator {name}, "
"acquire the dtypes dynamically for argument `dtypesIfCUDA`."
"This is to ensure that CUDA dtypes are acquired correctly as they"
"differ from CPU dtypes occasionally")
self.dtypes = set(dtypes)
# NOTE: backward dtypes must be acquired before forward dtypes
# since they fallback to explicit (not implicit!) specifications of
# forward dtypes
self.backward_dtypes = set(backward_dtypes) if backward_dtypes is not None else self.dtypes
self.backward_dtypesIfCPU = set(backward_dtypesIfCPU) if backward_dtypesIfCPU is not None else (
backward_dtypes if backward_dtypes is not None
else dtypesIfCPU if dtypesIfCPU is not None
else dtypes)
self.backward_dtypesIfCUDA = set(backward_dtypesIfCUDA) if backward_dtypesIfCUDA is not None else (
backward_dtypes if backward_dtypes is not None
else dtypesIfCUDA if dtypesIfCUDA is not None
else dtypes)
self.backward_dtypesIfROCM = set(backward_dtypesIfROCM) if backward_dtypesIfROCM is not None else (
backward_dtypesIfCUDA if backward_dtypesIfCUDA is not None
else backward_dtypes if backward_dtypes is not None
else dtypesIfROCM if dtypesIfROCM is not None
else dtypesIfCUDA if dtypesIfCUDA is not None
else dtypes)
self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes
self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes
self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypesIfCUDA
self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None
# NOTE: if the op is unspecified it is assumed to be under the torch namespace
self.op = op if op else _getattr_qual(torch, self.name)
method_variant = getattr(torch.Tensor, name, None) if method_variant is _NOTHING else method_variant
# attributes like real, imag are not callable
self.method_variant = method_variant if callable(method_variant) else None
inplace_name = name + "_"
self.inplace_variant = getattr(torch.Tensor, inplace_name, None) \
if inplace_variant is _NOTHING else inplace_variant
self.operator_variant = getattr(operator, name, None)
self.supports_out = supports_out
self.safe_casts_outputs = safe_casts_outputs
self.decorators = (*decorators, *skips)
self.sample_inputs_func = sample_inputs_func
self.assert_autodiffed = assert_autodiffed
self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []
if autodiff_nonfusible_nodes is None:
self.autodiff_nonfusible_nodes = ['aten::' + self.name]
else:
self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes
# autograd support
self.supports_autograd = supports_autograd
self.supports_inplace_autograd = supports_inplace_autograd
if self.supports_inplace_autograd is None:
self.supports_inplace_autograd = supports_autograd
self.gradcheck_wrapper = gradcheck_wrapper
self.supports_gradgrad = supports_gradgrad
self.supports_forward_ad = supports_forward_ad
self.check_batched_grad = check_batched_grad
self.check_batched_gradgrad = check_batched_gradgrad
self.gradcheck_nondet_tol = gradcheck_nondet_tol
self.gradcheck_fast_mode = gradcheck_fast_mode
self.supports_sparse = supports_sparse
self.aliases = ()
if aliases is not None:
self.aliases = tuple(AliasInfo(a) for a in aliases) # type: ignore[assignment]
self.assert_jit_shape_analysis = assert_jit_shape_analysis
self.test_conjugated_samples = test_conjugated_samples
self.test_neg_view = test_neg_view
def __call__(self, *args, **kwargs):
"""Calls the function variant of the operator."""
return self.op(*args, **kwargs)
def get_op(self):
"""Returns the function variant of the operator, torch.<op_name>."""
return self.op
def get_method(self):
"""Returns the method variant of the operator, torch.Tensor.<op_name>.
Returns None if the operator has no method variant.
"""
return self.method_variant
def get_inplace(self):
"""Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
Returns None if the operator has no inplace variant.
"""
return self.inplace_variant
def get_operator_variant(self):
"""Returns operator variant of the operator, e.g. operator.neg
Returns None if the operator has no operator variant.
"""
return self.operator_variant
def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs but with the tensor input or first
tensor in a sequence input conjugated.
"""
# TODO: Remove the try/except once all operators have sample_inputs_func with
# **kwargs in their signature.
try:
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
except TypeError:
samples = self.sample_inputs_func(self, device, dtype, requires_grad)
conj_samples = list(samples)
def conjugate(tensor):
_requires_grad = tensor.requires_grad
with torch.no_grad():
tensor = tensor.conj()
return tensor.requires_grad_(_requires_grad)
for i in range(len(samples)):
sample = conj_samples[i]
# Note: it is assumed that the input here is either a tensor or tensorlist
if isinstance(sample.input, torch.Tensor):
sample.input = conjugate(sample.input)
else:
with torch.no_grad():
sample.input[0] = conjugate(sample.input[0])
return tuple(conj_samples)
def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs.
These samples should be sufficient to test the function works correctly
with autograd, TorchScript, etc.
"""
# TODO: Remove the try/except once all operators have sample_inputs_func with
# **kwargs in their signature.
try:
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
except TypeError:
samples = self.sample_inputs_func(self, device, dtype, requires_grad)
if 'include_conjugated_inputs' in kwargs and kwargs.get('include_conjugated_inputs'):
conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)
samples_list = list(samples)
samples_list.extend(conj_samples)
samples = tuple(samples_list)
return samples
def get_decorators(self, test_class, test_name, device, dtype):
'''Returns the decorators targeting the given test.'''
result = []
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(test_class, test_name, device, dtype):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
def supported_dtypes(self, device_type):
if device_type == 'cpu':
return self.dtypesIfCPU
if device_type == 'cuda':
return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA
else:
return self.dtypes
def supported_backward_dtypes(self, device_type):
if not self.supports_autograd:
return set()
backward_dtypes = None
if device_type == 'cpu':
backward_dtypes = self.backward_dtypesIfCPU
elif device_type == 'cuda':
backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA
else:
backward_dtypes = self.backward_dtypes
allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16)
return set(allowed_backward_dtypes).intersection(backward_dtypes)
def supports_complex_autograd(self, device_type):
if device_type == 'cpu':
return any(dtype.is_complex for dtype in self.backward_dtypesIfCPU)
if device_type == 'cuda':
if TEST_WITH_ROCM:
return any(dtype.is_complex for dtype in self.backward_dtypesIfROCM)
else:
return any(dtype.is_complex for dtype in self.backward_dtypesIfCUDA)
else:
return any(dtype.is_complex for dtype in self.backward_dtypes)
def supports_dtype(self, dtype, device_type):
return dtype in self.supported_dtypes(device_type)
def default_test_dtypes(self, device_type):
"""Returns the default dtypes used to test this operator on the device.
Equal to the operator's default_test_dtypes filtered to remove dtypes
not supported by the device.
"""
supported = self.supported_dtypes(device_type)
return (supported if self._default_test_dtypes is None
else supported.intersection(self._default_test_dtypes))
def _generate_reduction_inputs(device, dtype, requires_grad):
"""Generates input tensors for testing reduction operators"""
yield make_tensor([], device, dtype, requires_grad=requires_grad)
yield make_tensor([2], device, dtype, requires_grad=requires_grad)
yield make_tensor([2, 3], device, dtype, requires_grad=requires_grad, noncontiguous=True)
yield make_tensor([3, 2, 1, 5], device, dtype, requires_grad=requires_grad)
def _generate_reduction_kwargs(ndim, supports_multiple_dims=True):
"""Generates a subset of all valid dim and keepdim kwargs given ndim that
is appropriate for testing reduction operators.
"""
# Test default dim and keepdim
yield {}
# Test reducing inner and outer most dimensions
yield {'dim': 0, 'keepdim': True}
yield {'dim': -1, 'keepdim': False}
# Test reducing middle dimension
if ndim > 2:
yield {'dim': ndim // 2, 'keepdim': True}
if supports_multiple_dims:
# Test reducing all dimensions
yield {'dim': tuple(range(ndim)), 'keepdim': False}
# Test reducing both first and last dimensions
if ndim > 1:
yield {'dim': (0, -1), 'keepdim': True}
# Test reducing every other dimension starting with the second
if ndim > 3:
yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}
def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for reduction operators."""
# TODO(@heitorschueroff) Once all reduction operators are using
# ReductionOpInfo use op_info.supports_multiple_dims directly.
supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True)
# TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
# use op_info.generate_args_kwargs directly.
generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {}))
inputs: List[SampleInput] = []
for t in _generate_reduction_inputs(device, dtype, requires_grad):
for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):
kwargs.update(reduction_kwargs)
inputs.append(SampleInput(t, args=args, kwargs=kwargs))
return inputs
# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
class ReductionOpInfo(OpInfo):
"""Reduction operator information.
An operator is a reduction operator if it reduces one or more dimensions of
the input tensor to a single value. Reduction operators must implement the
following signature:
- `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`
ReductionOpInfo tests that reduction operators implement a consistent API.
Optional features such as reducing over multiple dimensions are captured in
the optional keyword parameters of the ReductionOpInfo constructor.
If a reduction operator does not yet implement the full required API of
reduction operators, this should be documented by skipping the failing
tests rather than adding optional parameters to ReductionOpInfo.
NOTE
The API for reduction operators has not yet been finalized and some
requirements may change.
See tests in test/test_reductions.py
"""
def __init__(
self, name, *,
# The identity value for the operator if it has one.
identity: Optional[Any] = None,
# The nan policy for the operator if it implements one.
# - propagate: NaN values are propagated to the output
# - omit: NaN values are discarded during the reduction
nan_policy: Optional[str] = None,
# Whether the operator supports reducing multiple dimensions.
supports_multiple_dims: bool = True,
# Whether the operator promotes integral to floating point dtypes.
promotes_int_to_float: bool = False,
# Whether the operator promotes all integral dtypes to int64.
promotes_int_to_int64: bool = False,
# If a specific dtype is given, then the operator always returns that
# dtype irrespective of the input dtype. If None, the operator returns
# the dtype according to the type promotion rules above.
result_dtype: Optional[torch.dtype] = None,
# ReductionOpInfo tests generate their own input, dim and keepdim
# arguments and call this function to generate tuples of extra args and
# kwargs to use when calling the op. This is required for operators that
# have other required parameters besides the input tensor.
generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}),
# Options from the OpInfo base class
**kwargs,
):
assert nan_policy in (None, 'propagate', 'omit')
# These are mutually exclusive options
assert not (result_dtype and promotes_int_to_float)
assert not (result_dtype and promotes_int_to_int64)
assert not (promotes_int_to_float and promotes_int_to_int64)
# Default sample_inputs_func for ReductionOpInfo which augments sample
# inputs from sample_inputs_reduction with the args and kwargs from
# generate_args_kwargs. This is only used if sample_inputs_func is None.
def sample_inputs_func(*args, **kwargs):
kwargs['supports_multiple_dims'] = supports_multiple_dims
kwargs['generate_args_kwargs'] = generate_args_kwargs
return sample_inputs_reduction(*args, **kwargs)
# Override OpInfo defaults and call base class __init__
kwargs.setdefault('inplace_variant', None)
kwargs.setdefault('sample_inputs_func', sample_inputs_func)
super(ReductionOpInfo, self).__init__(name, **kwargs)
self.identity = identity
self.nan_policy = nan_policy
self.supports_multiple_dims = supports_multiple_dims
self.promotes_int_to_float = promotes_int_to_float
self.promotes_int_to_int64 = promotes_int_to_int64
self.result_dtype = result_dtype
self.generate_args_kwargs = generate_args_kwargs
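# Example (illustrative only; 'foo' stands in for a real torch operator name):
# a reduction that needs an extra positional argument (say an order `p`) can
# provide it through generate_args_kwargs:
#
#   ReductionOpInfo('foo', identity=0, supports_multiple_dims=True,
#                   generate_args_kwargs=lambda t, dim=None, keepdim=False: (yield (2,), {}))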
def sample_inputs_unary(op_info, device, dtype, requires_grad, **kwargs):
low, high = op_info.domain
low = low if low is None else low + op_info._domain_eps
high = high if high is None else high - op_info._domain_eps
return (SampleInput(make_tensor((L,), device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)))
# Metadata class for unary "universal functions (ufuncs)" that accept a single
# tensor and have common properties like:
class UnaryUfuncInfo(OpInfo):
"""Operator information for 'universal unary functions (unary ufuncs).'
These are functions of a single tensor with common properties like:
- they are elementwise functions
- the input shape is the output shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCPU=None,
dtypesIfCUDA=None,
dtypesIfROCM=None,
default_test_dtypes=(
torch.uint8, torch.long, torch.half, torch.bfloat16,
torch.float32, torch.cfloat), # dtypes which tests check by default
domain=(None, None), # the [low, high) domain of the function
handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)
handles_extremals=True, # whether the op correctly handles extremal values (like inf)
handles_complex_extremals=True,  # whether the op correctly handles complex extremals (like inf -infj)
supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle
sample_inputs_func=sample_inputs_unary,
sample_kwargs=lambda device, dtype, input: ({}, {}),
supports_sparse=False,
**kwargs):
super(UnaryUfuncInfo, self).__init__(name,
dtypes=dtypes,
dtypesIfCPU=dtypesIfCPU,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
default_test_dtypes=default_test_dtypes,
sample_inputs_func=sample_inputs_func,
supports_sparse=supports_sparse,
**kwargs)
self.ref = ref
self.domain = domain
self.handles_large_floats = handles_large_floats
self.handles_extremals = handles_extremals
self.handles_complex_extremals = handles_complex_extremals
self.supports_complex_to_float = supports_complex_to_float
# test_unary_ufuncs.py generates its own inputs to test the consistency
# of the operator on sliced tensors, non-contig tensors, etc.
# `sample_kwargs` is a utility function to provide kwargs
# along with those inputs if required (eg. clamp).
# It should return two dictionaries, first holding kwarg for
# torch operator and second one for reference NumPy operator.
self.sample_kwargs = sample_kwargs
# Epsilon to ensure grad and gradgrad checks don't test values
# outside a function's domain.
self._domain_eps = 1e-5
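# Example (illustrative only; the actual entry for any given op lives in op_db
# below): a unary ufunc pairs the torch op with a NumPy reference and, where
# needed, a restricted domain:
#
#   UnaryUfuncInfo('log', ref=np.log, domain=(0, float('inf')))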
def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
args_cases = (
# Cases with tensor indices.
(torch.tensor([1, 2, 3]),),
(torch.tensor(1),),
(torch.tensor([1, 2, 3]), 1),
(torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1),
# Cases with list of indices.
((2, 4),),
((2, 4), 1),
((2, 4), -1),
# Cases with integer section.
(3,),
(3, 1),
(3, -1),
)
def generator():
for args in args_cases:
yield SampleInput(make_input((S, S, S)), args=args)
return list(generator())
def sample_inputs_linalg_det(op_info, device, dtype, requires_grad):
kw = dict(device=device, dtype=dtype)
inputs = [
make_tensor((S, S), **kw),
make_tensor((1, 1), **kw), # 1x1
random_symmetric_matrix(S, **kw), # symmetric
random_symmetric_psd_matrix(S, **kw), # symmetric_psd
random_symmetric_pd_matrix(S, **kw), # symmetric_pd
random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null
random_square_matrix_of_rank(S, 1, **kw), # rank1
random_square_matrix_of_rank(S, 2, **kw), # rank2
random_fullrank_matrix_distinct_singular_value(S, **kw), # distinct_singular_value
make_tensor((3, 3, S, S), **kw), # batched
make_tensor((3, 3, 1, 1), **kw), # batched_1x1
random_symmetric_matrix(S, 3, **kw), # batched_symmetric
random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd
random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd
random_fullrank_matrix_distinct_singular_value(S, 3, 3, **kw), # batched_distinct_singular_values
make_tensor((0, 0), **kw),
make_tensor((0, S, S), **kw),
]
for t in inputs:
t.requires_grad = requires_grad
return [SampleInput(t) for t in inputs]
def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_singular_matrix_batch_base(size, rank):
assert size[-1] == size[-2]
assert rank > 0 and rank <= size[-1]
with torch.no_grad():
n = size[-1]
a = make_arg(size[:-2] + (n, rank)) / 10
b = make_arg(size[:-2] + (rank, n)) / 10
x = a @ b
lu, pivs = x.lu()
p, l, u = torch.lu_unpack(lu, pivs)
u_diag_abs = u.diagonal(0, -2, -1).abs()
u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values
u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices
u.diagonal(0, -2, -1).div_(u_diag_abs_largest)
u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps
matrix = p @ l @ u
assert (matrix.det().abs() < torch.finfo(dtype).eps * torch.linalg.matrix_norm(matrix)).all().item()
matrix.requires_grad_(requires_grad)
return matrix
def sample_generator():
for batch, size in product(((), (2,), (2, 2)), range(6)):
shape = batch + (size, size)
for rank in range(1, size):
yield make_singular_matrix_batch_base(shape, rank)
return [SampleInput(t) for t in sample_generator()]
def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad):
# (<matrix_size>, (<batch_sizes, ...>))
test_sizes = [
(1, ()),
(2, (0,)),
(2, (2,)),
]
inputs = []
for matrix_size, batch_sizes in test_sizes:
size = batch_sizes + (matrix_size, matrix_size)
for n in (0, 3, 5):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(t, args=(n,)))
for n in [-4, -2, -1]:
t = random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_sizes, device=device, dtype=dtype)
t.requires_grad = requires_grad
inputs.append(SampleInput(t, args=(n,)))
return inputs
def sample_inputs_hsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((6,), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),
SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),)
def sample_inputs_vsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((6, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),
SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),)
def sample_inputs_dsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),
SampleInput(make_tensor((S, S, 6), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),)
def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad):
# Each test case consists of the sizes in the chain of multiplications
# e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)
test_cases = [
[1, 2, 1],
[2, 0, 2],
[0, 2, 2],
[2, 2, 2, 2],
[2, 3, 4, 5],
[5, 4, 0, 2],
[2, 4, 3, 5, 3, 2]
]
result = []
for sizes in test_cases:
tensors = []
for size in zip(sizes[:-1], sizes[1:]):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
tensors.append(t)
result.append(SampleInput(tensors))
return result
def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):
sizes = ((2, 2), (2, 3, 2))
ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)
dims = ((-2, -1), (-1, 0))
inputs: List[SampleInput] = []
for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(t, args=(ord, dim, keepdim)))
return inputs
def sample_inputs_linalg_norm(op_info, device, dtype, requires_grad):
test_sizes = [
(S,),
(0,),
(S, S),
(0, 0),
(S, 0),
(0, S),
(S, S, S),
(0, S, S),
(S, 0, S),
(0, 0, 0),
]
vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)
matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)
inputs = []
for test_size in test_sizes:
is_vector_norm = len(test_size) == 1
is_matrix_norm = len(test_size) == 2
for keepdim in [False, True]:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype, low=None, high=None,
requires_grad=requires_grad),
kwargs=dict(
keepdim=keepdim)))
if not (is_vector_norm or is_matrix_norm):
continue
ords = vector_ords if is_vector_norm else matrix_ords
for ord in ords:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(ord,),
kwargs=dict(
keepdim=keepdim)))
if ord in ['nuc', 'fro']:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
kwargs=dict(
ord=ord,
keepdim=keepdim,
dim=(0, 1))))
return inputs
def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
(()),
((S, )),
((S, S)),
((S, M, S))
)
def generator():
for shape in cases:
yield SampleInput(make_arg(shape))
return list(generator())
def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (2,), '2'),
((S, S), (0,), '0'),
((S, S), (0.5,), '0_5'),
((S, S), (1,), '1'),
((S, S), (3,), '3'),
((S, S), (-1,), 'neg_1'),
((S, S), (-2,), 'neg_2'),
((S, S), (-0.5,), 'neg_0_5'),
((S, S), (-1.5,), 'neg_1_5'),
)
cases_nonzero_input = (
((S, S, S), (1.5,), '1_5_default'),
((S, S, S), (1.5, 1), '1_5_dim'),
((S, S, S), (1.5, -1), '1_5_neg_dim'),
((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'),
((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'),
)
cases_negdim_base = (
((S, S), (-2, 1,), 'neg_2_2_dim'),
((S, S), (-1, 1,), 'neg_1_2_dim'),
((S, S), (0, 1,), '0_2_dim'),
((S, S), (1, 1,), '1_2_dim'),
((S, S), (2, 1,), '2_2_dim'),
((S, S), (3, 1,), '3_2_dim'),
((S, S, S), (2, 1), '2_dim'),
((S, S, S), (3, 1), '3_dim'),
((S, S, S), (2, 1, True), 'keepdim_2_dim'),
((S, S, S), (3, 1, True), 'keepdim_3_dim'),
((), (2, 0), '2_dim_scalar'),
((), (3, 0), '3_dim_scalar'),
((), (2, 0, True), 'keepdim_2_dim_scalar'),
((), (3, 0, True), 'keepdim_3_dim_scalar'),
)
cases_negdim = []
for case in cases_negdim_base:
cases_negdim.append(case)
shape, args, name = case
new_args = copy.deepcopy(list(args))
new_args[1] *= -1
cases_negdim.append((shape, tuple(new_args), name.replace("_dim", "_neg_dim")))
def generator():
for shape, args, name in itertools.chain(cases, cases_negdim):
yield SampleInput(make_arg(shape), args=args, name=name)
for shape, args, name in cases_nonzero_input:
yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name)
return list(generator())
def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (), 'default'),
((S, S), ('fro',), 'fro_default'),
((S, S), ('fro', [0, 1],), 'fro'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), ('nuc',), 'nuc'),
((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (-inf,), '-inf'),
((S, S), (inf,), 'inf'),
((S, S), (inf, 1,), 'inf_2_dim'),
((S, S), (inf, -1,), 'inf_2_neg_dim'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_linalg_vector_norm(op_info, device, dtype, requires_grad, **kwargs):
size_1D = (S,)
size_2D = (2, 2)
test_cases = [
# input size, ord, dim args
(size_1D, 2, None),
(size_1D, 2, (0,)),
(size_1D, 0, None),
(size_1D, 0, (0,)),
(size_1D, 0.9, None),
(size_1D, 0.9, (0,)),
(size_1D, 1, None),
(size_1D, 1, (0,)),
(size_1D, -2.1, None),
(size_1D, -2.1, (0,)),
(size_1D, inf, None),
(size_1D, inf, (0,)),
(size_1D, -inf, None),
(size_1D, -inf, (0,)),
(size_2D, 2, None),
(size_2D, 2, (0,)),
(size_2D, 2, (-1, 0)),
(size_2D, 0, None),
(size_2D, 0, (0,)),
(size_2D, 0, (-1, 0)),
(size_2D, 0.9, None),
(size_2D, 0.9, (0,)),
(size_2D, 0.9, (-1, 0)),
(size_2D, 1, None),
(size_2D, 1, (0,)),
(size_2D, 1, (-1, 0)),
(size_2D, -2.1, None),
(size_2D, -2.1, (0,)),
(size_2D, -2.1, (-1, 0)),
(size_2D, inf, None),
(size_2D, inf, (0,)),
(size_2D, inf, (-1, 0)),
(size_2D, -inf, None),
(size_2D, -inf, (0,)),
(size_2D, -inf, (-1, 0)),
]
inputs = []
for test_size, ord, dim in test_cases:
for keepdim in [False, True]:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(ord,),
kwargs=dict(
keepdim=keepdim,
dim=dim)))
return inputs
# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensors and have common properties
class BinaryUfuncInfo(OpInfo):
"""Operator information for 'universal binary functions (binary ufuncs).'
These are functions of two tensors with common properties like:
- they are elementwise functions
- the output shape is determined by the input shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/stable/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(self, name, *, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None, **kwargs):
super().__init__(name, **kwargs)
# [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = {}
self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = {}
self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs
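# Illustrative sketch (hypothetical entry, not part of the real op database): a
# division-like BinaryUfuncInfo could exclude zeros on the right-hand side so
# that generated samples never divide by zero, e.g.
#
#   BinaryUfuncInfo('div',
#                   sample_inputs_func=sample_inputs_binary_pwise,
#                   rhs_make_tensor_kwargs=dict(exclude_zero=True))
#
# sample_inputs_binary_pwise below then picks these kwargs up (via the resolver
# helper) when building the lhs/rhs tensors.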
def _resolve_binay_pwise_kwargs(
op_info, *, op_kwargs=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None
):
"""Resolves default values for :func:`sample_inputs_binary_pwise`.
By default :attr:`op_kwargs`, :attr:`lhs_make_tensor_kwargs`, and :attr:`rhs_make_tensor_kwargs` are just empty
dictionaries. In case :attr:`op_info` is a :class:`BinaryUfuncInfo`, :attr:`BinaryUfuncInfo.lhs_make_tensor_kwargs`
and :attr:`BinaryUfuncInfo.rhs_make_tensor_kwargs` will be used as defaults.
"""
if op_kwargs is None:
op_kwargs = {}
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = op_info.lhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = op_info.rhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}
return op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs
def sample_inputs_binary_pwise(
op_info,
device,
dtype,
requires_grad,
*,
python_scalars=False,
op_kwargs=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
**kwargs,
):
op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binay_pwise_kwargs(
op_info,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
)
scalar = make_tensor((), device=device, dtype=dtype, **rhs_make_tensor_kwargs)
if python_scalars:
scalar = scalar.item() # type: ignore[assignment]
shapes = [
((), scalar),
((S,), scalar),
((S, 1), (S,)),
((M, S), scalar),
((S, M, S), (M, S)),
((S, M, S), (S, M, S)),
((M, 1, S), (M, S)),
((M, 1, S), (1, M, S)),
]
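# Note: for pairs like ((M, 1, S), (1, M, S)) above, the broadcasted result
# shape (M, M, S) differs from the lhs shape, so those samples are flagged
# with broadcasts_input=True below; the OpInfo machinery uses that flag,
# e.g. to expect inplace variants to fail on such samples.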
sample_inputs = []
for shape_lhs, shape_rhs_or_scalar in shapes:
lhs = make_tensor(
shape_lhs,
device=device,
dtype=dtype,
requires_grad=requires_grad,
**lhs_make_tensor_kwargs,
)
if isinstance(shape_rhs_or_scalar, tuple):
# shape
rhs = make_tensor(
shape_rhs_or_scalar,
device=device,
dtype=dtype,
requires_grad=requires_grad,
**rhs_make_tensor_kwargs,
)
broadcasts_input = torch.broadcast_shapes(shape_lhs, shape_rhs_or_scalar) != shape_lhs
else:
# scalar
rhs = shape_rhs_or_scalar # type: ignore[assignment]
broadcasts_input = False
sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=op_kwargs, broadcasts_input=broadcasts_input))
return sample_inputs
def sample_inputs_add_sub(
op_info,
device,
dtype,
requires_grad,
python_scalars=False,
alpha=1,
op_kwargs=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
**kwargs,
):
op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binay_pwise_kwargs(
op_info,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
)
sample_inputs = sample_inputs_binary_pwise(
op_info,
device,
dtype,
requires_grad,
python_scalars=python_scalars,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
**kwargs,
)
lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)
rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)
sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=dict(op_kwargs, alpha=alpha), broadcasts_input=False))
return sample_inputs
def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return (SampleInput(make_arg((1, 2))),
SampleInput(make_arg((2,))),
SampleInput(make_arg(())))
def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):
args_list = (
((S, M), (M, S)),
)
inputs = tuple(SampleInput(make_tensor(first_shape, device, dtype,
requires_grad=requires_grad),
args=(make_tensor(second_shape, device, dtype,
requires_grad=requires_grad),))
for first_shape, second_shape in args_list)
return inputs
def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):
alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)
beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)
tests_list = [
((2, 3), (2, 2), (2, 3), False)
]
tests_with_lhs_broadcasting = [
((1,), (2, 2), (2, 3), True),
((), (2, 2), (2, 3), True)
]
test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]
inputs = tuple(SampleInput(make_tensor(shape_a, device, dtype, requires_grad=requires_grad),
args=(make_tensor(shape_b, device, dtype,
requires_grad=requires_grad),
make_tensor(shape_c, device, dtype,
requires_grad=requires_grad)),
kwargs={'alpha': alpha_val, 'beta': beta_val},
broadcasts_input=broadcasts_input)
for shape_a, shape_b, shape_c, broadcasts_input in test_cases)
return inputs
def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((M, S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((M, M, S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):
sample_inputs = []
sample_inputs.append(SampleInput(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
))
if dtype.is_complex:
# dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor)
# is tested in test_conj_view (which tests operations with only conjugated input tensor
# -- not conjugated arg tensors)
sample_inputs.append(SampleInput(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
torch.conj(make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
)
))
return sample_inputs
def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases = (((S,), (S, M), (M,), 1, 1, False),
((S,), (S, M), (M,), 0.2, 0.6, False),
)
test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),
((1,), (S, M), (M,), 0.2, 0.6, True),
((), (S, M), (M,), 1, 1, True),
((), (S, M), (M,), 0.2, 0.6, True),
)
cases = test_cases + test_cases_with_broadcast
def generator():
# addmv performs: beta * M + alpha * (mat @ vec)
for M, mat, vec, beta, alpha, broadcasts_input in cases:
yield SampleInput(make_arg(M), args=(make_arg(mat), make_arg(vec)),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting
test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False),
((1,), (S, S, S), (S, S, M), 1, 1, True),
((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
((), (S, S, S), (S, S, M), 1, 1, True),
((), (S, S, S), (S, S, M), 0.6, 0.2, True),
]
def generator():
for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases:
if dtype.is_complex:
beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j)
yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting)
yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting)
return list(generator())
def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):
test_cases = [(((S, S), (S, S), (S, S)), False),
(((S, S), (S, 1), (1, S)), False),
(((1,), (S, S, 1), (1, S)), True),
(((), (), ()), False),
(((S, S), (), ()), True),
(((), (S, S, 1), (1, S)), True)
]
sample_inputs = []
for input_args, broadcasts_input in test_cases:
args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg
for arg in input_args)
sample_inputs.append(SampleInput(args[0], args=args[1:], broadcasts_input=broadcasts_input))
sample_inputs.append(SampleInput(args[0], args=args[1:], kwargs=dict(value=3.14), broadcasts_input=broadcasts_input))
return tuple(sample_inputs)
def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):
test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),
((1,), (S, S, S), (S, S, M), 1, 1, True),
((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
((), (S, S, S), (S, S, M), 1, 1, True),
((), (S, S, S), (S, S, M), 0.6, 0.2, True),
]
sample_inputs = []
for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:
args = (make_tensor(input_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(batch1_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(batch2_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad))
sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))
if dtype.is_complex:
sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),
kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),
broadcasts_input=broadcasts_input))
return tuple(sample_inputs)
def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):
input1 = SampleInput(
make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))
input2 = SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
broadcasts_input=True)
if dtype.is_complex:
alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j
elif dtype.is_floating_point:
alpha, beta = 0.2, 0.6
else:
alpha, beta = 2, 3
input3 = SampleInput(
make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
kwargs=dict(beta=beta, alpha=alpha))
input4 = SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
kwargs=dict(beta=beta, alpha=alpha),
broadcasts_input=True)
return (input1, input2, input3, input4)
def sample_inputs_xlogy(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, S), device, dtype, low=0, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_xlog1py(self, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def generator():
# same shape
yield SampleInput(make_arg((S, S)), args=(make_arg((S, S), low=-1),))
# rhs broadcast
yield SampleInput(make_arg((S, S)), args=(make_arg((S,), low=-1),))
# all zero `x`
with torch.no_grad():
x = make_arg((S, S))
x.fill_(0)
yield SampleInput(x, args=(make_arg((S, S), low=-1),))
# randomly zero-masked `x`
x = make_arg((S, S))
y = make_arg((S, S), low=-1)
with torch.no_grad():
x[torch.rand(x.shape) > 0.5] = 0
yield SampleInput(x, args=(y,))
# Scalar x
# `input` has to be a tensor
# yield SampleInput(0, args=(make_arg((S, S), low=-1),))
# yield SampleInput(2.1, args=(make_arg((S, S), low=-1),))
# Scalar y
yield SampleInput(make_arg((S, S)), args=(-0.5,))
yield SampleInput(make_arg((S, S)), args=(1.2,))
return list(generator())
def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = ((), (S, S, S), (S,))
def generator():
for shape in cases:
yield(SampleInput(make_arg(shape)))
return list(generator())
def sample_inputs_logsumexp(self, device, dtype, requires_grad):
inputs = (
((), (0,), True),
((S, S), (1,), True),
((S, S), (1,), False)
)
samples = []
for shape, dim, keepdim in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(dim, keepdim)))
return tuple(samples)
def sample_inputs_logcumsumexp(self, device, dtype, requires_grad):
inputs = (
((S, S, S), 0),
((S, S, S), 1),
((), 0),
)
samples = []
for shape, dim in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(dim,)))
return tuple(samples)
def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):
return (SampleInput((make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad))),)
def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (2, 1, 0.5)),
((S, S, S), (2, -1, 0.5)),
((S, S, S), (1, 2, 3)),
((S, S, S), (float('inf'), 2, 0.5)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((1, 2, 3), (-1, -2)),
((1, 2, 3), (-1, 2)),
((1, 2, 3), (1, -2)),
((1, 2, 3), (1, 2)),
((), (0, 0)),
((1, ), (0, 0)),
((M, M), (0, 1)),
((S, S, S), (2, 0)), )
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function always generates invertible inputs for linear algebra ops using
random_fullrank_matrix_distinct_singular_value.
The input is generated as the itertools.product of 'batches' and 'ns'.
In total this function generates 8 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 0]
out = []
for batch, n in product(batches, ns):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
out.append(SampleInput(a))
return out
def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# autograd is not supported for inputs with zero number of elements
shapes = ((S, S),
(2, S, S),
(2, 1, S, S), )
def generator():
for shape in shapes:
yield SampleInput(make_arg(shape))
return list(generator())
def np_sinc_with_fp16_as_fp32(x):
# Wraps numpy's sinc function so that fp16 values are promoted to fp32
# before sinc is invoked. Context: numpy's sinc returns NaN when evaluated
# at 0 for fp16.
if x.dtype == np.float16:
return np.sinc(x.astype(np.float32))
else:
return np.sinc(x)
def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((S, 1, 1), (S, S, S)),
((S, 1, S), (S, S, S)),
((S, 1), (S, S, S)),
((1,), (S, S, S)),
((1, S), (1, 1, S)),
((), ()),
((), (1, 3, 2)),
)
return tuple(
SampleInput(
make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(shape,)) for size, shape in test_cases)
def sample_inputs_bitwise_shift(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
(S, S, S),
(S,),
(),
)
sample_inputs = []
for size in test_cases:
tensor1 = make_tensor(size, device, dtype, low=-32, high=32, requires_grad=requires_grad)
tensor2 = make_tensor(size, device, dtype, low=0, high=5, requires_grad=requires_grad)
sample_inputs.append(SampleInput(tensor1, args=(tensor2,)))
sample_inputs.append(SampleInput(tensor1, args=(2,)))
return tuple(sample_inputs)
def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):
small_S = 2
test_cases = (
((S, S, 2), (S, S + 1, 2)),
((S, S), (S, S)),
((S, S, S), (S, S, S)),
((3, 5), (3, 5)),
((2, 3, 5), (2, 3, 5)),
((1, 2, 3), (1, 2, 3)),
((1, 1), (S, 1)),
((0, 5), (4, 5)),
((4, 5), (0, 5)),
((0, 4, 5), (3, 5)),
((4, 5), (0, 3, 5)),
((0, 4, 5), (1, 3, 5)),
((1, 4, 5), (0, 3, 5)),
# Using S here would make this one test take 9s
((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),
((small_S, 1, 1, small_S), (1, small_S, small_S)),
((1, 1, small_S), (small_S, 1, small_S, small_S)),
)
samples = []
for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
# FIXME add an override for JIT and revert 0. back to 0
# since it's accepted by eager
for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]:
for t1_size, t2_size in test_cases:
# The args should never be non-contiguous as this is not supported in the backward
samples.append(SampleInput(
make_tensor(t1_size, device, dtype, requires_grad=requires_grad, noncontiguous=False),
args=(make_tensor(t2_size, device, dtype, requires_grad=requires_grad, noncontiguous=False), p, cm)))
return samples
def sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
cases = (((S, S, S), (1,)),
((), (1,)),
# For requires_grad=False below,
# check https://github.com/pytorch/pytorch/issues/59137
((S, S, S), (make_arg((), requires_grad=False),)))
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_comparison_ops(self, device, dtype, requires_grad, **kwargs):
test_cases = (
((S, S, S), (S, S, S), False),
((S, S, S), (), False),
((S, S, S), (1,), False),
((S,), (1,), False),
((), (), False),
)
test_cases_lhs_broadcasting = (
((S, 1, S), (S, S, S), True),
((1,), (S, S, S), True),
((1, S), (1, 1, S), True),
((), (0,), True),
((), (S, S, S), True),
)
cases = test_cases + test_cases_lhs_broadcasting
sample_inputs = list(SampleInput(make_tensor(first_shape, device, dtype,
requires_grad=requires_grad),
args=(make_tensor(second_shape, device, dtype,
requires_grad=requires_grad),),
broadcasts_input=broadcasts_input)
for first_shape, second_shape, broadcasts_input in cases)
equal_tensors_non_bool = (
([[[-8, 6], [9, 0]], [[0, 5], [5, 7]]]),
([[[6, 5]], [[1, -5]]]),
([[2], [-1]]),
([0, -6]),
([3],),
)
equal_tensors_bool = (
([[[1, 0], [0, 0]], [[0, 1], [1, 0]]]),
([[[1, 1]], [[1, 0]]]),
([[1], [0]]),
([0, 1]),
([1],),
)
more_cases = equal_tensors_bool if dtype is torch.bool else equal_tensors_non_bool
more_inputs = list(SampleInput(torch.tensor(elements, device=device, dtype=dtype,
requires_grad=requires_grad),
args=(torch.tensor(elements, device=device, dtype=dtype,
requires_grad=requires_grad),))
for elements in more_cases)
sample_inputs = [*sample_inputs, *more_inputs]
return tuple(sample_inputs)
def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):
tensors = [
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
]
return (SampleInput(tensors, args=(0,)),)
def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):
tensors = [
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
]
return (SampleInput(tensors),)
def sample_inputs_hypot(op_info, device, dtype, requires_grad):
input = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
args = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
return (
SampleInput(input, args=(args,)),
)
def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, gather_variable((S, S), 1, M, True, device=device))),
SampleInput(
make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
SampleInput(
make_tensor((S,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
)
def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):
return (SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S), 1, S, True, device=device), 0)),
# `indices` broadcast
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),
# `self` broadcast
SampleInput(make_tensor((1, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),
# without `dim` arg
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device), )),
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device),)),
)
def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):
test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment]
((S, S, S), {}),
((S, S, S), {'dim': 1}),
((S, S, S), {'dim': 1, 'keepdim': True}),
((), {'dim': 0}),
((), {}),
((), {'dim': 0, 'keepdim': True}),
)
samples: List[SampleInput] = []
for shape, kwargs in test_cases:
samples.append(SampleInput(
make_tensor(shape, device, dtype, requires_grad=requires_grad),
kwargs=kwargs))
return samples
def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((1,), 0, None, None),
((S,), 0, None, None),
((S, 1), 0, None, None),
((S, 1), 1, None, None),
((S, S), 0, None, None),
((S, S), 1, None, None),
((S, S), 0, (1, S), (2, S)),
((S, S), 0, None, (2, S)),
((S, S, S), 1, None, None),
((S, S, S), 1, (S, 1, S), (S, 1, S)),)
sample_inputs = []
for size, dim, size_prepend, size_append in test_cases:
args = (make_tensor(size, device, dtype,
low=None, high=None,
requires_grad=requires_grad), 1, dim,
make_tensor(size_prepend, device, dtype,
low=None, high=None,
requires_grad=requires_grad) if size_prepend else None,
make_tensor(size_append, device, dtype,
low=None, high=None,
requires_grad=requires_grad) if size_append else None)
sample_inputs.append(SampleInput(args[0], args=args[1:]))
return tuple(sample_inputs)
def sample_inputs_histogram(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):
input_tensor = make_arg(size)
weight_tensor = make_arg(size) if weighted else None
sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
kwargs=dict(weight=weight_tensor, density=density)))
bins_tensor = make_arg((bin_ct + 1,))
sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
kwargs=dict(weight=weight_tensor, density=density)))
return sample_inputs
def sample_inputs_gradient(op_info, device, dtype, requires_grad):
sample_inputs = []
test_cases_float = (
((S,), None, None, 1),
((S,), 2., None, 1),
((S, S), None, None, 2),
((S, S), [2.0, 2.1], None, 1),
((S, S), [2.0, 2.1], (0, 1), 1),
((4, 4, 4), [2., 1.], (0, 1), 2),
)
for size, spacing, dim, edge_order in test_cases_float:
t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))
test_cases_tensor = (
((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),
((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),
)
for size, coordinates, dim, edge_order in test_cases_tensor:
t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
coordinates_tensor_list = []
for coords in coordinates:
a = torch.tensor(coords, dtype=dtype, device=device)
coordinates_tensor_list.append(a)
sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order)))
return tuple(sample_inputs)
def sample_inputs_index_select(op_info, device, dtype, requires_grad):
return (
SampleInput(
make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, index_variable(2, S, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
)
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
test_args = [
([1, 2],),
(slice(0, 3),),
([slice(0, 3), 1],),
([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),
([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),
([slice(None), slice(None), [0, 3]],),
([slice(None), [0, 3], slice(None)],),
([[0, 3], slice(None), slice(None)],),
([[0, 3], [1, 2], slice(None)],),
([[0, 3], ],),
([[0, 3], slice(None)],),
([[0, 3], Ellipsis],),
([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),
(index_variable(2, S, device=device),),
(mask_not_all_zeros((S,)),),
]
return tuple(SampleInput(
make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=args)
for args in test_args)
def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
for accumulate in [False, True]:
# Test with indices arg
inputs.append(SampleInput(
make_tensor((S, S,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
(index_variable(2, S, device=device), ),
make_tensor((2, S), device, dtype, low=None, high=None)),
kwargs=dict(accumulate=accumulate)))
# Test with mask arg
mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))
inputs.append(SampleInput(
make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
(mask, ),
make_tensor((S,), device, dtype, low=None, high=None),),
kwargs=dict(accumulate=accumulate)))
return inputs
# TODO: test the nondeterminism of the operation, see
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index_add(op_info, device, dtype, requires_grad, **kwargs):
# These tests are pretty much the same as those for index_copy.
# Perhaps merge?
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
t = make_arg((S, S))
s = make_arg((S, S))
# non-contiguous target
t_nonctg = t.transpose(0, 1)
# non-contiguous source
s_nonctg = s.transpose(0, 1)
idx = make_arg((S,), dtype=torch.int64, low=0, high=S)
idx_nonctg = make_arg((S,), dtype=torch.int64, low=0, high=S, noncontiguous=True)
samples = [SampleInput(tensor, args=(1, idx, source))
for tensor, idx, source in product([t, t_nonctg], [idx, idx_nonctg], [s, s_nonctg])]
samples.extend(SampleInput(tensor, args=(1, idx, source), kwargs=dict(alpha=a))
for tensor, idx, source, a in product([t, t_nonctg], [idx, idx_nonctg], [s, s_nonctg], [-1, 0, 2]))
# Add scalar cases
scalar_sizes = [(), (1,)]
ts = (make_arg(size) for size in scalar_sizes)
idxs = (make_arg(size, dtype=torch.int64, low=0, high=1) for size in scalar_sizes)
ss = (make_arg(size) for size in scalar_sizes)
samples.extend(SampleInput(t, args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))
samples.extend(SampleInput(t, args=(0, idx, s), kwargs=dict(alpha=a)) for t, idx, s, a in product(ts, idxs, ss, [-1, 0, 2]))
return samples
def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):
def apply_grad(t):
if dtype in floating_types_and(torch.float16, torch.bfloat16):
t.requires_grad_(requires_grad)
def small_3d_unique(dtype, device):
res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)
res = res.to(dtype)
apply_grad(res)
return res
def large_1d_unique(dtype, device):
res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
res = res.to(dtype)
apply_grad(res)
return res
samples = []
# Test case for large tensor.
largesample = SampleInput(large_1d_unique(dtype, device))
samples.append(largesample)
# Test cases for small 3d tensors.
# Imitates legacy tests from test/test_torch.py
t = small_3d_unique(dtype, device)
dims = range(-3, 3)
flag = [True, False]
for dim, descending, stable in product(dims, flag, flag):
# default schema without stable sort
samples.append(SampleInput(t, args=(dim, descending)))
# schema with stable sort, no CUDA support yet
if torch.device(device).type == 'cpu':
samples.append(
SampleInput(t, kwargs=dict(dim=dim, descending=descending, stable=stable))
)
# Test cases for scalar tensor
scalar = torch.tensor(1, dtype=dtype, device=device)
apply_grad(scalar)
samples.append(SampleInput(scalar))
samples.append(SampleInput(scalar, args=(0,)))
samples.append(SampleInput(scalar, args=(0, True)))
# Test cases for stable sort
samples.append(SampleInput(scalar, kwargs=dict(stable=True)))
samples.append(SampleInput(scalar, kwargs=dict(dim=0, stable=True)))
samples.append(SampleInput(scalar, kwargs=dict(dim=0, descending=True, stable=True)))
return samples
def sample_inputs_index_fill(op_info, device, dtype, requires_grad, **kwargs):
samples = []
t = make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad)
fill_val = torch.tensor(-1 + 1j if t.is_complex() else -1)
# non-contiguous input
t01 = t.transpose(0, 1)
t02 = t.transpose(0, 2)
t12 = t.transpose(1, 2)
idx = index_variable(1, S, device=device)
# non-contiguous index
idx_nonctg = torch.empty_strided((S,), (2,), device=device, dtype=torch.int64)
idx_nonctg.copy_(idx)
for d in range(t.dim()):
for tensor in [t, t01, t02, t12]:
samples.append(SampleInput(tensor, args=(d, idx, fill_val)))
samples.append(SampleInput(tensor, args=(d, -idx - 1, fill_val)))
samples.append(SampleInput(tensor, args=(d, idx_nonctg, fill_val)))
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
index_tensor = partial(torch.tensor, device=device, dtype=torch.long)
def unique_idx(numel, max_idx):
# Generate unique random indices vector of `numel`
# elements in range [0, max_idx).
indices = random.sample(range(max_idx), numel)
return index_tensor(indices)
samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), 2)))
samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), make_arg(()))))
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor(0), 2)))
samples.append(SampleInput(make_arg(()), args=(0, index_tensor([0]), 2)))
samples.append(SampleInput(make_arg(()), args=(0, index_tensor(0), 2)))
# Duplicate indices
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0]), 2)))
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0, 2]), make_arg(()))))
return samples
def sample_inputs_max_min_binary(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
args_for_binary_op = (
((S, S, S), (S, S, S),),
((S, S, S), (S,),),
((S,), (S, S, S),),
((S, 1, S), (S, S),),
((S, S), (S, S),),
((), (),),
((S, S, S), (),),
((), (S, S, S),),
)
inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(make_tensor(other_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),),))
for input_tensor, other_tensor in args_for_binary_op)
return inputs
def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((1, 8, 8, 8), (5, 7)),
((2, 8, 8, 8), (None, 7)),
((1, 8, 4, 3), (5, None)),
((1, 8, 4, 3), (None, None)),
((1, 8, 4, 3), (5)),
)
def generator():
for input_shape, output_size in cases:
yield SampleInput(make_arg(input_shape), args=(output_size,))
return list(generator())
def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((2, 1, 4, 5), {'p': 1., 'dim': 2}),
((2, 3, 4, 5), {'p': 2., 'dim': 1}),
((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),
((1, 3, 4, 5), {'p': -1., 'dim': 1}),
((1, 3, 4, 5), {'p': 0., 'dim': -1}),
((), {'p': 1.2, 'dim': 0}),
((2, 3, 4, 5), {}),
((2, 3, 4, 5), {'eps': 1e-4}))
def generator():
for input_shape, kwargs in cases:
yield SampleInput(make_arg(input_shape), kwargs=kwargs)
return list(generator())
def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4, 4), (3, 3, 3, 3), (3,),
{'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),
((2, 2, 4, 4), (2, 2, 4, 5), (4,),
{'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),
((1, 1, 4, 5), (1, 1, 4, 3), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 1, 4, 3), (1, 2, 3, 4), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5, 5), (4, 8, 3, 3), None,
{})
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_hardswish(self, device, dtype, requires_grad):
N = 5
# make sure the -3 -> 3 range (where hardswish is nonlinear) is covered; the default is -10 -> 10, so this may be unnecessary
tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]
return tensors
def sample_inputs_interpolate(mode, self, device, dtype, requires_grad):
N, C = 2, 3
D = 4
S = 3
L = 5
align_corners_options: Tuple[Any, ...] = (None,)
if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):
align_corners_options = (True, False, None)
ranks_for_mode = {
'nearest': [1, 2, 3],
'linear': [1],
'bilinear': [2],
'bicubic': [2],
'trilinear': [3],
'area': [1, 2, 3]
}
def shape(size, rank, with_batch_channel=True):
if with_batch_channel:
return tuple([N, C] + ([size] * rank))
return tuple([size] * rank)
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
sample_inputs = []
for align_corners in align_corners_options:
for rank in ranks_for_mode[mode]:
sample_inputs.extend([
SampleInput(make_arg(shape(D, rank)),
args=(shape(S, rank, False), None, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(shape(L, rank, False), None, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(None, 1.7, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(None, 0.6, mode, align_corners)),
])
return sample_inputs
def sample_inputs_gelu(self, device, dtype, requires_grad):
N = 5
tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-3, high=3)) for _ in range(1, N)]
return tensors
def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
args_for_reduction_with_dim = (
((S, S, S), (1,),),
((S, S, S), (1, True, ),),
((), (0,),),
((), (0, True,),),
)
inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=args,))
for input_tensor, args in args_for_reduction_with_dim)
return inputs
def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
inputs.append(SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),))
inputs.append(SampleInput(make_tensor((), device, dtype,
low=None, high=None,
requires_grad=requires_grad),))
return inputs
def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad):
test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1))
test_interpolations = ['linear', 'midpoint']
inputs = []
for quantiles in test_quantiles:
for t in _generate_reduction_inputs(device, dtype, requires_grad):
# Add case without dim and keepdim kwargs
inputs.append(SampleInput(t, args=(quantiles,)))
for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):
# Interpolation kwarg for now is only supported when providing both dim and keepdim
kwargs.setdefault('dim', 0)
kwargs.setdefault('keepdim', False)
for interpolation in test_interpolations:
kwargs['interpolation'] = interpolation
inputs.append(SampleInput(t, args=(quantiles,), kwargs=kwargs))
return inputs
def sample_inputs_reduction_count_nonzero(*args, **kwargs):
"""Sample inputs for count_nonzero"""
samples: List[SampleInput] = sample_inputs_reduction(*args, **kwargs)
# count_nonzero does not support keepdim yet
for sample in samples:
sample.kwargs.pop('keepdim', None)
return samples
def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad):
N = 10
tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
requires_grad=requires_grad)) for _ in range(1, N)]
return tensors
def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),
((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),
((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),
((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),
((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),
((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))
def generator():
for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:
yield SampleInput(make_arg(input_shape),
args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))
# Case with just input_shape and kernel_size
yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))
return list(generator())
def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):
def get_tensor_input(size):
return make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs = []
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1,)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))
return inputs
def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
arg_a = make_tensor((S,), device, dtype, requires_grad=requires_grad)
arg_b = make_tensor((M,), device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(arg_a, args=(arg_b,)))
return inputs
def sample_inputs_dist(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))
ps = (2, 4)
def generate_samples():
for size_x, size_y, p in product(sizes, sizes, ps):
yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))
return list(generate_samples())
# TODO: test the nondeterminism of the operation, see
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index_copy(op_info, device, dtype, requires_grad, **kwargs):
def make_arg(shape, low=None, high=None, dtype=dtype):
return make_tensor(shape, device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)
t = make_arg((S, S))
s = make_arg((S, S))
# non-contiguous input
t01 = t.transpose(0, 1)
# non-contiguous source
s01 = s.transpose(0, 1)
# idx is a permutation of 0...S-1 for this function to be deterministic
idx = torch.randperm(S, device=device, dtype=torch.int64)
# non-contiguous index
idx_nonctg = torch.repeat_interleave(idx, 2, dim=-1)[::2]
# index_copy_ does not support negative indices
# idx_neg = -idx - 1
samples = [SampleInput(tensor, args=(1, idx, source))
for tensor, idx, source in product([t, t01], [idx, idx_nonctg], [s, s01])]
# Add scalar cases
scalar_sizes = [(), (1,)]
ts = (make_arg(size) for size in scalar_sizes)
idxs = (make_arg(size, dtype=torch.int64, low=0, high=1) for size in scalar_sizes)
ss = (make_arg(size) for size in scalar_sizes)
samples.extend(SampleInput(t, args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))
return samples
def sample_inputs_mode(op_info, device, dtype, requires_grad):
inputs = []
args = (
((S, S, S), (),),
((S, S, S), (1, ),),
((S, S, S), (1, True, ),),
((), (),),
((), (0,),),
((), (0, True,),),
)
inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=args,))
for input_tensor, args in args)
return inputs
# TODO: test the nondeterminism of the operation, see
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_put(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)
S = 3
def gen_inputs():
# Generic inputs
tgt_gen = (make_arg((S, S), noncontiguous=not ctg) for ctg in (True, False))
src_gen = (make_arg((S,), noncontiguous=not ctg) for ctg in (True, False))
idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]
idx_nonctg = torch.repeat_interleave(idx, 2, dim=-1)[::2]
idx_neg = -idx - 1
idx_list = [idx, idx_nonctg, idx_neg]
for tgt, idx, src, acc in product(tgt_gen, idx_list, src_gen, (True, False)):
yield SampleInput(input=tgt, args=(idx, src, acc))
# Scalar cases
scalar_sizes = [(), (1,)]
tgt_gen = (make_arg(size) for size in scalar_sizes)
idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
src_gen = (make_arg(size) for size in scalar_sizes)
for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):
yield SampleInput(input=tgt, args=(idx, src, acc))
# Empty cases
tgt_sizes = [(0,), (), (1,), (3, 2)]
tgt_gen = (make_arg(size) for size in tgt_sizes)
idx = make_idx((0,), high=1)
src = make_arg((0,))
for tgt, acc in product(tgt_gen, (True, False)):
yield SampleInput(input=tgt, args=(idx, src, acc))
return list(gen_inputs())
def sample_inputs_take(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)
S = 3
def gen_inputs():
# Generic inputs: take S elements out of S * S
src_gen = (make_arg((S, S), noncontiguous=not ctg) for ctg in (True, False))
idx = make_idx((S,), high=S * S)
idx_nonctg = make_idx((S,), high=S * S, noncontiguous=True)
idx_neg = -idx - 1
idx_list = [idx, idx_nonctg, idx_neg]
for src, idx in product(src_gen, idx_list):
yield SampleInput(input=src, args=(idx,))
# Scalar cases
scalar_sizes = [(), (1,)]
src_gen = (make_arg(size) for size in scalar_sizes)
idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
for src, idx in product(src_gen, idx_gen):
yield SampleInput(input=src, args=(idx,))
# Empty cases
src_sizes = [(0,), (), (1,), (3, 2)]
src_gen = (make_arg(size) for size in src_sizes)
idx = make_idx((0,), high=1)
for src in src_gen:
yield SampleInput(input=src, args=(idx,))
return list(gen_inputs())
def sample_movedim_moveaxis(op_info, device, dtype, requires_grad):
return (
SampleInput(
make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=([0, 1, 2, 3], [3, 2, 1, 0])),
SampleInput(
make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=([0, -1, -2, -3], [-3, -2, -1, -0]))
)
def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):
rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))
if requires_grad:
# Tests for variant_consistency_jit, grad, gradgrad
# are slower. Use smaller bags of `rep_dims` and `shapes`
# in this case.
rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment]
shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment]
tensors = [make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad) for shape in shapes]
samples = []
for rep_dim, tensor in product(rep_dims, tensors):
for t in (tensor, tensor.T):
if op_info.name == 'repeat' and len(rep_dim) >= t.dim():
# `Tensor.repeat` errors when `len(rep_dim) < t.dim()`,
# so only combinations with `len(rep_dim) >= t.dim()` are kept.
samples.append(SampleInput(t, args=(rep_dim,),))
elif op_info.name == 'tile':
samples.append(SampleInput(t, args=(rep_dim,),))
return samples
def sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_args = (
((S, S, S), (1, 2, 2)),
((S, S, S), (-1, 2, 2)),
((S, S, S), (1, 0, 0)),
((S, S, S), (-1, 0, 0)),
)
def generator():
for shape, args in shapes_and_args:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
yield SampleInput(tensor, args=args)
return list(generator())
def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
y_shape_x_shape_and_kwargs = [
((2, 3), (2, 3), {}),
((2, 3), (2, 3), {'dim': 1}),
((6,), (6,), {}),
((6,), None, {}),
# When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad
# See Issue #61619
# ((6,0), (6,0), {}),
((2, 3), (1, 3), {}),
((3, 3), (3, 3), {}),
((3, 3), (3, 3), {'dim': -2}),
((5,), None, {'dx': 2.0}),
((2, 2), None, {'dx': 3.0})
]
samples = []
for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
if x_shape is not None:
x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
else:
samples.append(SampleInput(y_tensor, kwargs=kwarg))
return samples
def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
y_shape_x_shape_and_kwargs = [
((2, 3), (2, 3), {}),
((2, 3), (2, 3), {'dim': 1}),
((6,), (6,), {}),
((6,), None, {}),
# When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad
# See Issue #61619
# ((6,0), (6,0), {}),
((2, 3), (1, 3), {}),
((3, 3), (3, 3), {}),
((3, 3), (3, 3), {'dim': -2}),
((5,), None, {'dx': 2.0}),
((2, 2), None, {'dx': 3.0})
]
samples = []
for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
if x_shape is not None:
x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
else:
samples.append(SampleInput(y_tensor, kwargs=kwarg))
return samples
def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_axes = [
((3, 4, 5), 0),
((3, 4, 5), 1),
((3, 4, 5), 3),
((3, 4, 5), -1),
((3, 4, 5), -3),
((), 0)
]
samples = []
for shape, axis in shapes_and_axes:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(tensor, args=(axis,),))
return samples
def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):
shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5))
kernel_sizes = (2, (2, 2), (3, 3))
dilations = (1, 2, (1, 2))
paddings = (0, 1, (1, 1))
strides = (1, 2, (1, 2))
def generator():
cases = product(shapes, kernel_sizes, dilations, paddings, strides)
for shape, kernel_size, dilation, padding, stride in cases:
tensor = make_tensor(shape, device, dtype, requires_grad=requires_grad)
yield SampleInput(tensor, args=(kernel_size, dilation, padding, stride))
# With default args
yield SampleInput(make_tensor((1, 1, 5, 5), device, dtype, requires_grad=requires_grad),
args=((3, 3),))
return list(generator())
def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_args = (
((S, 1, S, 1), ()),
((1, 1, 1, 1), ()),
((S, 1, S, 1), (1,)),
((S, 1, S, 1), (-1,)),
((S, 1, S, 1), (2,)),
((S, 1, S, 1), (-2,)),
((), (0, )),
)
def generator():
for shape, args in shapes_and_args:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
yield SampleInput(tensor, args=args)
return list(generator())
def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):
assert mode in ('constant', 'reflect', 'replicate', 'circular')
if mode in ['reflect', 'replicate']:
cases: tuple = (  # type: ignore[assignment]
((1, 3), (1, 2)),
((1, 3), (0, 1)),
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((1, 3, 3), (0, 2, 0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
elif mode == 'constant':
cases = (
((1, 3), (1, 2)),
((1, 3), (0, 1)),
((1, 3), (0, 2, 0, 1)),
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((0, 3, 3), (0, 2, 0, 1)),
((0, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((1, 3, 3), (0, 2, 0, 1)),
((1, 3, 3), (1, 1, 1, 1, 1, 1)),
((0, 3, 3, 3), (1, 2)),
((0, 3, 3, 3), (0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((3, 3, 5, 5), (1, 2)),
((3, 3, 5, 5), (0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
((1, 3, 3, 3, 3), (1, 2)),
((1, 3, 3, 3, 3), (0, 1)),
((1, 3, 3, 3, 3), (0, 2, 0, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
else: # mode == 'circular'
if dtype == torch.bool:
# test_dtypes fails on ASAN for this case with the error:
# runtime error: load of value 190, which is not a valid value for type 'bool'
# Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562
# Reference Issue: https://github.com/pytorch/pytorch/issues/63034
cases = (
((2, 3, 3), (1, 2)),
((1, 3, 3), (1, 2)),
)
else:
cases = (
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def generator():
if mode == 'constant':
# Default args
yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))
if mode in ['reflect', 'replicate', 'circular']:
for shape, pad in cases:
yield SampleInput(make_inp(shape), args=(pad, mode))
else: # mode == 'constant'
for pad_value in (1., 2.):
for shape, pad in cases:
yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))
return list(generator())
# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet
# Creates matrices with a positive nonzero determinant
def sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):
def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):
u, s, vh = torch.linalg.svd(A, full_matrices=False)
s.clamp_(min=min_singular_value)
A = (u * s.unsqueeze(-2)) @ vh
det = A.det()
if sign is not None:
if A.dim() == 2:
if (det < 0) ^ (sign < 0):
A[0, :].neg_()
else:
cond = ((det < 0) ^ (sign < 0)).nonzero()
if cond.size(0) > 0:
for i in range(cond.size(0)):
A[list(cond[i])][0, :].neg_()
return A
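# Rationale for make_nonzero_det: clamping the singular values keeps
# |det(A)| = prod(s_i) >= min_singular_value ** n, so A stays comfortably
# nonsingular, and negating a single row flips the sign of det(A), so the
# requested sign can be enforced without changing |det(A)|.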
samples = []
# cases constructed using make_tensor()
tensor_shapes = (
(S, S),
(1, 1),
(3, 3, S, S),
(3, 3, 1, 1)
)
for shape in tensor_shapes:
t = make_tensor(shape, device=device, dtype=dtype)
d = make_nonzero_det(t).requires_grad_(requires_grad)
samples.append(SampleInput(d))
# cases constructed using:
# 1) make_symmetric_matrices
# 2) make_symmetric_pd_matrices
# 3) make_fullrank_matrices_with_distinct_singular_values
symmetric_shapes = (
(S, S),
(3, S, S),
)
def _helper(constructor, *shape, **kwargs):
t = constructor(*shape, device=device, dtype=dtype)
d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)
samples.append(SampleInput(d))
for shape in symmetric_shapes:
_helper(make_symmetric_matrices, *shape)
_helper(make_symmetric_pd_matrices, *shape)
_helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)
return tuple(samples)
def np_unary_ufunc_integer_promotion_wrapper(fn):
# Wrapper that passes PyTorch's default scalar
# type as an argument to the wrapped NumPy
# unary ufunc when given an integer input.
# This mimicks PyTorch's integer->floating point
# type promotion.
#
# This is necessary when NumPy promotes
# integer types to double, since PyTorch promotes
# integer types to the default scalar type.
# Helper to determine if promotion is needed
def is_integral(dtype):
return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]
@wraps(fn)
def wrapped_fn(x):
        # As the default dtype can change, acquire it when the function is called.
# NOTE: Promotion in PyTorch is from integer types to the default dtype
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
if is_integral(x.dtype):
return fn(x.astype(np_dtype))
return fn(x)
return wrapped_fn
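# Hedged usage sketch (illustrative only, not used directly below): the wrapper is meant for
# building NumPy references of integer-promoting unary ops, e.g.
#   ref = np_unary_ufunc_integer_promotion_wrapper(np.sin)
#   ref(np.arange(3, dtype=np.int64))  # evaluated in the default torch scalar dtype (float32 by default)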
def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
nd_tensor = make_tensor((S, S + 1, S + 2), device, dtype, low=None, high=None,
requires_grad=requires_grad)
tensor = make_tensor((31,), device, dtype, low=None, high=None,
requires_grad=requires_grad)
if self.ndimensional:
return [
SampleInput(nd_tensor, kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor, kwargs=dict(norm='ortho')),
SampleInput(nd_tensor, kwargs=dict(s=(8,))),
SampleInput(tensor),
*(SampleInput(nd_tensor, kwargs=dict(dim=dim))
for dim in [-1, -2, -3, (0, -1)]),
]
else:
return [
SampleInput(nd_tensor, kwargs=dict(n=10, dim=1, norm='ortho')),
SampleInput(nd_tensor, kwargs=dict(norm='ortho')),
SampleInput(nd_tensor, kwargs=dict(n=7)),
SampleInput(tensor),
*(SampleInput(nd_tensor, kwargs=dict(dim=dim))
for dim in [-1, -2, -3]),
]
# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
"""Operator information for torch.fft transforms. """
def __init__(self,
name, # the string name of the function
*,
ref=None, # Reference implementation (probably in np.fft namespace)
dtypes=floating_and_complex_types(),
ndimensional: bool, # Whether dim argument can be a tuple
sample_inputs_func=sample_inputs_spectral_ops,
decorators=None,
**kwargs):
decorators = list(decorators) if decorators is not None else []
decorators += [
skipCPUIfNoFFT,
skipCUDAIfRocm,
]
super().__init__(name=name,
dtypes=dtypes,
decorators=decorators,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref if ref is not None else _getattr_qual(np, name)
self.ndimensional = ndimensional
class ShapeFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""
def __init__(self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCPU=None,
dtypesIfCUDA=None,
dtypesIfROCM=None,
sample_inputs_func=None,
**kwargs):
super(ShapeFuncInfo, self).__init__(name,
dtypes=dtypes,
dtypesIfCPU=dtypesIfCPU,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
def sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False):
if same_size:
return [make_tensor((N, N), device, dtype, noncontiguous=noncontiguous) for _ in range(N)]
else:
return [make_tensor((N - i, N - i), device, dtype, noncontiguous=noncontiguous) for i in range(N)]
def get_foreach_method_names(name):
# get torch inplace reference function
op_name = "_foreach_" + name
inplace_op_name = "_foreach_" + name + "_"
op = getattr(torch, op_name, None)
inplace_op = getattr(torch, inplace_op_name, None)
ref = getattr(torch, name, None)
ref_inplace = getattr(torch.Tensor, name + "_", None)
return op, inplace_op, ref, ref_inplace
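# Hedged example (illustrative only): get_foreach_method_names("add") is expected to return
# (torch._foreach_add, torch._foreach_add_, torch.add, torch.Tensor.add_).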
class ForeachFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for foreach functions"""
def __init__(self,
name,
dtypes=floating_and_complex_types(),
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
dtypesIfROCM=None,
safe_casts_outputs=True,
supports_alpha_param=False,
sample_inputs_func=sample_inputs_foreach,
**kwargs):
super().__init__(
"_foreach_" + name,
dtypes=dtypes,
dtypesIfCPU=dtypesIfCPU,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
safe_casts_outputs=safe_casts_outputs,
sample_inputs_func=sample_inputs_func,
**kwargs
)
foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name)
self.method_variant = foreach_method
self.inplace_variant = foreach_method_inplace
self.ref = torch_ref_method
self.ref_inplace = torch_ref_inplace
self.supports_alpha_param = supports_alpha_param
def sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False):
# Generate Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
inputs = (
torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix
torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices
random_hermitian_pd_matrix(S, dtype=dtype, device=device), # single matrix
random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), # batch of matrices
)
test_cases = (torch.linalg.cholesky(a) for a in inputs)
out = []
for a in test_cases:
a.requires_grad = requires_grad
out.append(SampleInput(a))
out.append(SampleInput(a, kwargs=dict(upper=True)))
return out
def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
out = []
for batch in ((), (3,), (3, 3)):
shape = batch + (3, 3)
# NOTE: inputs are not marked with `requires_grad` since
# linalg_lstsq is not differentiable
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
b = make_tensor(shape, device, dtype, low=None, high=None)
out.append(SampleInput(a, args=(b,)))
return out
def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
"""
This function generates input for torch.linalg.householder_product (torch.orgqr).
    The first argument should be a square or tall rectangular matrix (or a batch of such
    matrices); the second argument is a vector or a batch of vectors.
    Empty, square, rectangular, batched square and batched rectangular inputs are generated.
"""
    # Each column of the matrix is multiplied many times, leading to very large values in
    # the Jacobian entries and making the finite-difference result of gradcheck less accurate.
    # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.
samples = (
SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((2, 1, S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((2, 1, S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((0, 0), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
)
return samples
def sample_inputs_ormqr(op_info, device, dtype, requires_grad):
# create a helper function wrapping `make_tensor`
make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def gen_inputs():
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
tf = [True, False]
for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):
reflectors = make_input((*batch, m, n))
tau = make_input((*batch, min(m, n)))
other_matrix_shape = (m, n) if left else (n, m)
other = make_input((*batch, *other_matrix_shape))
kwargs = {"left": left, "transpose": transpose}
yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)
return tuple(gen_inputs())
def sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates always positive-definite input for torch.linalg.cholesky using
random_hermitian_pd_matrix.
The input is generated as the itertools.product of 'batches' and 'ns'.
In total this function generates 8 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 0]
out = []
for batch, n in product(batches, ns):
a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
out.append(SampleInput(a))
return out
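# For reference, the 8 input shapes produced above are, up to ordering:
#   (5, 5), (0, 0), (0, 5, 5), (0, 0, 0), (2, 5, 5), (2, 0, 0), (1, 1, 5, 5), (1, 1, 0, 0).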
def sample_inputs_symeig(op_info, device, dtype, requires_grad=False):
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for o in out:
o.kwargs = {"upper": bool(np.random.choice([True, False])),
"eigenvectors": True}
# A gauge-invariant function
o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))
return out
def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False):
"""
This function generates input for torch.linalg.eigh with UPLO="U" or "L" keyword argument.
"""
def out_fn(output):
return output[0], abs(output[1])
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument.
"""
def out_fn(output):
if isinstance(output, tuple):
# eigh function
return output[0], abs(output[1])
else:
# eigvalsh function
return output
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.kwargs = {"UPLO": np.random.choice(["L", "U"])}
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False):
def out_fn(output):
return output[1]
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.pinv with hermitian=True keyword argument.
"""
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)
for o in out:
o.kwargs = {"hermitian": True}
return out
def sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):
"""
This function generates always solvable input for torch.linalg.solve
    Using random_fullrank_matrix_distinct_singular_value gives non-singular (=invertible, =solvable) matrices 'a'.
The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.
The second input is generated as the product of 'batches', 'ns' and 'nrhs'.
In total this function generates 18 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices.
'ns' gives 0x0 and 5x5 matrices.
and 'nrhs' controls the number of vectors to solve for:
() - using 1 as the number of vectors implicitly
(1,) - same as () but explicit
(3,) - solve for 3 vectors.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.
torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow
1D tensors (vectors) as the right-hand-side.
Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,
'vector_rhs_allowed' may be removed here as well.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, )]
ns = [5, 0]
if vector_rhs_allowed:
nrhs = [(), (1,), (3,)]
else:
nrhs = [(1,), (3,)]
out = []
for n, batch, rhs in product(ns, batches, nrhs):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
b = torch.randn(*batch, n, *rhs, dtype=dtype, device=device)
b.requires_grad = requires_grad
out.append(SampleInput(a, args=(b,)))
return out
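# For reference, a typical pair generated above is a: (2, 5, 5) together with b: (2, 5, 3),
# i.e. a batch of two 5x5 systems, each solved for 3 right-hand-side vectors.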
def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates always solvable input for legacy solve functions
(the ones that are not in torch.linalg module).
    The difference from sample_inputs_linalg_solve is that here the right-hand side of the A x = b
    equation should have b.ndim >= 2; vectors are not allowed.
Also the arguments order is swapped.
"""
out = sample_inputs_linalg_solve(
op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False
)
# Reverses tensor order
for sample in out:
sample.input, sample.args = sample.args[0], (sample.input,)
return out
def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):
# not needed once OpInfo tests support Iterables
def generate_samples():
batch_shapes = ((), (3,), (3, 3))
for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)):
shape = batch_shape + (S + size_delta, S)
input = make_tensor(shape, device, dtype, requires_grad=requires_grad, low=None, high=None)
yield SampleInput(input, args=(True, get_infos))
return list(generate_samples())
def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, )]
ns = [5, 3, 0]
nrhs = [0, 1, 6]
def generate_samples():
for n, batch, rhs in product(ns, batches, nrhs):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
requires_grad_options = (False,) if not requires_grad else (True, False)
# we try all possible combinations of requires_grad for each input
for lu_requires_grad, b_requires_grad in product(requires_grad_options, requires_grad_options):
# when requires_grad == True, at least one input has to have requires_grad enabled
if requires_grad and not lu_requires_grad and not b_requires_grad:
continue
# we run LU several times to guarantee that the produced SampleInputs are independent
                # this is especially important when setting different requires_grad for the same tensors!
lu, pivs = a.lu()
lu.requires_grad = lu_requires_grad
b = torch.randn(*batch, n, rhs, dtype=dtype, device=device)
b.requires_grad = b_requires_grad
yield SampleInput(b, args=(lu, pivs))
return list(generate_samples())
def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):
# not needed once OpInfo tests support Iterables
def generate_samples():
for lu_sample in sample_inputs_lu(op_info, device, dtype, requires_grad, **kwargs):
lu_data, pivots = lu_sample.input.lu()
yield SampleInput(lu_data, args=(pivots,))
# generate rectangular inputs
lu_data_shape = lu_data.shape
batch_shape = lu_data_shape[:-2]
n = lu_data_shape[-2]
for shape_inc in ((1, 0), (0, 1)):
lu_data, pivots = make_tensor(
batch_shape + (n + shape_inc[0], n + shape_inc[1]),
device, dtype,
requires_grad=False,
low=None, high=None
).lu()
lu_data.requires_grad_(requires_grad)
yield SampleInput(lu_data, args=(pivots,))
return list(generate_samples())
def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))
def generator():
for arg in args:
yield SampleInput(make_arg((S, S, S)), args=arg)
return list(generator())
def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
args = ((1, (0, 1),),
(1, (1, 2),),
(1, (1, -1),),
())
def generator():
for arg in args:
yield SampleInput(make_arg((S, S, S)), args=arg)
return list(generator())
def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):
tensor_nd = make_tensor((S, S, S), device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
tensor_1d = make_tensor((S,), device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
return [
SampleInput(tensor_nd),
SampleInput(tensor_nd, kwargs=dict(dim=1)),
SampleInput(tensor_nd, kwargs=dict(dim=1, unbiased=True, keepdim=True)),
SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=True, keepdim=True)),
SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=False, keepdim=False)),
SampleInput(tensor_nd, kwargs=dict(dim=(1,), correction=S // 2)),
SampleInput(tensor_nd, kwargs=dict(dim=None, correction=0, keepdim=True)),
]
def _generate_correlation_inputs(device, dtype, requires_grad):
shapes = [(2,), (1, 2), (3, 2), (2, 3)]
for shape in shapes:
yield make_tensor(shape, device, dtype, requires_grad=requires_grad)
def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs):
return [SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)]
def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
for t in _generate_correlation_inputs(device, dtype, requires_grad):
inputs.append(SampleInput(t))
num_observations = t.numel() if t.ndimension() < 2 else t.size(1)
fweights = make_tensor((num_observations,), device, torch.int, low=0, high=10, requires_grad=requires_grad)
aweights = make_tensor((num_observations,), device, torch.float, low=0, high=1, requires_grad=requires_grad)
for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]):
inputs.append(SampleInput(t, kwargs={'correction': correction, 'fweights': fw, 'aweights': aw}))
return inputs
def _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):
"""
This function generates input for torch.svd with distinct singular values so that autograd is always stable.
Matrices of different size:
square matrix - S x S size
    tall matrix - S x (S-2)
wide matrix - (S-2) x S
and batched variants of above are generated.
    Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied to the output of torch.svd.
It is needed for autograd checks, because backward of svd doesn't work for an arbitrary loss function.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
    # svd and linalg.svd return V and V.conj().T, respectively. So we need to slice
# along different dimensions when needed (this is used by
# test_cases2:wide_all and wide_all_batched below)
if is_linalg_svd:
def slice_V(v):
return v[..., :(S - 2), :]
def uv_loss(usv):
u00 = usv[0][0, 0]
v00_conj = usv[2][0, 0]
return u00 * v00_conj
else:
def slice_V(v):
return v[..., :, :(S - 2)]
def uv_loss(usv):
u00 = usv[0][0, 0]
v00_conj = usv[2][0, 0].conj()
return u00 * v00_conj
test_cases1 = ( # some=True (default)
# loss functions for complex-valued svd have to be "gauge invariant",
        # i.e. loss functions shouldn't change when the sign of the singular vectors changes.
# the simplest choice to satisfy this requirement is to apply 'abs'.
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
lambda usv: usv[1]), # 'check_grad_s'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
lambda usv: abs(usv[0])), # 'check_grad_u'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
lambda usv: abs(usv[2])), # 'check_grad_v'
# this test is important as it checks the additional term that is non-zero only for complex-valued inputs
# and when the loss function depends both on 'u' and 'v'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),
uv_loss), # 'check_grad_uv'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))), # 'wide'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'tall'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device),
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'wide_batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'tall_batched'
)
test_cases2 = ( # some=False
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))), # 'wide_all'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],
lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))), # 'tall_all'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],
lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))), # 'wide_all_batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],
lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))), # 'tall_all_batched'
)
out = []
for a, out_fn in test_cases1:
a.requires_grad = requires_grad
if is_linalg_svd:
kwargs = {'full_matrices': False}
else:
kwargs = {'some': True}
out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))
for a, out_fn in test_cases2:
a.requires_grad = requires_grad
if is_linalg_svd:
kwargs = {'full_matrices': True}
else:
kwargs = {'some': False}
out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))
return out
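# Hedged note on consumption (illustrative only): for a sample produced with is_linalg_svd=True,
# the test harness would roughly call torch.linalg.svd(sample.input, **sample.kwargs) and then
# apply sample.output_process_fn_grad to the (U, S, Vh) result before computing gradients, which
# is what makes the loss gauge-invariant for complex inputs.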
def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = [((1, 2, 3, 4), (0, 2, 3, 1)),
((1, 2, 3, 4), (0, -2, -1, 1)),
((), ()),
((1, 2, 3, 4), (2, 1, 3, 0))]
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=(args,))
return list(generator())
# Based on erstwhile method_tests tests & some tensor_op_tests for pow
def sample_inputs_pow(op_info, device, dtype, requires_grad, **kwargs):
samples = []
if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:
test_cases = (
((2, 2), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, False),
((2, 2), 0, 5, 1e-3, requires_grad, (1,), 0, 1, 0.1, requires_grad, False),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (), 0.1, 1.1, 0, False, False),
((2, 2), 0, 5, 1e-3, requires_grad, (), 0.1, 1.1, 1, False, False),
)
tests_require_resizing = (
((1,), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, requires_grad),
((2, 1, 2), 0, 5, 1e-3, requires_grad, (1, 2, 1), 0, 1, 0.1, requires_grad, requires_grad),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (1, S, 1), 0, 1, 0.1, requires_grad, requires_grad),
)
cases = test_cases + tests_require_resizing
samples = list(SampleInput(make_tensor(shape_b, low=low_b, high=high_b,
requires_grad=b_grad, device=device,
dtype=dtype) + additive_b,
args=(make_tensor(shape_e, low=low_e, high=high_e,
requires_grad=e_grad, device=device,
dtype=dtype) + additive_e,),
broadcasts_input=broadcasts_input)
for shape_b, low_b, high_b, additive_b, b_grad, shape_e, low_e,
high_e, additive_e, e_grad, broadcasts_input in cases)
tensor_scalar_inputs = (
((2, 2), 0, 5, 1e-3, requires_grad, (3.14,)),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (3.14,))
)
more_samples = list(SampleInput(make_tensor(shape, dtype=dtype, device=device,
high=high, low=low,
requires_grad=b_grad) + additive,
args=exp)
for shape, low, high, additive, b_grad, exp in tensor_scalar_inputs)
samples = [*samples, *more_samples]
elif dtype in [torch.complex64, torch.complex128]:
args_tuple = (
((2, 2), 0, 5, requires_grad, (3.14,)),
((), 0, 1, requires_grad, (3.14,)),
((), 0, 1, requires_grad, (3.14j,))
)
samples = list(SampleInput(make_tensor(shape, dtype=dtype, device=device,
high=high, low=low,
requires_grad=b_grad) + 1e-3 * (1 + 1j),
args=arg)
for shape, low, high, b_grad, arg in args_tuple)
elif dtype == torch.bool:
arg_tuple = (0, 1, 1., 2.3)
samples = list(SampleInput(make_tensor((2, 2), device=device, dtype=dtype,
requires_grad=requires_grad),
args=(arg,))
for arg in arg_tuple)
dtypes_list = [torch.float64, torch.float32, torch.int64, torch.int32]
more_samples = list(SampleInput(make_tensor((2, 2), device, dtype=torch.bool,
requires_grad=requires_grad),
args=(make_tensor((2, 2), device, dtype=dtype,
requires_grad=requires_grad),))
for dtype in dtypes_list)
samples = [*samples, *more_samples]
samples.append(SampleInput(make_tensor((2, 2, 2), device, dtype=torch.bool,
requires_grad=requires_grad),
args=(make_tensor((2, 1), device, dtype=torch.float64,
requires_grad=requires_grad),)))
else:
exp_tuple = (1, 2, 3)
samples = list(SampleInput(make_tensor((2, 2), device, dtype,
requires_grad=requires_grad),
args=(arg,))
for arg in exp_tuple)
samples.append(SampleInput(make_tensor((2, 2), device, dtype,
requires_grad=requires_grad),
args=(make_tensor((2, 2), device, dtype,
requires_grad=requires_grad),)))
return tuple(samples)
def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):
return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=False)
def sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False, **kwargs):
return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=True)
def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 2, 0]
samples = []
for batch, (m, n) in product(batches, product(ns, ns)):
a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)
samples.append(SampleInput(a))
return samples
def sample_inputs_hardshrink_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):
N = 10
tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
requires_grad=requires_grad)) for _ in range(1, N)]
return tensors
def sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):
eigvecs = make_tensor((S, S), device=device, dtype=dtype,
low=None, high=None)
eigvals = make_tensor((S,), device=device, dtype=dtype,
low=None, high=None)
    # we produce only diagonalizable inputs which do not have
# complex eigenvalues for real inputs, as there is no
# backward implementation for real inputs with complex
# eigenvalues yet.
input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()
input.requires_grad_(requires_grad)
def process_output(eigpair):
eigvals, eigvecs = eigpair
if dtype.is_complex:
            # eig produces eigenvectors which are normalized to unit norm.
# Note that if v is an eigenvector, so is v * e^{i \phi},
# and |v| = |v * e^{i \phi}| = 1.
# This, however, makes the eigenvector backward computation process
# rather unstable unless the objective function is gauge-invariant,
# that is if f(z) == f(|z|), for example.
# Hence for complex inputs we ignore the phases and return only
# the absolute values.
return eigvals, eigvecs.abs()
else:
return eigvals, eigvecs
return [
SampleInput(
input,
kwargs=dict(eigenvectors=True),
output_process_fn_grad=process_output
),
]
def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):
x = make_tensor((3,), device, dtype, requires_grad=requires_grad)
y = make_tensor((4,), device, dtype, requires_grad=requires_grad)
A = make_tensor((2, 3,), device, dtype, requires_grad=requires_grad, noncontiguous=True)
B = make_tensor((1, 3,), device, dtype, requires_grad=requires_grad)
C = make_tensor((1, 2, 3,), device, dtype, requires_grad=requires_grad)
D = make_tensor((1, 3, 4,), device, dtype, requires_grad=requires_grad, noncontiguous=True)
E = make_tensor((4, 4,), device, dtype, requires_grad=requires_grad)
H = make_tensor((3, 3,), device, dtype, requires_grad=requires_grad, noncontiguous=True)
I = make_tensor((1, 3, 1,), device, dtype, requires_grad=requires_grad)
inputs = []
# Vector operations
inputs.append(SampleInput([x], args=('i->',))) # sum
inputs.append(SampleInput([x, y], args=('i,j->ij',))) # outer
# Matrix operations
inputs.append(SampleInput([A], args=("ij->i",))) # col sum
inputs.append(SampleInput([A, B], args=("ij,kj->ik",))) # matmul
inputs.append(SampleInput([A, E], args=("ij,Ab->ijAb",))) # matrix outer product
# Tensor operations
inputs.append(SampleInput([C, D], args=("aij,ajk->aik",))) # batch matmul
inputs.append(SampleInput([D, E], args=("aij,jk->aik",))) # tensor matrix contraction
inputs.append(SampleInput([C, B], args=("ijk,ik->j",))) # non contiguous
# Test diagonals
inputs.append(SampleInput([I], args=('iji->j',))) # non-contiguous trace
# Test ellipsis
inputs.append(SampleInput([H], args=("i...->...",)))
inputs.append(SampleInput([C, x], args=('...ik, ...j -> ij',)))
return inputs
def sample_inputs_linalg_qr(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.qr
The input is generated as the itertools.product of 'batches' and 'ns'.
"""
batches = [(), (0,), (2, ), (1, 1)]
ns = [5, 2, 0]
out = []
for batch, (m, n) in product(batches, product(ns, ns)):
a = torch.randn(*batch, m, n, dtype=dtype, device=device, requires_grad=requires_grad)
out.append(SampleInput(a))
return out
def sample_inputs_geqrf(op_info, device, dtype, requires_grad=False):
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 2, 0]
samples = []
for batch, (m, n) in product(batches, product(ns, ns)):
# TODO: CUDA path doesn't work with batched or empty inputs
if torch.device(device).type == 'cuda' and (batch != () or m == 0 or n == 0):
continue
a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)
samples.append(SampleInput(a))
return samples
def sample_inputs_flip(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((S, M, S), (S, 0, M))
all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())
def gen_samples():
for size, dims in product(sizes, all_dims):
yield SampleInput(make_arg(size), kwargs={"dims": dims})
return list(gen_samples())
def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):
tensors = (
make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)
)
return [SampleInput(tensor) for tensor in tensors]
def sample_inputs_fmod_remainder(op_info, device, dtype, requires_grad, *, autodiffed=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
if autodiffed:
samples = (
((S, S, S), 1.5, False),
((), 1.5, False),
)
else:
cases = (
((S, S, S), (), False),
((S, S, S), (S, S, S), False),
((S, S, S), (S,), False),
)
# Sample inputs with scalars as torch tensors
cases_with_tensor_scalar = (
((), torch.tensor(1, dtype=dtype, device=device, requires_grad=False), False),
)
# Sample inputs with broadcasting
cases_with_broadcasting = (
((S,), (S, S, S), True),
((S, 1, S), (S, S, S), True),
((), (S, S, S), True),
)
samples = cases + cases_with_tensor_scalar + cases_with_broadcasting # type: ignore[assignment]
def generator():
for shape, arg_other, broadcasts_input in samples:
if isinstance(arg_other, tuple):
arg = make_arg(arg_other, requires_grad=False, exclude_zero=True)
else:
# shape_other is scalar or torch.tensor
arg = arg_other
yield(SampleInput(make_arg(shape), args=(arg,), broadcasts_input=broadcasts_input))
return list(generator())
# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!
def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):
x = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
lb = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
ub = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
def detach(tensor):
return tensor.clone().detach_().requires_grad_(requires_grad)
return [
SampleInput(detach(x), args=(lb, ub)),
SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),
SampleInput(detach(x), args=(detach(lb[:, :1]),)),
]
def sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad):
tensors = (
make_tensor((2, 3, 2), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((2, 0, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
if dtype is torch.uint8:
min_max_vals = ((2, 5), (3, 7))
else:
min_max_vals = ((0, 1), (-1, 1))
output = [SampleInput(tensor, args=vals) for tensor, vals in product(tensors, min_max_vals)]
output += [SampleInput(tensors[0], args=(0.5, None)), SampleInput(tensors[0], args=(None, 0.5))]
empty_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)
output += [SampleInput(empty_tensor, args=(0.0, 1.0)), ]
return output
def sample_kwargs_clamp_scalar(device, dtype, input):
if dtype is torch.uint8:
min_val, max_val = (random.randint(1, 3), random.randint(4, 8))
elif dtype.is_floating_point:
min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8)) # type: ignore[assignment]
else:
min_val, max_val = (random.randint(-8, 0), random.randint(1, 8))
return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val}
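# Hedged usage sketch: the two dicts returned above are intended for the torch op and its NumPy
# reference respectively, e.g.
#   torch_kwargs, np_kwargs = sample_kwargs_clamp_scalar(device, dtype, t)
#   torch.clamp(t, **torch_kwargs)          # uses {'min': ..., 'max': ...}
#   np.clip(t.cpu().numpy(), **np_kwargs)   # uses {'a_min': ..., 'a_max': ...}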
def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs):
sample0 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),))
sample1 = SampleInput(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),),
kwargs={'dim': 1})
return (sample0, sample1)
def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):
def make_arg(shape):
# shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)
def prod_zeros(dim_select):
assert len(dim_select) == 2
result = make_arg(3 * (S,))
with torch.no_grad():
result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()
result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()
result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()
return result
    # will not be needed once OpInfo tests support Iterables
def sample_generator():
for dim in range(3):
yield SampleInput(make_arg((S, S, S)), args=(dim,))
# Scalar tensors and empty tensor
for size in [(), (1,), (0,)]:
yield SampleInput(make_arg(size), args=(0,))
yield SampleInput(prod_zeros([0, 1]), args=(1,))
yield SampleInput(prod_zeros([0, 2]), args=(1,))
yield SampleInput(prod_zeros([1, 2]), args=(1,))
# test dtype kwarg
yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})
return list(sample_generator())
def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):
return [SampleInput(make_tensor((S, 2), device, dtype, requires_grad=requires_grad),)]
def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):
tensors = (
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((), device, dtype, requires_grad=requires_grad)
)
return [SampleInput(tensor) for tensor in tensors]
def sample_inputs_copysign(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor(*shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
cases = [
# no broadcast
((S, S, S), (S, S, S), False),
# broadcast rhs
((S, S, S), (S, S), False),
# scalar
((S, S), 3.14, False),
# scalar positive zero
((S, S), 0.0, False),
# scalar negative zero
((S, S), -0.0, False),
]
# broadcast lhs
cases.append(((S, S), (S, S, S), True))
# broadcast all
cases.append(((S, 1, S), (M, S), True))
def generator():
for input_shape, arg_val, broadcasts_input in cases:
if isinstance(arg_val, tuple):
arg = _make_tensor(*arg_val)
else:
# arg_val is scalar
arg = arg_val
yield SampleInput(_make_tensor(*input_shape), args=(arg, ), broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_prod(op_info, device, dtype, requires_grad):
def make_arg(shape):
# shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)
def prod_single_zero():
result = make_arg(2 * (S,))
with torch.no_grad():
result[0, 1] = 0
return result
# will not be needed once OpInfo tests support Iterables
def sample_generator():
for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):
yield SampleInput(sample.input) # only Tensor, ignore other inputs
yield sample
sample.kwargs['keepdim'] = True
yield sample
yield SampleInput(prod_single_zero())
yield SampleInput(make_arg((3, 3, 3)), args=(1,))
yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})
# test zero scalar tensor
zero = make_arg(())
with torch.no_grad():
zero.zero_()
yield SampleInput(zero)
yield SampleInput(zero, args=(0,))
yield SampleInput(zero, args=(0,), kwargs={'keepdim': True})
return list(sample_generator())
def sample_inputs_nextafter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (
((S, S), (S, S), False),
((S, S), (S,), False),
((S, ), (S, S), True)
)
def generator():
for shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape), args=(make_arg(other_shape),), broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):
vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))
tensors = (
make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((3, 5), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
args = ((), (2,), (-2,), (1,), (2,))
samples = []
for tensor, arg in product(tensors, args):
samples.append(SampleInput(tensor, args=arg))
return samples + [vec_sample]
def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# Shapes for 2D Tensors
shapes_2d = ((M, M), (3, 5), (5, 3))
# Shapes for 3D Tensors
shapes_3d = ((M, M, M),)
args_2d = ((), (2,), (-2,), (1,))
args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))
def generator():
for shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):
yield SampleInput(make_arg(shape), args=arg)
return list(generator())
def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=lambda x: x.to_dense()),
SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=lambda x: x.to_dense()),)
# Used for both log_softmax and softmax
def sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = [
((S, ), (0, )),
((S, S), (0, )),
((S, S), (1, )),
((S, S), (-1, )),
((S, M, S), (2, )),
]
    # PyTorch on XLA throws an error when passed a dim argument for a 0d tensor.
# See https://github.com/pytorch/xla/issues/3061 for more details.
if torch.device(device).type != 'xla':
cases.append(((), (0, )))
return [
SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None)
for shape, dim in cases
]
def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):
low, high = op_info.domain
    # Note: the operator is very sensitive at points near the
    # start and end of its domain and produces NaN for float16
# if domain_eps is 1e-5.
domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2
low = low + domain_eps
high = high - domain_eps
samples = (
SampleInput(make_tensor((S, S, S), device, dtype, low=low, high=high, requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), device, dtype, low=low,
high=high, requires_grad=requires_grad), args=(0.2,)),
SampleInput(make_tensor((), device, dtype, low=low, high=high, requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype, low=low,
high=high, requires_grad=requires_grad), args=(0.2,)),
)
return samples
def sample_inputs_isin(op_info, device, dtype, requires_grad):
element = make_tensor((L,), device, dtype, low=None, high=None, requires_grad=requires_grad)
indices = torch.randint(0, L, size=[S])
test_elements = element[indices].clone()
return [
SampleInput(element, args=(test_elements,))
]
def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def samples_generator():
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))
yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))
yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),
broadcasts_input=True)
samples = tuple(samples_generator())
return samples
def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def sample_generator():
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))
yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, make_arg(())),
broadcasts_input=True)
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, 10),
broadcasts_input=True)
samples = tuple(sample_generator())
return samples
def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):
samples = (
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn(M, M, device=device) > 0,)),
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M,), device=device) > 0,)),
SampleInput(make_tensor((M,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
SampleInput(make_tensor((M, 1, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.tensor(1, device=device, dtype=torch.bool),)),
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.tensor(1, device=device, dtype=torch.bool),)),
SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
)
return samples
def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):
samples = (
SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), device, dtype, requires_grad=requires_grad)),
)
return samples
def sample_inputs_matmul(op_info, device, dtype, requires_grad):
test_cases = (((L,), (L,)),
((S, M), (M,)),
((M,), (M, S)),
((S, M), (M, S)),
((S, 0), (0, M)),
((S, S, M), (M,)),
((S, S, M), (M, S)),
((S, S, 0), (0, S)),
((M,), (S, M, S)),
((S, M), (S, M, S)),
((0, 0), (S, 0, 0)),
((S, S, M, M), (S, S, M, S)),
((S, S, M, M), (M,)),
((M,), (S, S, M, S)))
sample_inputs = []
for lhs_shape, rhs_shape in test_cases:
lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
if op_info.name == 'matmul':
sample_inputs.append(SampleInput(lhs, args=(rhs,)))
elif op_info.name == '__rmatmul__':
sample_inputs.append(SampleInput(rhs, args=(lhs,)))
else:
raise RuntimeError("`op_info.name` must be 'matmul' or '__rmatmul__'")
return tuple(sample_inputs)
def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype,
requires_grad: bool,
*, variant: str) -> List[SampleInput]:
if variant == 'variadic':
def make_inputs(
tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
List[torch.Tensor]],
Tuple[torch.Tensor, ...]]:
return tensors[0], tuple(tensors[1:])
elif variant == 'list':
def make_inputs(
tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
List[torch.Tensor]],
Tuple[torch.Tensor, ...]]:
return tensors, ()
else:
raise ValueError(
'Unsupported variant, must be one of {"variadic", "list"}. '
f'Got "{variant}".')
SCALAR = torch.Size([])
VECTOR = torch.Size([3])
test_cases: List[List[torch.Size]] = [
[SCALAR],
[VECTOR],
[VECTOR, SCALAR],
[VECTOR, SCALAR, VECTOR],
[VECTOR, SCALAR, VECTOR, SCALAR],
]
sample_inputs = []
for shapes in test_cases:
input, args = make_inputs(
[make_tensor(shape, device, dtype, requires_grad=requires_grad)
for shape in shapes])
sample_inputs.append(SampleInput(input=input, args=args))
return sample_inputs
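# Hedged illustration of the two call conventions exercised above (x, y, z being 1-D tensors):
#   variadic: torch.meshgrid(x, y, z)   -> SampleInput(input=x, args=(y, z))
#   list:     torch.meshgrid([x, y, z]) -> SampleInput(input=[x, y, z], args=())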
def sample_inputs_polar(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
samples = (
SampleInput(_make_tensor_helper((S, S), low=0), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper((), low=0), args=(_make_tensor_helper(()),)),
)
return samples
def sample_inputs_complex(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor_helper(shape):
return make_tensor(shape, device, dtype, requires_grad=requires_grad)
samples = (
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),
)
return samples
def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
def generator():
for shape, n in product(tensor_shapes, ns):
yield SampleInput(make_arg(shape), args=(n,))
return list(generator())
def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
# Since the accepted lower bound for input
    # to mvlgamma depends on the `p` argument,
# the following function computes the lower bound
# which we pass to `make_tensor`.
def compute_min_val(p):
return (p - 1.) / 2
def generator():
for shape, n in product(tensor_shapes, ns):
min_val = compute_min_val(n)
if not dtype.is_floating_point:
# Round-up minimum value for integral dtypes
min_val += 1
yield SampleInput(make_arg(shape, low=min_val), args=(n,))
return list(generator())
# Since `mvlgamma` has multiple entries,
# there are multiple common skips for the additional
# entries. The following function is a helper to that end.
def skips_mvlgamma(skip_redundant=False):
skips = (
# outside domain values are hard error for mvlgamma op.
SkipInfo('TestUnaryUfuncs', 'test_float_domains'),
)
if skip_redundant:
# Redundant tests
skips = skips + ( # type: ignore[assignment]
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
)
return skips
# To test reference numerics against multiple values of argument `p`,
# we make multiple OpInfo entries with each entry corresponding to a different value of p.
# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.
# Class `MvlGammaInfo` already contains the basic information related to the operator;
# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which
# differ between the entries.
class MvlGammaInfo(UnaryUfuncInfo):
def __init__(self, variant_test_name, domain, skips, sample_kwargs):
super(MvlGammaInfo, self).__init__(
'mvlgamma',
ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,
aliases=('special.multigammaln',),
variant_test_name=variant_test_name,
domain=domain,
decorators=(precisionOverride({torch.float16: 5e-2}),),
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half),
sample_inputs_func=sample_inputs_mvlgamma,
safe_casts_outputs=True,
supports_forward_ad=True,
skips=skips,
sample_kwargs=sample_kwargs)
def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):
low, _ = op_info.domain
if requires_grad:
low = 0 + op_info._domain_eps
return (SampleInput(make_tensor((L,), device, dtype,
low=low,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
low=low,
requires_grad=requires_grad)))
def sample_inputs_zeta(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = (SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),
args=(make_arg((S,), low=2, requires_grad=False),)),
SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),
args=(3.,)),
)
return samples
# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`,
# supports `exclude` argument.
# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617
def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):
samples = (SampleInput(make_tensor((S,), device, dtype,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
requires_grad=requires_grad)))
if requires_grad and op_info.op == torch.special.i0e:
        # NOTE: `i0e`'s first-order gradient is not continuous
# at `0`, hence we don't test `i0e` with any input being `0`.
# TODO: Remove this when `make_tensor` supports excluding `0`.
with torch.no_grad():
for sample in samples:
t = sample.input
t[t == 0] = torch.finfo(dtype).eps # type: ignore[index]
elif requires_grad and op_info.op != torch.special.i0e:
# Special Case for gradient
# Sample with `0` in the input
t = make_tensor((S,), device, dtype,
requires_grad=requires_grad)
with torch.no_grad():
t[0] = 0
samples += (SampleInput(t),) # type: ignore[assignment]
return samples
def sample_inputs_rsub(op_info, device, dtype, requires_grad, variant='tensor', **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _samples_with_alpha_helper(args, alphas, filter_fn=lambda arg_alpha: True):
filtered_product = filter(filter_fn, product(args, alphas)) # type: ignore[var-annotated]
return (SampleInput(input, args=(arg,), kwargs=dict(alpha=alpha))
for (input, arg), alpha in filtered_product)
int_alpha, float_alpha, complex_alpha = 2, 0.1, 1 + 0.6j
if variant == 'tensor':
samples = (
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S,)),)),
SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper((S,)),)),
SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper(()),)),
)
if dtype.is_complex:
alphas = [int_alpha, float_alpha, complex_alpha]
elif dtype.is_floating_point:
alphas = [int_alpha, float_alpha]
else:
alphas = [int_alpha]
args = ((_make_tensor_helper((S, S)), _make_tensor_helper((S, S))),
(_make_tensor_helper((S, S)), _make_tensor_helper((S,))),
(_make_tensor_helper(()), _make_tensor_helper(())))
samples += tuple(_samples_with_alpha_helper(args, alphas)) # type: ignore[assignment]
elif variant == 'scalar':
# Scalar Other
samples = (SampleInput(_make_tensor_helper((S, S)), args=(0.5,)),
SampleInput(_make_tensor_helper(()), args=(0.5,)),
SampleInput(_make_tensor_helper((S, S)), args=(1.5j,)),
SampleInput(_make_tensor_helper(()), args=(1.5j,)),
SampleInput(_make_tensor_helper((S, S)), args=(0.4 + 1.2j,)),
SampleInput(_make_tensor_helper(()), args=(1.2 + 1.76j,)))
scalar_args = [(_make_tensor_helper((S, S)), 0.5), (_make_tensor_helper(()), 0.5),
(_make_tensor_helper((S, S)), 2.7j), (_make_tensor_helper(()), 2.7j),
(_make_tensor_helper((S, S)), 1 - 2.7j), (_make_tensor_helper(()), 1 + 2.7j)]
alphas = [int_alpha, float_alpha, complex_alpha]
def filter_fn(arg_alpha):
arg, alpha = arg_alpha
if isinstance(alpha, complex):
if dtype.is_complex or isinstance(arg[1], complex):
return True
else:
# complex alpha is valid only if either `self` or `other` is complex
return False
# Non-Complex Alpha
return True
        # Samples with alpha (scalar version) cover the following cases
# self | other | alpha
# -----------------------------------------
# real | real | real (int and float)
# real | complex | real and complex
# complex | real | real and complex
# complex | complex | real and complex
#
# It does not cover
# real | real | complex
# x = torch.randn(2, requires_grad=True, dtype=torch.float64)
# torch.rsub(x, 1, alpha=1. + 1.6j)
# RuntimeError: value cannot be converted to type double without overflow: (-1,-1.6)
samples += tuple(_samples_with_alpha_helper(scalar_args, alphas, filter_fn=filter_fn)) # type: ignore[assignment]
else:
raise Exception("Invalid variant!")
return samples
def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
samples = [
SampleInput(_make_tensor_helper((S, S, S)), args=(0,)),
SampleInput(_make_tensor_helper((S, S, S)), args=(1,)),
SampleInput(_make_tensor_helper(()), args=(0,)),
]
if supports_dtype_kwargs:
        # NOTE: if `dtype` is not the same as the input's dtype, then inplace variants fail with
# `provided dtype must match the dtype of self tensor in cumsum`
samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype}))
return samples
def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((), (0, 1, 1)),
((S, S, S, S), (0, 3, 1)),
((S, S, S, S), (1, 3, 1)),
((S, S, S, S), (2, 3, 1)),
((S, S, S, S), (3, 3, 1)),
((S, S, S, S), (0, 3, 2)),
((S, S, S, S), (1, 3, 2)),
((S, S, S, S), (2, 3, 2)),
((S, S, S, S), (3, 3, 2)),
((S, S, S, S), (0, 4, 1)),
((S, S, S, S), (1, 4, 1)),
((S, S, S, S), (2, 4, 1)),
((S, S, S, S), (3, 4, 1)),
((M,), (0, 3, 1)),
((M,), (0, 3, 2)),
((M,), (0, 3, 3)),
((1000,), (0, 3, 11)),
((1000,), (0, 2, 27)),
((10, 10), (0, 1, 2)),
((10, 10), (1, 2, 3)),
((10, 10), (1, 2, 2)),
((S, S, S), (2, 3, 2)),
)
sample_inputs = []
for shape, arguments in test_cases:
sample_inputs += [SampleInput(make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=arguments)]
return sample_inputs
def sample_inputs_atan2(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S, S), (S, S, S), False),
((), (), False),
((S, S, S), (S,), False),
((S,), (S, S, S), True),
((S, 1, S), (S, S), True),
)
def generator():
for x_shape, y_shape, broadcasts_input in cases:
yield SampleInput(make_arg(x_shape), args=(make_arg(y_shape),),
broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
if list_args:
cases = (
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),),
((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),)
)
else:
cases = ( # type: ignore[assignment]
((S, S, S), (2,)),
((S, S, S), (S, 1)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
((S, S, S), ([int(S / 3), S - int(S / 3), 0],)),
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], 2)),
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], -2)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_msort(op_info, device, dtype, requires_grad):
def apply_grad(t):
if dtype in floating_types_and(torch.float16, torch.bfloat16):
t.requires_grad_(requires_grad)
def large_1d_unique(dtype, device):
res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
res = res.to(dtype)
apply_grad(res)
return res
samples = []
# Test case for large tensor.
largesample = SampleInput(large_1d_unique(dtype, device))
sample = SampleInput(make_tensor((S, M, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad))
return [largesample, sample]
def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
samples = (
# no broadcast
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),
# broadcast rhs
SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),
# scalar tensor
SampleInput(make_arg(()), args=(make_arg(()), 0.4)),
# broadcast rhs scalar-tensor
SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),
# broadcast rhs with weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),
# broadcast rhs and weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),
# broadcast_lhs
SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# scalar broadcast_lhs
SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# broadcast all
SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# tensor broadcast all
SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),
broadcasts_input=True),
)
if dtype.is_complex:
samples = samples + ( # type: ignore[assignment]
# no broadcast
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),
# broadcast rhs
SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),
# scalar tensor
SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),
SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),
# broadcast rhs scalar-tensor
SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),
)
return samples
def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):
cases = (
((2, 2, 2), (2, 2, 2), (2)),
((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])),
)
samples = []
for first_shape, second_shape, dims in cases:
samples.append(SampleInput(make_tensor(first_shape, device, dtype,
requires_grad=requires_grad),
args=(make_tensor(second_shape, device, dtype,
requires_grad=requires_grad),),
kwargs=dict(dims=dims,)))
return tuple(samples)
def sample_inputs_kron(op_info, device, dtype, requires_grad):
test_cases = (
((S, S), (M, L)),
)
sample_inputs = []
for input_shape, other_shape in test_cases:
input = make_tensor(input_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
other = make_tensor(other_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
sample = SampleInput(input, args=(other,))
sample_inputs.append(sample)
return tuple(sample_inputs)
def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, ), device, dtype, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, requires_grad=requires_grad),
)
),
SampleInput(
make_tensor((), device, dtype, requires_grad=requires_grad),
args=(
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
)
),
)
def sample_inputs_scatter(op_info, device, dtype, requires_grad):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
(_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
(_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor(()), (0, zero.clone().detach(), _tensor(()))),
(_tensor(()), (0, zero.clone().detach(), 2.5)),
)
samples = []
for tensor, args in test_cases:
samples.append(SampleInput(tensor, args=args))
if not requires_grad:
samples.append(SampleInput(
tensor.clone().detach(),
args=args, kwargs={'reduce': 'add'}
))
if dtype.is_floating_point:
samples.append(SampleInput(
tensor.clone().detach(),
args=args, kwargs={'reduce': 'multiply'}
))
return samples
def sample_inputs_scatter_add(op_info, device, dtype, requires_grad):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
(_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
(_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor(()), (0, zero.clone().detach(), _tensor(()))),
)
return [SampleInput(tensor, args=args) for tensor, args in test_cases]
def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):
samples = (SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
low=None, high=None,
requires_grad=requires_grad)),)
return samples
def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((M, M), ()),
((M, M), (2,),),
((S, M, M), ()),
((S, M, M), (2,)),
((3, 3, S, S), ()),)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def generator():
yield SampleInput(make_arg((S, M, S)))
yield SampleInput(make_arg(()))
return list(generator())
def sample_inputs_contiguous(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def generator():
yield SampleInput(make_arg((S, S)))
yield SampleInput(make_arg((S, S), noncontiguous=True))
return list(generator())
def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (S * S, S)),
((), ()),
((), (1, 1, 1)),
)
def generator():
for shape, args_or_shape in cases:
# Update `args` based on operator
if op_info.name == 'resize_':
# resize_ takes a shape (tuple of ints)
args = (args_or_shape, )
elif op_info.name == 'resize_as_':
# resize_as_ takes another tensor
args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment]
else:
raise ValueError("sample_inputs_resize_ops is being used with incorrect operator")
yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))
return list(generator())
def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (S * S, S)),
((S * S, S), (S, S, S)),
((S,), (S,)),
((), ()),
((), (1,)))
def generator():
for case in cases:
shape, args = case
inp = make_arg(shape, requires_grad=requires_grad)
yield(SampleInput(inp, args=(args, )))
if op_info.name != "view" and len(shape) >= 2:
yield(SampleInput(inp.transpose(0, 1), args=(args, )))
return list(generator())
def sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (S * S, S)),
((), ()),
((), (1, 1)),
)
def generator():
for case in cases:
shape, shape_other = case
inp = make_arg(shape, requires_grad=requires_grad)
yield(SampleInput(inp, args=(make_arg(shape_other, requires_grad=False),)))
if op_info.name != "view_as" and len(shape) >= 2:
yield(SampleInput(inp.transpose(0, 1), args=(make_arg(shape_other, requires_grad=False),)))
return list(generator())
def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (1, 2)),
((S, S, S), (-1, 2)),
((S, S, S), (-1, -1)),
((S, S, S), (1, -1)),
((S,), (0, 2))
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_rbinops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
scalar: Union[int, float, complex] = 3
if dtype.is_floating_point:
scalar = 3.14
elif dtype.is_complex:
scalar = 3.14j
samples = [
SampleInput(_make_tensor_helper((S, S, S)), args=(scalar,)),
SampleInput(_make_tensor_helper(()), args=(scalar,)),
]
return samples
def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, 1, 1), (S, S, S)),
((S, 1, S), (S, S, S)),
((S, 1), (S, S, S)),
((1,), (S, S, S)),
((1, S), (1, 1, S)),
((), ()),
((), (1, 3, 2)),
)
def generator():
for case in cases:
shape, args = case
yield(SampleInput(make_arg(shape), args=(args, )))
return list(generator())
def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, 1, 1), (S, S, S)),
((), ()),
((), (1, 1)),
)
def generator():
for shape, shape_other in cases:
yield(SampleInput(make_arg(shape, requires_grad=requires_grad),
args=(make_arg(shape_other, requires_grad=False), )))
return list(generator())
def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def make_bool_mask(shape):
# Make sure at least one element is nonzero,
# except for an empty tensor
mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
if mask_t.numel() == 0:
return mask_t
elif mask_t.numel() == 1:
mask_t.fill_(True)
return mask_t
if mask_t.sum() == 0:
def random_index(shape):
return tuple(map(lambda max_idx: random.randint(0, max_idx), shape))
mask_t[random_index(mask_t.shape)] = True
return mask_t
return mask_t
cases = (((M, M), (M, M), (M, M), False),
((M, 1, M), (M, M), (M, M, 1), True),
((), (), (), False),
((M, 1, M), (), (M, M, 1), True),
((), (M, M), (), True),)
def generator():
for shape, mask_shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape),
args=(make_bool_mask(mask_shape), make_arg(other_shape)),
broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (2,)),
((S, S, S), (S, 1)),
((S, S, S), (S, -1)))
def generator():
for case in cases:
shape, args = case
yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))
return list(generator())
def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
test_cases = [
(_tensor((S, S, S)), (2,)),
(_tensor((S, S, S)), (2, 1,)),
(_tensor((S, S, S)), (2, -1,)),
(_tensor((S, S, S)), (2, 1, True,)),
(_tensor((S, S, S)), (2, -1, True,)),
(_tensor((S,)), (2, 0,)),
(_tensor((S,)), (2, 0, True,)),
(_tensor(()), (1,)),
(_tensor(()), (1, 0,)),
(_tensor(()), (1, 0, True))
]
return [SampleInput(tensor, args=args) for tensor, args in test_cases]
def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad)
shapes = ((), (S,), (L, M, S))
num_classess = (-1, 10)
return [
SampleInput(
make_input(
shape,
low=0,
high=10 if num_classes == -1 else num_classes // 2,
),
kwargs=dict(num_classes=num_classes),
)
for shape, num_classes in itertools.product(shapes, num_classess)
]
def sample_inputs_softplus(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_input()),
SampleInput(make_input(), kwargs=dict(beta=3)),
SampleInput(make_input(low=1), kwargs=dict(threshold=1)),
]
def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs):
def make_input():
input = make_fullrank_matrices_with_distinct_singular_values(12, 12, device=device, dtype=dtype)
return input.requires_grad_(requires_grad)
# lhs / rhs shape can have any number of dimensions as long as their product equals 12
shapes = [
((2, 2, 3), (12, 1)),
((4, 3), (6, 1, 2)),
]
return [
SampleInput(make_input().reshape(*shape_lhs, *shape_rhs), kwargs=dict(ind=len(shape_lhs)))
for shape_lhs, shape_rhs in shapes
]
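# For context (illustrative): with shapes ((2, 2, 3), (12, 1)) the input is reshaped to
# (2, 2, 3, 12, 1), and `ind=3` tells tensorinv to treat the leading 3 dimensions
# (product 12) as the "rows" and the trailing dimensions (product 12) as the "columns".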
def sample_inputs_mse_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shapes_and_kwargs = [
((), None),
((S,), dict(reduction="mean")),
((S,), dict(reduction="sum")),
((S,), dict(reduction="none")),
((S, S), None),
((S, S, S), None),
]
return [
SampleInput(_make_tensor(shape), args=(_make_tensor(shape),), kwargs=kwargs)
for shape, kwargs in shapes_and_kwargs
]
def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
batch_size = 2
num_channels = 3
modes = ("bilinear", "nearest")
align_cornerss = (False, True)
padding_modes = ("zeros", "border", "reflection")
sample_inputs = []
for dim in (2, 3):
input = _make_tensor((batch_size, num_channels, *[S] * dim))
grid = _make_tensor((batch_size, *[S] * dim, dim))
modes_ = (*modes, "bicubic") if dim == 2 else modes
for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss):
sample_inputs.append(
SampleInput(
input,
args=(grid,),
kwargs=dict(
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
)
)
)
return sample_inputs
def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
batch_size, num_classes = shape = (2, 3)
input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [
((*shape, 1), dict()),
((*shape, 1, 2), dict()),
((*shape, 1, 2, 3), dict()),
(shape, dict(weight=make_tensor((num_classes,), device=device, dtype=dtype).abs())),
(shape, dict(ignore_index=num_classes // 2)),
(shape, dict(reduction="sum")),
(shape, dict(reduction="mean")),
]
sample_inputs = []
for input_shape, kwargs in input_shape_and_kwargs:
input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad)
target = make_tensor(
(batch_size, *input_shape[2:]),
low=0,
high=num_classes,
device=device,
dtype=torch.long,
requires_grad=requires_grad
)
sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs))
return sample_inputs
foreach_unary_op_db: List[OpInfo] = [
ForeachFuncInfo('exp'),
ForeachFuncInfo('acos'),
ForeachFuncInfo('asin'),
ForeachFuncInfo('atan'),
ForeachFuncInfo('cos'),
ForeachFuncInfo('cosh'),
ForeachFuncInfo('log'),
ForeachFuncInfo('log10'),
ForeachFuncInfo('log2'),
ForeachFuncInfo('tan'),
ForeachFuncInfo('tanh'),
ForeachFuncInfo('sin'),
ForeachFuncInfo('sinh'),
ForeachFuncInfo(
'neg',
dtypes=all_types_and_complex(),
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex(),
sample_inputs_func=sample_inputs_foreach,
safe_casts_outputs=False,
),
ForeachFuncInfo(
'sqrt',
dtypes=floating_types(),
dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
),
ForeachFuncInfo(
'ceil',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'erf',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'erfc',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'expm1',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'floor',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'log1p',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'round',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'frac',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'reciprocal',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'sigmoid',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'trunc',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'abs',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
safe_casts_outputs=False,
supports_forward_ad=True,
),
]
foreach_binary_op_db: List[OpInfo] = [
ForeachFuncInfo(
"add",
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
),
ForeachFuncInfo(
"sub",
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
),
ForeachFuncInfo(
"mul",
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
),
ForeachFuncInfo(
"div",
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
),
]
foreach_pointwise_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"addcmul",
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
"addcdiv",
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
),
]
foreach_minmax_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"maximum",
dtypesIfCPU=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
),
ForeachFuncInfo(
"minimum",
dtypesIfCPU=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
),
]
def reference_sign(x):
if x.dtype == np.bool_:
# `np.sign` doesn't support `bool`.
# >>> np.sign(True)
# ufunc 'sign' did not contain a loop
# with signature matching types dtype('bool') -> dtype('bool')
return np.sign(x, dtype=np.uint8).astype(np.bool_)
return np.sign(x)
def reference_sgn(x):
# NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex.
# For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j,
# while `torch.sgn` returns 0 if abs(input) == 0 else input / abs(input).
if x.dtype not in [np.complex64, np.complex128]:
return reference_sign(x)
out = (x / np.abs(x))
if out.ndim == 0:
# Handle x == 0 case
if (x == 0):
# Can't assign to np.complex object
# So make a new one.
return np.array(complex(0, 0), dtype=x.dtype)
return out
# Handle x == 0 case
mask = (x == 0)
out[mask] = complex(0, 0)
return out
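# Example of the difference handled above (illustrative, not executed):
# >>> np.sign(3 - 4j)
# (1+0j)                      # sign of the real part only
# >>> torch.sgn(torch.tensor(3 - 4j))
# tensor(0.6000-0.8000j)      # input / abs(input)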
def reference_sigmoid(x):
# 'scipy.special.expit' is not supported for complex input types, so use the explicit formula
if x.dtype in [np.complex64, np.complex128]:
return (1 / (1 + np.exp(-x)))
return scipy.special.expit(x)
def reference_logsigmoid(x):
max_ = np.maximum(x.dtype.type(0), -x)
z = np.exp(-max_) + np.exp(-x - max_)
return -(max_ + np.log(z))
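# Note: this is the standard numerically stable rewrite of log(sigmoid(x)) = -log(1 + exp(-x)):
# with m = max(0, -x), log(1 + exp(-x)) = m + log(exp(-m) + exp(-x - m)), which avoids
# overflow in exp() for large negative x.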
def reference_lgamma(x):
# scipy.special.gammaln returns `-inf` when the input is `-inf`,
# while PyTorch, C, and C++ all return `inf` for that input.
# Reference:
# https://en.cppreference.com/w/cpp/numeric/math/lgamma
# https://en.cppreference.com/w/c/numeric/math/lgamma
# To handle the above discrepancy,
# we replace -inf with inf so values
# that were originally -inf map to inf as expected
if x.dtype.kind == 'f':
x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)
out = scipy.special.gammaln(x)
if x.dtype == np.float16:
# `scipy.special.gammaln` returns float32 output when the input is float16,
# while `torch.lgamma` preserves `float16`. Due to the smaller range of float16,
# the PyTorch version outputs `inf` where SciPy returns finite values.
out = out.astype(np.float16)
return out
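# Concretely (illustrative, based on the note above, not executed):
# >>> scipy.special.gammaln(float('-inf'))
# -inf
# >>> torch.lgamma(torch.tensor(float('-inf')))
# tensor(inf)
# hence the remapping of -inf to inf before calling gammaln.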
def reference_polygamma(x, n):
# WEIRD `scipy.special.polygamma` behavior
# >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype
# dtype('float64')
# >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype
# dtype('float32')
#
# Thus we cast output to the default torch dtype.
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
return scipy.special.polygamma(n, x).astype(np_dtype)
def reference_mvlgamma(x, d):
if x.dtype == np.float16:
return scipy.special.multigammaln(x, d).astype(np.float16)
return scipy.special.multigammaln(x, d)
def reference_softplus(input, beta=1, threshold=20):
non_linear = input * beta <= threshold
output = input.copy()
output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta
return output
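# The threshold mirrors torch.nn.functional.softplus: for beta * input > threshold the
# result stays the identity (the untouched copy above), otherwise the log1p-style formula
# applies. Illustrative check (not executed):
# >>> reference_softplus(np.array([0.0, 100.0]))
# array([  0.69314718, 100.        ])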
def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray:
if num_classes == -1:
num_classes = int(np.amax(a) + 1)
idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes
one_hot = np.zeros((a.size, num_classes), dtype=a.dtype)
np.put(one_hot, idcs, 1)
return one_hot.reshape(*a.shape, -1)
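# The flat-index trick above: row i of the (a.size, num_classes) matrix gets a one in
# column a_flat[i], i.e. flat position i * num_classes + a_flat[i]. Illustrative (not executed):
# >>> reference_one_hot(np.array([1, 0, 2]), num_classes=3)
# array([[0, 1, 0],
#        [1, 0, 0],
#        [0, 0, 1]])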
def reference_mse_loss(input, target, reduction="mean"):
se = (input - target) ** 2
if reduction == "mean":
return np.mean(se)
elif reduction == "sum":
return np.sum(se)
else: # reduction == "none"
return se
def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
"""Gradcheck wrapper for functions that take Hermitian matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the Hermitian property of the input.
"""
return op(input + input.conj().transpose(-2, -1), *args, **kwargs)
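# Roughly how this is consumed (illustrative sketch; see e.g. the 'cholesky' entry below):
#   OpInfo('cholesky', ..., gradcheck_wrapper=gradcheck_wrapper_hermitian_input, ...)
# gradcheck perturbs an arbitrary matrix, but the op always sees input + input^H,
# which remains Hermitian under those perturbations.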
def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
"""Gradcheck wrpper for functions that take lower or upper triangular matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the triangular property of the input.
"""
return op(input.triu() if upper else input.tril(), upper)
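# Used analogously by e.g. the 'cholesky_inverse' entry below, which passes
#   gradcheck_wrapper=gradcheck_wrapper_triangular_input
# so the op only ever sees input.tril() (or input.triu() when upper=True), keeping the
# finite-difference perturbations inside the triangular domain.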
# Operator database (sorted alphabetically)
op_db: List[OpInfo] = [
UnaryUfuncInfo('abs',
aliases=('absolute', ),
ref=np.abs,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat]),
# Reference: https://github.com/pytorch/pytorch/issues/49224
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
# TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
# We can break the logic of the loop over all possible types, but it is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes',
dtypes=[torch.cfloat, torch.cdouble]),
),
supports_inplace_autograd=False,
assert_autodiffed=True,
supports_forward_ad=True),
# NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
UnaryUfuncInfo('acos',
aliases=('arccos', ),
ref=np.arccos,
domain=(-1, 1),
handles_complex_extremals=False,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "rsqrt_cpu" not implemented for 'BFloat16'
backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-1,
torch.complex64: 1e-2}),),
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestGradients', 'test_fn_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_method_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_inplace_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_inplace_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
aliases=('arccosh', ),
ref=np.arccosh,
domain=(1, None),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "rsqrt_cuda" not implemented for 'BFloat16'
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Reference: https://github.com/pytorch/pytorch/issues/50692
SkipInfo('TestGradients', 'test_fn_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_method_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestGradients', 'test_forward_mode_AD',
dtypes=[torch.cdouble]),
)),
BinaryUfuncInfo('add',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.add(input, np.multiply(alpha, other)),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_add_sub, alpha=2),
supports_inplace_autograd=False,
supports_forward_ad=True),
BinaryUfuncInfo('mul',
aliases=('multiply',),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_binary_pwise),
BinaryUfuncInfo('sub',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)),
aliases=('subtract',),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_add_sub, alpha=2),
supports_inplace_autograd=False),
OpInfo('addmm',
# This addmm OpInfo is for when alpha and beta are not both equal to 1.
# alpha=beta=1 is tested in the following opinfo, because that special case will
# trigger addmm being decomposed by a jit pass.
dtypes=floating_and_complex_types_and(torch.float16),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_addmm),
OpInfo('addmm',
# When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.
variant_test_name='decomposed',
dtypes=floating_and_complex_types_and(torch.float16),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),
OpInfo('addmv',
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_addmv),
OpInfo('addbmm',
ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M),
np.multiply(np.asarray(alpha, dtype=batch1.dtype),
np.sum(np.matmul(batch1, batch2), axis=0))),
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
backward_dtypesIfROCM=floating_types_and(torch.half),
supports_forward_ad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05),
torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_reference_testing')],
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# addbmm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
# https://github.com/pytorch/pytorch/issues/55907
SkipInfo('TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addbmm),
OpInfo('baddbmm',
dtypes=floating_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if SM53OrLater else [],
torch.complex64, torch.complex128),
supports_forward_ad=True,
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# baddbmm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_baddbmm),
OpInfo('dot',
dtypes=all_types_and_complex_and(torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
),
OpInfo('vdot',
dtypes=all_types_and_complex_and(torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
),
OpInfo('bmm',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# bmm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_bmm),
OpInfo('mv',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
skips=(
# mv does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_mv),
OpInfo('addr',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
# Reference: https://github.com/pytorch/pytorch/issues/50747
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/50747
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
),
sample_inputs_func=sample_inputs_addr,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('addcmul',
dtypes=all_types_and_complex(),
dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_inplace_autograd=False,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
OpInfo('addcdiv',
dtypes=floating_and_complex_types(),
dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
ref=np.arcsin,
domain=(-1, 1),
supports_sparse=True,
supports_forward_ad=True,
safe_casts_outputs=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),
'TestUnaryUfuncs', device_type='cuda'),
precisionOverride({torch.bfloat16: 1e-2}),
],
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
aliases=('arcsinh', ),
ref=np.arcsinh,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Complex gradcheck tests asinh at points 0 + ix for x > 1 which are points
# where asinh is not differentiable
SkipInfo('TestGradients', 'test_forward_mode_AD',
dtypes=complex_types()),
)),
UnaryUfuncInfo('atan',
aliases=('arctan', ),
ref=np.arctan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
OpInfo('atan2',
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_atan2,
),
UnaryUfuncInfo('atanh',
aliases=('arctanh', ),
ref=np.arctanh,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cfloat],
active_if=IS_WINDOWS),
)),
OpInfo('broadcast_to',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_broadcast_to),
OpInfo('bitwise_and',
dtypes=integral_types_and(torch.bool),
supports_autograd=False,
sample_inputs_func=sample_inputs_binary_pwise),
UnaryUfuncInfo('bitwise_not',
ref=np.bitwise_not,
dtypes=integral_types_and(torch.bool),
supports_autograd=False),
OpInfo('bitwise_left_shift',
op=torch.bitwise_left_shift,
dtypesIfCPU=all_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_bitwise_shift),
OpInfo('bitwise_right_shift',
op=torch.bitwise_right_shift,
dtypesIfCPU=all_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_bitwise_shift),
OpInfo('cdist',
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_cdist,
),
UnaryUfuncInfo('ceil',
ref=np.ceil,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('cholesky',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
# RuntimeError: torch.cholesky: U(1,1) is zero, singular U.
test_neg_view=False,
skips=(
# Gradcheck for complex generates invalid inputs for this function
SkipInfo('TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),)),
OpInfo('cholesky_inverse',
dtypes=floating_and_complex_types(),
backward_dtypes=floating_types(),
# TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs
# with complex dtype.
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky_inverse,
gradcheck_wrapper=gradcheck_wrapper_triangular_input,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# TODO: FIXME: cholesky_inverse throws an error in forward when requires_grad=True
# for complex tensors
SkipInfo('TestCommon', 'test_dtypes'),
# cholesky_inverse does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),)),
OpInfo('chunk',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_chunk,
supports_out=False),
OpInfo('clone',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_clone,
supports_forward_ad=True,
supports_out=False),
OpInfo('contiguous',
op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_contiguous,
supports_forward_ad=True,
skips=(
# JIT has issue when op is passed as lambda
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('symeig',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_symeig,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
# NOTE: clamp has separate OpInfos for scalar min/max (unary op) vs. tensor min/max
OpInfo('clamp',
aliases=('clip',),
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_clamp),
UnaryUfuncInfo('clamp',
variant_test_name='scalar',
aliases=('clip', ),
decorators=(precisionOverride({torch.bfloat16: 7e-2, torch.float16: 1e-2}),),
ref=np.clip,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/54841
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
),
sample_kwargs=sample_kwargs_clamp_scalar,
sample_inputs_func=sample_inputs_clamp_scalar),
UnaryUfuncInfo('positive',
ref=np.positive,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
),
UnaryUfuncInfo('conj',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool,
torch.bfloat16, torch.half),
supports_sparse=True,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('conj_physical',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool,
torch.bfloat16, torch.half),
supports_forward_ad=True,
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),
)),
OpInfo('resolve_conj',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_out=False,
),
OpInfo('resolve_neg',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_out=False,
),
OpInfo('view_as_real',
dtypes=complex_types(),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_view_as_real,
test_conjugated_samples=False,
),
OpInfo('view_as_complex',
dtypes=floating_types_and(torch.half),
supports_out=False,
supports_forward_ad=True,
test_neg_view=False,
sample_inputs_func=sample_inputs_view_as_complex),
OpInfo('complex',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_complex,
supports_forward_ad=True,
),
OpInfo('copysign',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_copysign,
supports_inplace_autograd=False,
supports_forward_ad=True,
),
OpInfo('corrcoef',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_corrcoef,
supports_out=False),
UnaryUfuncInfo('cos',
ref=np.cos,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.int8]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
OpInfo('cov',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_cov,
supports_out=False,
supports_forward_ad=True,
# JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit'),)),
OpInfo('cross',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
sample_inputs_func=sample_inputs_cross,
supports_forward_ad=True,
skips=(
# AssertionError: UserWarning not triggered :
# Resized a non-empty tensor but did not warn about it.
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('cumsum',
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
# cumsum does not correctly handle out= dtypes
SkipInfo('TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_cumulative_ops),
OpInfo('cumprod',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
skips=(
# cumprod does not correctly handle out= dtypes
SkipInfo('TestCommon', 'test_out',
dtypes=[torch.float32]),
),
# gradgradcheck fails in fast_mode=True: #56275
sample_inputs_func=sample_inputs_cumprod,
gradcheck_fast_mode=False),
OpInfo('cummax',
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('cummin',
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
UnaryUfuncInfo('deg2rad',
ref=np.radians,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
OpInfo('diff',
op=torch.diff,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diff),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='no_rounding_mode',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_forward_ad=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='trunc_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode="trunc"),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/59174
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='floor_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode="floor"),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/59174
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('true_divide',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_binary_pwise,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
UnaryUfuncInfo('exp',
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
),
assert_autodiffed=True,
supports_forward_ad=True,
safe_casts_outputs=True),
OpInfo('expand',
op=lambda self, shape: self.expand(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_expand,
skips=(
# Because expand does not have a function variant.
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
supports_forward_ad=True,
supports_out=False),
OpInfo('expand_as',
op=lambda self, other: self.expand_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_expand_as,
skips=(
# Because expand_as does not have a function variant.
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
supports_out=False),
OpInfo('diag',
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_diag),
OpInfo('diag_embed',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('diagonal',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('eq',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('fmax',
op=torch.fmax,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('fmin',
op=torch.fmin,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('fmod',
ref=np.fmod,
dtypes=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_fmod_remainder),
OpInfo('fmod',
ref=np.fmod,
variant_test_name='autodiffed',
dtypes=all_types_and(torch.float16, torch.bool),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True)),
OpInfo('remainder',
ref=np.remainder,
dtypesIfCPU=all_types_and(torch.float16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_fmod_remainder),
OpInfo('remainder',
ref=np.remainder,
variant_test_name='autodiffed',
dtypesIfCPU=all_types_and(torch.float16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True)),
UnaryUfuncInfo('frac',
ref=lambda x: np.modf(x)[0],
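# np.modf returns (fractional part, integral part); taking [0] selects the
# fractional part, which matches torch.frac.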
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
# Reference for disabling extremals
# https://github.com/pytorch/pytorch/issues/51948
handles_extremals=False),
SpectralFuncInfo('fft.fft',
aten_name='fft_fft',
ref=np.fft.fft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types()),
SpectralFuncInfo('fft.fftn',
aten_name='fft_fftn',
ref=np.fft.fftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})],),
SpectralFuncInfo('fft.hfft',
aten_name='fft_hfft',
ref=np.fft.hfft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.rfft',
aten_name='fft_rfft',
ref=np.fft.rfft,
ndimensional=False,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False),
SpectralFuncInfo('fft.rfftn',
aten_name='fft_rfftn',
ref=np.fft.rfftn,
ndimensional=True,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[precisionOverride({torch.float: 1e-4})],),
SpectralFuncInfo('fft.ifft',
aten_name='fft_ifft',
ref=np.fft.ifft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types()),
SpectralFuncInfo('fft.ifftn',
aten_name='fft_ifftn',
ref=np.fft.ifftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ihfft',
aten_name='fft_ihfft',
ref=np.fft.ihfft,
ndimensional=False,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
check_batched_grad=False),
SpectralFuncInfo('fft.irfft',
aten_name='fft_irfft',
ref=np.fft.irfft,
ndimensional=False,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.irfftn',
aten_name='fft_irfftn',
ref=np.fft.irfftn,
ndimensional=True,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
UnaryUfuncInfo('floor',
ref=np.floor,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('flip',
op=torch.flip,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_flip,
supports_forward_ad=True,
supports_out=False),
OpInfo('fliplr',
op=torch.fliplr,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
supports_forward_ad=True,
supports_out=False),
OpInfo('flipud',
op=torch.flipud,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('i0',
ref=np_unary_ufunc_integer_promotion_wrapper(
scipy.special.i0) if TEST_SCIPY else _NOTHING,
aliases=('special.i0',),
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 5e-1}),),
backward_dtypesIfCPU=floating_types(),
backward_dtypesIfCUDA=floating_types(),
backward_dtypesIfROCM=floating_types(),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_i0_i1),
UnaryUfuncInfo('special.i0e',
aten_name='special_i0e',
ref=scipy.special.i0e if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 3e-1}),),
backward_dtypesIfCPU=floating_types(),
backward_dtypesIfCUDA=floating_types(),
backward_dtypesIfROCM=floating_types(),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.i1',
aten_name='special_i1',
ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float: 1e-4}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.i1e',
aten_name='special_i1e',
ref=scipy.special.i1e if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.ndtr',
aten_name='special_ndtr',
decorators=(precisionOverride({torch.bfloat16: 5e-3,
torch.float16: 5e-4}),),
ref=scipy.special.ndtr if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16),
safe_casts_outputs=True),
BinaryUfuncInfo('floor_divide',
dtypes=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
),
UnaryUfuncInfo('frexp',
op=torch.frexp,
ref=np.frexp,
dtypes=floating_types_and(torch.half),
dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
# Skip testing torch.frexp as it is not yet supported on the ROCm platform
decorators=[skipCUDAIfRocm],
supports_out=False,
supports_forward_ad=True,
skips=(
# Skips the tests below because torch.frexp returns a tuple-like (mantissa, exponent) output,
# while these tests currently require the output to be a single tensor.
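# A minimal illustrative sketch of the tuple-like output:
# >>> mantissa, exponent = torch.frexp(torch.tensor([8.0]))
# >>> # mantissa == 0.5 and exponent == 4, since 8.0 == 0.5 * 2 ** 4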
SkipInfo('TestUnaryUfuncs', 'test_batch_vs_slicing'),
SkipInfo('TestUnaryUfuncs', 'test_contig_vs_every_other'),
SkipInfo('TestUnaryUfuncs', 'test_contig_vs_transposed'),
SkipInfo('TestUnaryUfuncs', 'test_non_contig_expand'),
SkipInfo('TestUnaryUfuncs', 'test_variant_consistency'),
# Skips test_reference_numerics due to an error in Windows CI:
# np.frexp returns the exponent as np.intc dtype on Windows,
# and np.intc does not have a corresponding torch dtype.
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=IS_WINDOWS),
)),
OpInfo('ge',
aliases=('greater_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('geqrf',
dtypes=floating_and_complex_types(),
dtypesIfCPU=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_geqrf,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
OpInfo('gt',
aliases=('greater',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
UnaryUfuncInfo('imag',
ref=np.imag,
dtypes=complex_types(),
supports_out=False,
supports_forward_ad=True,
skips=(
# Skip since real and imag don't have out variants.
SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('gradient',
dtypes=floating_and_complex_types_and(torch.int8, torch.int16,
torch.int32, torch.int64,
torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=(
# following tests give a runtime error with undefined value tensor
# see discussion : https://github.com/pytorch/pytorch/issues/56660
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)),
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_gradient),
OpInfo('inverse',
op=torch.inverse,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('isin',
dtypesIfCPU=all_types(),
dtypesIfCUDA=all_types_and(torch.half),
supports_autograd=False,
sample_inputs_func=sample_inputs_isin),
OpInfo('kthvalue',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_kthvalue),
OpInfo('le',
aliases=('less_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('linalg.det',
op=torch.linalg.det,
aliases=('det', ),
dtypes=floating_and_complex_types(),
backward_dtypes=floating_and_complex_types(),
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],
supports_inplace_autograd=False),
OpInfo('linalg.det',
op=torch.linalg.det,
variant_test_name='singular',
aliases=('det', ),
dtypes=double_types(),
backward_dtypes=double_types(),
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det_singular,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],
supports_inplace_autograd=False,
skips=(
# Will be removed once https://github.com/pytorch/pytorch/issues/62328 is fixed
# Probable fix (open PR): https://github.com/pytorch/pytorch/pull/62570
SkipInfo('TestGradients', 'test_fn_grad', device_type='cuda', dtypes=(torch.complex128,)),
SkipInfo('TestCommon', 'test_dtypes'),
SkipInfo('TestGradients', 'test_fn_gradgrad'),
# This test fails because singular inputs cannot be reliably
# generated unless we're using double types
SkipInfo('TestOpInfo', 'test_unsupported_dtypes'),
SkipInfo('TestOpInfo', 'test_unsupported_backward',
dtypes=(torch.float32, torch.complex64,)),
)),
OpInfo('linalg.cholesky',
aten_name='linalg_cholesky',
dtypes=floating_and_complex_types(),
# TODO: RuntimeError: While computing batched gradients,
# got: vmap: Calling Tensor.as_strided is not supported
# unless the batch dims being vmapped over are at the front of the tensor (in memory layout).
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
# RuntimeError: torch.linalg.cholesky: U(1,1) is zero, singular U.
test_neg_view=False,
skips=(
# Gradcheck for complex generates invalid inputs for this function
SkipInfo('TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),),
),
OpInfo('linalg.cholesky_ex',
aten_name='linalg_cholesky_ex',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.cond',
aten_name='linalg_cond',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_cond,
check_batched_gradgrad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.eig',
aten_name='linalg_eig',
op=torch.linalg.eig,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_eig,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.eigvals',
aten_name='linalg_eigvals',
op=torch.linalg.eigvals,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.eigh',
aten_name='linalg_eigh',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.eigvalsh',
aten_name='linalg_eigvalsh',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
OpInfo('linalg.householder_product',
aten_name='linalg_householder_product',
op=torch.linalg.householder_product,
aliases=('orgqr', ),
dtypes=floating_and_complex_types(),
# TODO: backward uses in-place operations that vmap doesn't like
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_householder_product,
decorators=[skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.lstsq',
aten_name='linalg_lstsq',
op=torch.linalg.lstsq,
dtypes=floating_and_complex_types(),
supports_out=True,
sample_inputs_func=sample_inputs_linalg_lstsq,
supports_autograd=False,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('linalg.matrix_power',
aliases=('matrix_power',),
aten_name='linalg_matrix_power',
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
supports_forward_ad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],
sample_inputs_func=sample_inputs_linalg_matrix_power,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('linalg.multi_dot',
# Need this lambda because gradcheck does not work with TensorList inputs
aten_name='linalg_multi_dot',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
supports_inplace_autograd=False,
# Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_multi_dot,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
),
OpInfo('linalg.norm',
op=torch.linalg.norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_norm,
aten_name='linalg_norm',
skips=(
# linalg.norm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('linalg.matrix_norm',
aten_name='linalg_matrix_norm',
dtypes=floating_and_complex_types(),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_matrix_norm,
skips=(
# linalg.matrix_norm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('linalg.qr',
aten_name='linalg_qr',
op=torch.linalg.qr,
dtypes=floating_and_complex_types(),
# batched gradients do not work for empty inputs
# https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_qr,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.slogdet',
aten_name='linalg_slogdet',
op=torch.linalg.slogdet,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_slogdet,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.vector_norm',
op=torch.linalg.vector_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_vector_norm,
aten_name='linalg_vector_norm',
skips=(
# linalg.vector_norm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
UnaryUfuncInfo('log',
ref=np.log,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log10',
ref=np.log10,
domain=(0, None),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log1p',
ref=np.log1p,
aliases=('special.log1p',),
domain=(-1, None),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
safe_casts_outputs=True,
supports_forward_ad=True,
assert_autodiffed=True),
UnaryUfuncInfo('log2',
ref=np.log2,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.cfloat, torch.cdouble]),
)),
OpInfo('logaddexp',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),
args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),
OpInfo('logaddexp2',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),
args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),
UnaryUfuncInfo('logical_not',
ref=np.logical_not,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_autograd=False,
skips=(
# The function variant always returns BoolTensor
# while the inplace variant preserves the input dtype.
# >>> t = torch.randn(3)
# >>> torch.logical_not(t)
# tensor([False, False, False])
# >>> torch.logical_not(t).dtype
# torch.bool
# >>> t.logical_not_().dtype
# torch.float32
SkipInfo('TestUnaryUfuncs', 'test_variant_consistency',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
SkipInfo('TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
)),
OpInfo('lt',
aliases=('less',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('lu',
op=torch.lu,
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
# we use in-place operations which cannot be avoided.
# This causes vmap failures, hence we skip batched gradient checks
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
sample_inputs_func=sample_inputs_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# we skip jit tests because `lu` is a torch function
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('lu_solve',
op=torch.lu_solve,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_lu_solve,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('lu_unpack',
op=torch.lu_unpack,
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
# we use in-place operations which cannot be avoided.
# This causes vmap failures, hence we skip batched gradient checks
check_batched_grad=False,
supports_out=True,
sample_inputs_func=sample_inputs_lu_unpack,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# cuda gradchecks are slow
# see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),
)),
OpInfo('masked_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_fill,
supports_forward_ad=True,
supports_out=False),
OpInfo('masked_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_scatter,
supports_forward_ad=True,
supports_out=False),
OpInfo('masked_select',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_masked_select),
OpInfo('matrix_exp',
dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_matrix_exp,
supports_out=False,
),
OpInfo('matmul',
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_matmul,
skips=(
# matmul does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
SkipInfo('TestCommon', 'test_conj_view', device_type='cpu'),
)),
OpInfo('max',
op=torch.max,
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_binary,
supports_forward_ad=True,
assert_autodiffed=True,),
OpInfo('max',
op=torch.max,
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_forward_ad=True,
skips=(
# max does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),)),
OpInfo('max',
op=torch.max,
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),
OpInfo('median',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of median do support out
supports_out=False,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('nanmedian',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of nanmedian do support out
supports_out=False,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('var_mean',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),
backward_dtypes=floating_types_and(torch.half),
backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.half),
# TODO: some signatures of var_mean do support out
supports_out=False,
supports_forward_ad=True,
skips=(
# TODO: FIXME: complex inputs that require grad error out in the forward pass
SkipInfo('TestCommon', 'test_dtypes'),
# TODO: review with var_mean tests in test_autograd.py
SkipInfo('TestJit', 'test_variant_consistency_jit'),
SkipInfo('TestGradients', 'test_fn_grad'),
SkipInfo('TestGradients', 'test_fn_gradgrad'),
SkipInfo('TestGradients', 'test_forward_mode_AD'))),
OpInfo('std_mean',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),
backward_dtypes=floating_types_and(torch.half),
backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.half),
# TODO: some signatures of std_mean do support out
supports_out=False,
supports_forward_ad=True,
skips=(
# TODO: FIXME: complex inputs that require grad error out in the forward pass
SkipInfo('TestCommon', 'test_dtypes'),
# TODO: fix along with var_mean autograd tests
SkipInfo('TestJit', 'test_variant_consistency_jit'),
SkipInfo('TestGradients', 'test_fn_grad'),
SkipInfo('TestGradients', 'test_fn_gradgrad'),
SkipInfo('TestGradients', 'test_forward_mode_AD'))),
OpInfo('meshgrid',
variant_test_name='variadic_tensors',
# Our implementation corresponds to "ij" indexing for
# numpy.meshgrid, whose default indexing is "xy".
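# A minimal illustrative sketch of the difference:
# >>> a, b = torch.meshgrid(torch.arange(2), torch.arange(3))  # "ij": a.shape == (2, 3)
# >>> x, y = np.meshgrid(np.arange(2), np.arange(3))           # "xy": x.shape == (3, 2)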
ref=lambda *tensors: np.meshgrid(*tensors, indexing='ij'),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),
skips=[
# JIT does not support variadic tensors.
SkipInfo('TestJit', 'test_variant_consistency_jit'),
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
SkipInfo('TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Skip operator schema test because this is a functional and not an operator
SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
],
supports_out=False,
supports_forward_ad=True),
OpInfo('meshgrid',
variant_test_name='list_of_tensors',
# Unlike the variant above, we do not use np.meshgrid as a
# ref since it does not officially support a list of numpy
# arrays.
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'),
skips=[
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
SkipInfo('TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
],
assert_autodiffed=True,
supports_out=False,
autodiff_nonfusible_nodes=[],
supports_forward_ad=True),
OpInfo('min',
op=torch.min,
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_binary,
supports_forward_ad=True,
assert_autodiffed=True,),
OpInfo('min',
op=torch.min,
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_forward_ad=True,
skips=(
# min does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('min',
op=torch.min,
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),
# TODO(@heitorschueroff) Add test for dtype kwarg
OpInfo('mean',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_reduction,
# Need to skip the out= test because one of the overloads of mean does not support it
# TODO(@heitorschueroff) fix this when implementing ReductionInfo
skips=(SkipInfo('TestCommon', 'test_out'),)),
OpInfo('quantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile),
OpInfo('nanquantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile),
OpInfo('maximum',
op=torch.maximum,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('minimum',
op=torch.minimum,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
# `softmax` supports different dtypes depending on whether the `dtype` argument
# is passed or not, hence two OpInfo entries: one with dtype and one without.
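# A minimal illustrative sketch of the two call forms (assumed; the dtype kwarg
# casts the input before the op, so non-floating inputs are exercised there):
# >>> torch.softmax(torch.randn(3), dim=0)                                # no dtype kwarg
# >>> torch.softmax(torch.randint(10, (3,)), dim=0, dtype=torch.float32)  # with dtype kwarg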
OpInfo('softmax',
aliases=('nn.functional.softmax',),
aten_name='softmax',
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_autodiffed=True,
supports_out=False),
OpInfo('softmax',
aliases=('nn.functional.softmax',),
variant_test_name="with_dtype",
aten_name='softmax',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True,
supports_out=False),
OpInfo('nn.functional.normalize',
dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_normalize,
skips=(
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":159,
# please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit',),
)),
OpInfo('aminmax',
ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(onlyOnCPUAndCUDA,),
supports_autograd=False,
sample_inputs_func=sample_inputs_aminmax,
skips=(
# FIXME: aminmax does not check for safe casting to output
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('nn.functional.adaptive_avg_pool2d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool2d),
OpInfo('nn.functional.relu',
aten_name="relu",
supports_autograd=True,
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_activation_relu,
supports_out=False),
OpInfo('nn.functional.conv_transpose2d',
aten_name='conv_transpose2d',
aliases=('conv_transpose2d',),
dtypesIfCPU=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv_transpose2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.pad',
variant_test_name='constant',
aten_name='constant_pad_nd',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'),
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='reflect',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'),
skips=(
# op name not found in JIT graph
# There are multiple aten ops, namely reflection_pad_{1,2,3}d
# so we can't use the aten_name argument in OpInfo
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='replicate',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'),
skips=(
# op name not found in JIT graph
# There are multiple aten ops, namely replication_pad_{1,2,3}d
# so we can't use the aten_name argument in OpInfo
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='circular',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'),
supports_forward_ad=True,
check_batched_grad=False,
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
supports_out=False),
OpInfo('nn.functional.hardswish',
aten_name="hardswish",
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardswish,
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=False,
supports_forward_ad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::hardswish"]),
OpInfo('nn.functional.unfold',
aten_name='im2col',
dtypes=floating_types_and(torch.half),
sample_inputs_func=sample_inputs_nn_unfold,
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='nearest',
supports_autograd=True,
dtypesIfCPU=floating_types_and(torch.uint8),
dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),
sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='linear',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bilinear',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bicubic',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='trilinear',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='area',
supports_autograd=True,
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'area'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# JIT alias info internal asserts here
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.leaky_relu',
aliases=None,
aten_name="leaky_relu",
dtypes=floating_types(),
sample_inputs_func=sample_inputs_leaky_relu,
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::leaky_relu"]),
OpInfo('nn.functional.avg_pool2d',
aten_name='avg_pool2d',
supports_autograd=True,
supports_out=False,
dtypesIfCPU=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_avgpool2d),
UnaryUfuncInfo(
'nn.functional.logsigmoid',
aten_name="log_sigmoid",
ref=reference_logsigmoid,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
# autodiff_nonfusible_nodes=["aten::log_sigmoid"],
decorators=[
DecorateInfo(
precisionOverride({torch.float16: 1e-2}),
'TestUnaryUfuncs', 'test_reference_numerics_normal'),
DecorateInfo(
precisionOverride({torch.float16: 1e-2}),
'TestUnaryUfuncs', 'test_reference_numerics_hard'),
DecorateInfo(
precisionOverride({torch.float16: 1e-2}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
],
),
OpInfo('nextafter',
dtypes=floating_types_and(torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_nextafter),
OpInfo('topk',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_topk,
skips=(
# topk does not raise a warning when the out= tensor is resized
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('nn.functional.hardshrink',
aten_name="hardshrink",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::hardshrink"]),
OpInfo('nn.functional.hardtanh',
aten_name="hardtanh",
dtypesIfCPU=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16),
backward_dtypesIfCPU=all_types(),
dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::hardtanh"],
),
OpInfo('nn.functional.gelu',
aten_name="gelu",
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_gelu,
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::gelu"]),
OpInfo('nn.functional.relu6',
aten_name="relu6",
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16),
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::relu6"]),
OpInfo('mm',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_mm,
skips=(
# mm does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('mode',
op=torch.mode,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_mode,),
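# For the MvlGammaInfo entries below, sample_kwargs is assumed to return a pair of
# kwarg dicts: the first for the torch op (mvlgamma's `p`) and the second for the
# reference implementation (its `d` parameter), e.g. ({'p': 1}, {'d': 1}).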
MvlGammaInfo(variant_test_name='mvlgamma_p_1',
domain=(1, None),
skips=skips_mvlgamma(),
sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),
MvlGammaInfo(variant_test_name='mvlgamma_p_3',
domain=(2, None),
skips=skips_mvlgamma(skip_redundant=True) + (
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),
MvlGammaInfo(variant_test_name='mvlgamma_p_5',
domain=(3, None),
skips=skips_mvlgamma(skip_redundant=True) + (
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),
OpInfo('ne',
aliases=('not_equal',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('narrow',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_narrow),
UnaryUfuncInfo('neg',
aliases=('negative', ),
ref=np.negative,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
assert_autodiffed=True,),
OpInfo('dist',
op=torch.dist,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_dist,
skips=(
# dist does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('outer',
op=torch.outer,
aliases=('ger', ),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_outer,),
OpInfo('ormqr',
op=torch.ormqr,
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_ormqr,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),
OpInfo('permute',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_permute),
OpInfo('pow',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
# Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled
# for Float16, causing this test to fail. pow's autograd for Float16 is thus currently
# unsupported on CPU.
backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_pow,
supports_inplace_autograd=False,
supports_forward_ad=True,
assert_autodiffed=True,
),
OpInfo('float_power',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_pow,
supports_forward_ad=True,
skips=(
SkipInfo('TestMathBits', 'test_conj_view', device_type='cuda'),),),
OpInfo('qr',
op=torch.qr,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_qr,
# batched gradients do not work for empty inputs
# https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
UnaryUfuncInfo('rad2deg',
ref=np.degrees,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
UnaryUfuncInfo('real',
ref=np.real,
dtypes=complex_types(),
supports_out=False,
supports_forward_ad=True,
skips=(
# Skip since real and imag don't have out variants.
SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('roll',
ref=np.roll,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_roll),
OpInfo('rot90',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_rot90),
UnaryUfuncInfo('round',
ref=np.round,
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True,),
UnaryUfuncInfo('sin',
ref=np.sin,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),
UnaryUfuncInfo('sinc',
ref=np_sinc_with_fp16_as_fp32,
aliases=('special.sinc',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2,
torch.float16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49133
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.cfloat]),
)),
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
# Reference: https://github.com/pytorch/pytorch/issues/48641
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.int8]),
)),
UnaryUfuncInfo('sign',
ref=reference_sign,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
)),
UnaryUfuncInfo('sgn',
ref=reference_sgn,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
# Reference: https://github.com/pytorch/pytorch/issues/53958
# The test fails in the NaN comparison because `equal_nan` is True when
# comparing the CPU tensors.
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.complex64, torch.complex128]),
# Reference: https://github.com/pytorch/pytorch/issues/48486
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.complex64])
)),
OpInfo('split',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=False),
supports_out=False,
assert_autodiffed=True),
OpInfo('split',
variant_test_name='list_args',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=True),
supports_out=False),
OpInfo('split_with_sizes',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_split_with_sizes,
supports_out=False,
assert_autodiffed=True),
OpInfo('__radd__',
op=torch.Tensor.__radd__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::add'],),
OpInfo('__rdiv__',
op=torch.Tensor.__rdiv__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
supports_forward_ad=True,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),
OpInfo('__rmul__',
op=torch.Tensor.__rmul__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::mul'],),
OpInfo('__rand__',
op=torch.Tensor.__rand__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__ror__',
op=torch.Tensor.__ror__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rxor__',
op=torch.Tensor.__rxor__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rmatmul__',
op=torch.Tensor.__rmatmul__,
dtypes=floating_types(),
dtypesIfCPU=all_types_and_complex(),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else [],
torch.complex64, torch.complex128),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else [],
torch.complex64, torch.complex128),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_matmul,
supports_out=False,
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit',),
)),
OpInfo('__rmod__',
op=torch.Tensor.__rmod__,
dtypes=all_types_and(torch.bfloat16, torch.half),
dtypesIfCPU=floating_types_and(torch.half,),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
# Autograd can be supported once torch.remainder(Tensor, Tensor) supports
# autograd with respect to the second argument.
# https://github.com/pytorch/pytorch/pull/58476/files#r637167630
supports_autograd=False,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::remainder'],),
OpInfo('__rpow__',
op=torch.Tensor.__rpow__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
# Reference: https://github.com/pytorch/pytorch/issues/54774
# "log2" "_vml_cpu" not implemented for Half
backward_dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_forward_ad=True,
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::pow'],),
OpInfo('__rsub__',
op=torch.Tensor.__rsub__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::rsub'],),
OpInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
variant_test_name='rsub_tensor',
supports_out=False,
supports_inplace_autograd=False,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
SkipInfo('TestJit', 'test_variant_consistency_jit',
dtypes=[torch.cfloat, torch.cdouble]),
),
sample_inputs_func=partial(sample_inputs_rsub, variant='tensor'),),
OpInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
variant_test_name='rsub_scalar',
supports_out=False,
supports_inplace_autograd=False,
sample_inputs_func=partial(sample_inputs_rsub, variant='scalar'),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
SkipInfo('TestJit', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half)),),
assert_autodiffed=True,),
OpInfo('select',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_select,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('signbit',
ref=np.signbit,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_autograd=False,),
OpInfo('solve',
op=torch.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('std',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
# TODO: std does support out in some signatures
supports_out=False,
assert_autodiffed=True,
),
UnaryUfuncInfo('tan',
ref=np.tan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.float64],
active_if=TEST_WITH_ROCM),
)),
UnaryUfuncInfo('tanh',
ref=np.tanh,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "tanh_backward_cpu" not implemented for 'BFloat16'
backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
)),
OpInfo('tensor_split',
ref=np.array_split,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tensor_split,),
OpInfo('hsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_hsplit,),
OpInfo('vsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_vsplit,),
OpInfo('dsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_dsplit,),
OpInfo('triangular_solve',
op=torch.triangular_solve,
dtypes=floating_and_complex_types(),
supports_out=False,
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
UnaryUfuncInfo('trunc',
aliases=('fix', ),
ref=np.trunc,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True),
UnaryUfuncInfo('exp2',
aliases=('special.exp2', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),
dtypes=all_types_and(torch.bool, torch.half),
dtypesIfCPU=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True),
UnaryUfuncInfo('expm1',
aliases=('special.expm1', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True,
assert_autodiffed=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('nan_to_num',
ref=np.nan_to_num,
dtypes=all_types_and(torch.half, torch.bool),
dtypesIfCPU=all_types_and(torch.half, torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),
supports_forward_ad=True,
# Passing numpy_kwargs via sample_kwargs, as numpy does comparison
# with BFloat16 in float, since it currently doesn't support BFloat16.
# Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556
sample_kwargs=lambda device, dtype, input: ({},
{'posinf': torch.finfo(torch.bfloat16).max,
'neginf': torch.finfo(torch.bfloat16).min})
if dtype is torch.bfloat16 else ({}, {})),
UnaryUfuncInfo('reciprocal',
ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
safe_casts_outputs=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('rsqrt',
ref=lambda x: np.reciprocal(np.sqrt(x)),
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.half: 5e-2}),),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
handles_complex_extremals=False),
UnaryUfuncInfo('sqrt',
ref=np.sqrt,
supports_sparse=True,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 7e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_MACOS),
# Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16])),
safe_casts_outputs=True,
handles_complex_extremals=False),
UnaryUfuncInfo('square',
ref=np.square,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/52549
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.cfloat, torch.cdouble]),
# >>> t = torch.tensor(complex(-0.01, float("inf")))
# >>> np.square(t.numpy())
# (-inf-infj)
# >>> t.square()
# tensor(-inf-infj)
# >>> t.cuda().square()
# tensor(inf+nanj, device='cuda:0')
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
),),
OpInfo('lerp',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_lerp,
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('linalg.inv',
aten_name='linalg_inv',
op=torch.linalg.inv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.inv_ex',
aten_name='linalg_inv_ex',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
UnaryUfuncInfo('angle',
ref=np.angle,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
supports_forward_ad=True,
supports_complex_to_float=True),
OpInfo('linalg.solve',
aten_name='linalg_solve',
op=torch.linalg.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
op=torch.linalg.pinv,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('eig',
op=torch.eig,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_eig,
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
skipCUDAIfRocm
],),
OpInfo('einsum',
# we need this lambda because SampleInput expects tensor input as the first argument
# TODO(@heitorschueroff) update SampleInput to handle such cases
op=lambda tensors, equation: torch.einsum(equation, tensors),
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),
supports_out=False,
sample_inputs_func=sample_inputs_einsum,
skips=(
# test does not work with passing lambda for op
# there's a test `test_einsum` in `test_jit.py` to handle this case
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('svd',
op=torch.svd,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_svd,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
]),
OpInfo('linalg.svd',
op=torch.linalg.svd,
aten_name='linalg_svd',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_svd,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
]),
OpInfo('linalg.svdvals',
op=torch.linalg.svdvals,
aten_name='linalg_svdvals',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_svdvals,
check_batched_gradgrad=False,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCPUIfNoLapack]),
OpInfo('polar',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_polar),
# TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.
# To test reference numerics against multiple values of argument `n`,
    # we make multiple OpInfo entries, each corresponding to a different value of n (currently 0 to 4).
# We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Probably related to the way the function is
# scripted for JIT tests (or maybe not).
# RuntimeError:
# Arguments for call are not valid.
# The following variants are available:
# aten::polygamma(int n, Tensor self) -> (Tensor):
# Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.
# aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> (Tensor(a!)):
# Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.
# The original call is:
# File "<string>", line 3
# def the_method(i0):
# return torch.polygamma(i0, 1)
# ~~~~~~~~~~~~~~~ <--- HERE
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
# A separate OpInfo entry for special.polygamma is needed to reorder the arguments
# for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939
UnaryUfuncInfo('special.polygamma',
op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),
variant_test_name='special_polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Probably related to the way the function is
# scripted for JIT tests (or maybe not).
# RuntimeError:
# Arguments for call are not valid.
# The following variants are available:
# aten::polygamma(int n, Tensor self) -> (Tensor):
# Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.
# aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> (Tensor(a!)):
# Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.
# The original call is:
# File "<string>", line 3
# def the_method(i0):
# return torch.polygamma(i0, 1)
# ~~~~~~~~~~~~~~~ <--- HERE
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_1',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_2',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_3',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_4',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
SkipInfo('TestGradients'),
SkipInfo('TestJit'),
SkipInfo('TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4})),
OpInfo('ravel',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_ravel,
),
OpInfo('reshape',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_view_reshape,
supports_out=False,
supports_forward_ad=True,
),
OpInfo('reshape_as',
op=lambda x, other: x.reshape_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_reshape_as,
supports_out=False,
supports_forward_ad=True,
),
OpInfo('view',
op=lambda x, shape: x.view(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# Because view does not have a function variant.
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
sample_inputs_func=sample_inputs_view_reshape,
),
OpInfo('view_as',
op=lambda x, other: x.view_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# Because view_as does not have a function variant.
SkipInfo('TestJit', 'test_variant_consistency_jit'),),
sample_inputs_func=sample_inputs_view_as_reshape_as,
),
OpInfo('pinverse',
op=torch.pinverse,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('gather',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_gather,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
),
OpInfo('index_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_index_fill),
OpInfo('index_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_index_copy,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_select',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_index_select,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_add',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_index_add,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('__getitem__',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=False,
op=torch.Tensor.__getitem__,
sample_inputs_func=sample_inputs_getitem,
skips=(SkipInfo('TestJit', 'test_variant_consistency_jit'),)),
OpInfo('index_put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=True,
supports_forward_ad=True,
test_neg_view=False,
sample_inputs_func=sample_inputs_index_put,
skips=(
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('sort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_sort,
skips=(
# sort does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
)),
OpInfo('put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
check_batched_gradgrad=False, # vmap complains of the sizes
sample_inputs_func=sample_inputs_put),
OpInfo('take',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
check_batched_grad=False, # vmap complains of the sizes
supports_forward_ad=True,
sample_inputs_func=sample_inputs_take),
OpInfo('scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter,),
OpInfo('scatter_add',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_add,
supports_out=False),
OpInfo('stack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_stack,
assert_autodiffed=True,
skips=(
# stack does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),),),
OpInfo('hstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# hstack does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),),),
OpInfo('hypot',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_hypot,
),
OpInfo('histogram',
dtypes=_dispatch_dtypes(), # histogram is only implemented on CPU
dtypesIfCPU=floating_types(),
sample_inputs_func=sample_inputs_histogram,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
SkipInfo('TestJit', 'test_variant_consistency_jit'),),),
OpInfo('vstack',
aliases=('row_stack',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# vstack does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),
# RuntimeError: _fn() Expected a value of type
# 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.
SkipInfo('TestJit', 'test_jit_alias_remapping'))),
OpInfo('dstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# dstack does not correctly warn when resizing out= inputs
SkipInfo('TestCommon', 'test_out'),)),
OpInfo('unfold',
op=lambda x, *args: x.unfold(*args),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
check_batched_gradgrad=False,
skips=(
# torch.unfold does not exist so we get a RuntimeError.
SkipInfo('TestJit', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
# Skip operator schema test because this is a functional and not an operator
SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
sample_inputs_func=sample_inputs_unfold),
OpInfo('msort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=all_types_and(torch.float16),
check_batched_gradgrad=False,
skips=(
# msort does not correctly warn when resizing out= inputs.
SkipInfo('TestCommon', 'test_out',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_inputs_msort),
OpInfo('movedim',
aliases=('moveaxis',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_movedim_moveaxis),
OpInfo('renorm',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_renorm),
ShapeFuncInfo('repeat',
op=lambda x, dims: x.repeat(dims),
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# torch.repeat does not exist so we get a RuntimeError.
SkipInfo('TestJit', 'test_variant_consistency_jit',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_repeat_tile),
OpInfo('squeeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_squeeze),
OpInfo('fill_',
op=lambda x, scalar: torch.fill_(x.clone(), scalar),
method_variant=None,
inplace_variant=torch.Tensor.fill_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
skips=(
# JIT has issue when op is passed as lambda
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_fill_),
OpInfo('resize_',
op=lambda x, shape: x.clone().resize_(shape),
method_variant=None,
inplace_variant=None,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('resize_as_',
op=lambda x, other: torch.resize_as_(x.clone(), other),
method_variant=None,
inplace_variant=torch.Tensor.resize_as_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('take_along_dim',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_take_along_dim,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
ShapeFuncInfo('tile',
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_repeat_tile),
OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid'
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_trapezoid),
OpInfo('trapezoid',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_trapezoid),
OpInfo('cumulative_trapezoid',
dtypes=all_types_and_complex_and(),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_out=False,
sample_inputs_func=sample_cumulative_trapezoid),
OpInfo('unsqueeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
assert_autodiffed=True,
sample_inputs_func=sample_unsqueeze),
OpInfo('var',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
# TODO: revisit, some var signatures do support out (see std, too)
supports_out=False,
assert_autodiffed=True,
),
OpInfo('xlogy',
aliases=('special.xlogy',),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_xlogy),
OpInfo('zero_',
op=lambda x: torch.zero_(x.clone()),
method_variant=None,
inplace_variant=torch.Tensor.zero_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT has issue when op is passed as lambda
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_zero_),
OpInfo('special.xlog1py',
aten_name='special_xlog1py',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_xlog1py),
OpInfo('special.zeta',
aten_name='special_zeta',
dtypes=all_types_and(torch.bool),
supports_autograd=False,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_binary_pwise),
# OpInfo entry to verify the gradient formula of `other`/`q`
OpInfo('special.zeta',
op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs),
aten_name='special_zeta',
variant_test_name='grad',
dtypes=all_types_and(torch.bool),
supports_autograd=True,
safe_casts_outputs=True,
skips=(
# Lambda doesn't work in JIT test
SkipInfo("TestJit", "test_variant_consistency_jit"),
),
sample_inputs_func=sample_inputs_zeta),
OpInfo('logsumexp',
aliases=('special.logsumexp',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_logsumexp),
OpInfo('trace',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_trace),
OpInfo('transpose',
aliases=('swapdims', 'swapaxes'),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_transpose_swapdims),
OpInfo('tril',
dtypes=all_types_and_complex_and(torch.bool, torch.half),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('triu',
dtypes=all_types_and_complex_and(torch.bool, torch.half),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('kron',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_kron),
OpInfo('inner',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_inner,
),
OpInfo('tensordot',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tensordot,
skips=(
# Currently failing due to an INTERNAL_ASSERT_FAILED error.
# Reference: https://github.com/pytorch/pytorch/issues/56314
SkipInfo("TestJit", "test_variant_consistency_jit", dtypes=[torch.float32]),
# Skip operator schema test because this is a functional and not an operator.
# Reference: https://github.com/pytorch/pytorch/issues/54574
SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)
),
OpInfo('to_sparse',
op=lambda x, *args: x.to_sparse(*args),
sample_inputs_func=sample_inputs_to_sparse,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
backward_dtypes=floating_types(),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
skips=(
# TODO: FIXME: complex inputs requiring grad error in forward
SkipInfo('TestCommon', 'test_dtypes'),
# JIT has issue when op is passed as lambda
SkipInfo('TestJit', 'test_variant_consistency_jit'),
)
),
OpInfo('logcumsumexp',
dtypes=floating_types_and(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(),
skips=(
# AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
SkipInfo('TestCommon', 'test_out', dtypes=(torch.float32,), device_type='cuda'),
),
sample_inputs_func=sample_inputs_logcumsumexp),
UnaryUfuncInfo('sigmoid',
aliases=('special.expit', ),
ref=reference_sigmoid if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.complex64: 1e-1,
torch.bfloat16: 1e-2}),),
skips=(
# TODO: FIXME: sigmoid fails on complex inputs that require grad
SkipInfo('TestCommon', 'test_dtypes'),
# Reference: https://github.com/pytorch/pytorch/issues/56012
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.complex64]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.complex64]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble])),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
assert_autodiffed=True),
UnaryUfuncInfo('digamma',
ref=scipy.special.digamma if TEST_SCIPY else _NOTHING,
aliases=('special.psi', 'special.digamma',),
decorators=(precisionOverride({torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool),
dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
safe_casts_outputs=True),
UnaryUfuncInfo('special.entr',
ref=scipy.special.entr if TEST_SCIPY else _NOTHING,
aten_name='special_entr',
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-1,
torch.bfloat16: 1e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16, torch.float16]),
),
supports_inplace_autograd=False,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_entr),
UnaryUfuncInfo('special.ndtri',
ref=scipy.special.ndtri if TEST_SCIPY else _NOTHING,
domain=(0, 1),
aten_name='special_ndtri',
dtypes=all_types_and(torch.bool),
safe_casts_outputs=True),
UnaryUfuncInfo('erf',
ref=scipy.special.erf if TEST_SCIPY else _NOTHING,
aliases=('special.erf', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfc',
ref=scipy.special.erfc if TEST_SCIPY else _NOTHING,
aliases=('special.erfc', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfinv',
ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING,
aliases=('special.erfinv', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2,
torch.float32: 1e-4}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
domain=(-1, 1),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
)),
UnaryUfuncInfo('lgamma',
ref=reference_lgamma if TEST_SCIPY else _NOTHING,
aliases=('special.gammaln', ),
decorators=(precisionOverride({torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
),
safe_casts_outputs=True),
OpInfo(
'logdet',
supports_out=False,
sample_inputs_func=sample_inputs_logdet,
decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma, skipCUDAIfRocm)),
    # `log_softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
OpInfo(
'log_softmax',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=False,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_autodiffed=True),
OpInfo(
'log_softmax',
variant_test_name='dtype',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True),
UnaryUfuncInfo('logit',
ref=scipy.special.logit if TEST_SCIPY else _NOTHING,
domain=(0, 1),
aliases=('special.logit', ),
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_logit,
safe_casts_outputs=True),
OpInfo('where',
# Currently only the `input` is tested in gradcheck.
           # If we pass `condition` first, none of the inputs that support
           # autograd will be tested. Hence the following lambda.
op=lambda self, condition, other: torch.where(condition, self, other),
sample_inputs_func=sample_inputs_where,
supports_out=False,
skips=(
# test does not work with passing lambda for op
SkipInfo('TestJit', 'test_variant_consistency_jit'),
),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)),
# `torch.norm` has multiple code paths depending on the value of `p`.
    # These paths have different dtype support. Also, JIT supports
    # most variants but not all of them, so we split the OpInfo entries
    # for `norm` based on the code paths and JIT support.
OpInfo('norm',
sample_inputs_func=sample_inputs_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
SkipInfo('TestCommon', 'test_out'),
)
),
OpInfo('norm',
variant_test_name='nuc',
sample_inputs_func=sample_inputs_norm_nuc,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types(),
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
SkipInfo('TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)),
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":157,
# please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)
),
OpInfo('norm',
variant_test_name='fro',
sample_inputs_func=sample_inputs_norm_fro,
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
SkipInfo('TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)),
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":157,
# please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)
),
OpInfo('norm',
variant_test_name='inf',
sample_inputs_func=sample_inputs_norm_inf,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
               # the following 3 tests fail intermittently
SkipInfo('TestJit', 'test_variant_consistency_jit',
device_type='cpu', dtypes=(torch.complex64,)),
SkipInfo('TestGradients', 'test_fn_grad',
device_type='cpu', dtypes=(torch.complex128,)),
SkipInfo('TestGradients', 'test_fn_gradgrad',
device_type='cpu', dtypes=(torch.complex128,)),
)
),
OpInfo('t',
sample_inputs_func=sample_inputs_t,
supports_out=False,
supports_forward_ad=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
assert_autodiffed=True,),
UnaryUfuncInfo('special.erfcx',
ref=scipy.special.erfcx if TEST_SCIPY else _NOTHING,
aten_name='special_erfcx',
decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),),
dtypes=all_types_and(torch.bool),
safe_casts_outputs=True),
OpInfo(
"nn.functional.one_hot",
ref=reference_one_hot,
supports_out=False,
dtypes=_dispatch_dtypes((torch.int64,)),
sample_inputs_func=sample_inputs_one_hot,
),
OpInfo(
"nn.functional.softplus",
ref=reference_softplus,
sample_inputs_func=sample_inputs_softplus,
dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
skips=(
SkipInfo(
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
OpInfo(
"linalg.tensorinv",
ref=np.linalg.tensorinv,
dtypes=floating_and_complex_types(),
skips=(
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":159,
# please report a bug to PyTorch.
SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
sample_inputs_func=sample_inputs_tensorinv,
supports_forward_ad=True,
),
OpInfo(
"nn.functional.mse_loss",
ref=reference_mse_loss,
sample_inputs_func=sample_inputs_mse_loss,
supports_out=False,
dtypesIfCPU=floating_types_and(torch.float16),
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
skips=(
SkipInfo(
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
OpInfo(
"nn.functional.grid_sample",
ref=_NOTHING,
dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_out=False,
sample_inputs_func=sample_inputs_grid_sample,
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15,
skips=(
SkipInfo(
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
ReductionOpInfo(
'all',
identity=True,
supports_multiple_dims=False,
supports_out=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: does not support passing keepdim without dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
SkipInfo('TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'any',
identity=False,
supports_multiple_dims=False,
supports_out=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: does not support passing keepdim without dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
SkipInfo('TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'amax',
nan_policy='propagate',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=lambda a, dim=None, keepdim=False, **kwargs: np.amax(a, axis=dim, keepdims=keepdim, **kwargs),
skips=(
            # FIXME: amax reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'amin',
nan_policy='propagate',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=lambda a, dim=None, keepdim=False, **kwargs: np.amin(a, axis=dim, keepdims=keepdim, **kwargs),
skips=(
            # FIXME: amin reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'argmax',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'argmin',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'count_nonzero',
identity=0,
supports_out=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_reduction_count_nonzero,
skips=(
# FIXME: count_nonzero does not accept keepdim kwarg
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
SkipInfo('TestReductions', 'test_dim_single_keepdim'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
SkipInfo('TestReductions', 'test_dim_multi_keepdim'),
SkipInfo('TestReductions', 'test_dim_multi_unsorted_keepdim'),
SkipInfo('TestReductions', 'test_dim_offbounds_keepdim'),
# FIXME: dim=[] reduces all dimensions
SkipInfo('TestReductions', 'test_dim_empty'),
),
),
ReductionOpInfo(
'prod',
identity=1,
nan_policy='propagate',
supports_multiple_dims=False,
supports_out=False,
promotes_int_to_int64=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_prod,
skips=(
# FIXME: prod does not support passing keepdim without passing dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: prod reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
# FIXME: prod does not support passing None to dim
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'sum',
identity=0,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: sum does not support passing keepdim without passing dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: sum reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
# FIXME: sum does not support passing None to dim
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'nansum',
identity=0,
nan_policy='omit',
supports_out=False,
promotes_int_to_int64=True,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: nansum does not support passing keepdim without passing dim
SkipInfo('TestReductions', 'test_dim_default_keepdim'),
# FIXME: nansum reduces all dimensions when dim=[]
SkipInfo('TestReductions', 'test_dim_empty'),
SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
# FIXME: nansum does not support passing None to dim
SkipInfo('TestReductions', 'test_dim_none'),
SkipInfo('TestReductions', 'test_dim_none_keepdim'),
),
),
OpInfo(
"nn.functional.nll_loss",
ref=_NOTHING,
dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_nll_loss,
skips=(
SkipInfo(
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
]
# Common operator groupings
unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)]
binary_ufuncs = [op for op in op_db if isinstance(op, BinaryUfuncInfo)]
spectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)]
sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse is True]
shape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)]
reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo)]
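# Illustrative usage sketch (not part of the original file): OpInfo-based test
# suites typically consume these groupings through the @ops decorator from
# torch.testing._internal.common_device_type, which instantiates one test per
# (op, device, dtype) combination. Roughly:
#
#     @ops(unary_ufuncs)
#     def test_example(self, device, dtype, op):
#         for sample in op.sample_inputs(device, dtype):
#             op(sample.input, *sample.args, **sample.kwargs)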
# TODO: review porting these to make_tensor
def index_variable(shape, max_indices, device=torch.device('cpu')):
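    # Returns a LongTensor of the given shape filled with random indices drawn
    # uniformly from [0, max_indices).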
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()
return index
def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):
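    # Builds a 2-D LongTensor of indices usable with torch.gather along `index_dim`: each
    # slice along `index_dim` holds distinct values taken from a random permutation of
    # [0, max_indices); with duplicate=True the first slice along the batch dimension is
    # copied from the second to exercise repeated indices.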
assert len(shape) == 2
assert index_dim < 2
batch_dim = 1 - index_dim
index = torch.zeros(*shape, dtype=torch.long, device=device)
for i in range(shape[index_dim]):
index.select(index_dim, i).copy_(
torch.randperm(max_indices, device=device)[:shape[batch_dim]])
if duplicate:
index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.bool).bernoulli_()
def mask_not_all_zeros(shape):
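    # Keeps sampling a random boolean mask of the given shape until at least one element is True.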
assert len(shape) > 0
while True:
result = torch.randn(shape).gt(0)
if result.sum() > 0:
return result
# TODO: move all tri/tril/triu testing to tensor creation op test suite and remove
# these from here
def _compare_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
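    # Compares torch.tril_indices / torch.triu_indices against reference indices obtained by
    # applying tril/triu to an all-ones matrix and calling nonzero() on the result.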
if row == 0 or col == 0:
        # have to handle this separately as tril and triu do not take
        # an empty matrix as input
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
else:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.tril(offset).nonzero().to(dtype).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
            .triu(offset).nonzero().to(dtype).transpose(0, 1),
            torch.triu_indices(row, col, offset, dtype=dtype, device=device))
def _compare_large_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
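    # Same comparison as above for large shapes: only the last ~100 index columns are checked,
    # and the CUDA caching allocator is emptied between steps to keep peak memory bounded.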
l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \
.nonzero()[-100:-1, :].transpose(0, 1).to(device)
torch.cuda.empty_cache()
r = torch.tril_indices(
row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \
.nonzero()[-100:-1, :].transpose(0, 1).to(device)
torch.cuda.empty_cache()
r = torch.triu_indices(
row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
# (
# row
# col
# offset (optional)
# dtype (optional)
# )
tri_tests_args = [
(1, 1),
(3, 3),
(3, 3, 1),
(3, 3, 2),
(3, 3, 200),
(3, 3, -1),
(3, 3, -2),
(3, 3, -200),
(0, 3, 0),
(0, 3, 1),
(0, 3, -1),
(3, 0, 0),
(3, 0, 1),
(3, 0, -1),
(0, 0, 0),
(0, 0, 1),
(0, 0, -1),
(3, 6, 0),
(3, 6, 1),
(3, 6, 3),
(3, 6, 9),
(3, 6, -1),
(3, 6, -3),
(3, 6, -9),
(6, 3, 0),
(6, 3, 1),
(6, 3, 3),
(6, 3, 9),
(6, 3, -1),
(6, 3, -3),
(6, 3, -9),
(258, 253, 1, torch.float32),
(257, 258, 1, torch.float64),
(258, 258, 1, torch.short),
(3, 513, 1, torch.long),
(513, 3, 1, torch.int),
(513, 0, 1, torch.double),
(1024, 1024),
(1024, 1024, 500, torch.float32),
(1024, 1024, 1023),
(1024, 1024, -500),
(1023, 1025),
(1025, 1023, 1022),
(1024, 1024, -500),
(3, 2028),
(3, 2028, 1),
(3, 2028, -1),
(2028, 3),
(2028, 1),
(2028, 1, -1)
]
tri_large_tests_args: List[Tuple[int, ...]] = [
# Large test cases below are deliberately commented out to speed up CI
    # tests and to avoid OOM errors. When modifying implementations of
# tril_indices and triu_indices, please enable these tests and make sure
# they pass.
#
# (1, 268435455),
# (5000, 5000),
# (10000, 10000),
# (268435455, 1),
# (134217727, 2, 1),
# (2, 134217727, 1),
# (536870901, 1),
# (1, 536870901),
# (268435455, 2, 1),
# (2, 268435455, 1)
]
def run_additional_tri_tests(self, device):
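    # Cross-checks tril_indices/triu_indices against nonzero() of tril/triu masks for the
    # strided layout, and verifies that requesting a sparse_coo layout raises a RuntimeError.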
x = torch.ones(
3, 3, dtype=torch.long, device=device, layout=torch.strided)
l = x.tril(0).nonzero().transpose(0, 1)
u = x.triu(0).nonzero().transpose(0, 1)
self.assertEqual(l, torch.tril_indices(3, 3, device=device))
self.assertEqual(
l, torch.tril_indices(3, 3, device=device, layout=torch.strided))
self.assertEqual(u, torch.triu_indices(3, 3, device=device))
self.assertEqual(
u, torch.triu_indices(3, 3, device=device, layout=torch.strided))
self.assertRaises(
RuntimeError,
lambda: torch.triu_indices(
1, 1, device=device, layout=torch.sparse_coo))
self.assertRaises(
RuntimeError,
lambda: torch.tril_indices(
1, 1, device=device, layout=torch.sparse_coo))
# TODO: move into common_utils.py or the test suite(s) that use this
def unpack_variables(args):
if isinstance(args, tuple):
return tuple(unpack_variables(elem) for elem in args)
else:
return args
class dont_convert(tuple):
pass
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
# TODO: move into common_utils.py or the test suite(s) that use this
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):
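    # Converts call arguments into concrete test inputs: shape tuples become random tensors,
    # float/cfloat tensors are promoted to double/cdouble, tensors are optionally made
    # non-contiguous, passed through conj(), and get requires_grad set when floating/complex.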
if not isinstance(call_args, tuple):
call_args = (call_args,)
def map_arg(arg):
def maybe_non_contig(tensor):
return tensor if not non_contiguous else make_non_contiguous(tensor)
def conjugate(tensor):
return tensor.conj()
if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
return arg
elif isinstance(arg, tuple) and len(arg) == 0:
var = conjugate(torch.randn((), dtype=dtype, device=device))
var.requires_grad = requires_grad
return var
elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
# double check casting
elif isinstance(arg, non_differentiable):
if isinstance(arg.tensor, torch.Tensor):
if arg.tensor.dtype == torch.float:
return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))
if arg.tensor.dtype == torch.cfloat:
return conjugate(maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
elif isinstance(arg, torch.Tensor):
if arg.dtype == torch.float:
arg = arg.double()
if arg.dtype == torch.cfloat:
arg = arg.to(torch.cdouble)
if arg.is_complex() != dtype.is_complex:
                raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, "
                                   "which is not supported for now")
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
return v
elif callable(arg):
return map_arg(arg(dtype=dtype, device=device))
else:
return arg
args_out = tuple(map_arg(arg) for arg in call_args)
kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
return args_out, kwargs_out
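# --- Illustrative note (added; the call below is a hypothetical example) ---
# A hedged sketch of typical usage: passing a tuple of shape tuples yields
# freshly generated double-precision tensors with requires_grad set, e.g.
#   args, kwargs = create_input(((2, 3), (3, 4)), requires_grad=True, device='cpu')
#   # args is a pair of 2x3 and 3x4 torch.double tensors; kwargs is {}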
|
the-stack_106_24921
|
from model.group import Group
from random import randrange
def test_delete_some_group(app):
if app.group.count() == 0:
app.group.create(Group(name="test_name", header="test_header", footer="test_footer"))
old_groups = app.group.get_group_list()
index = randrange(len(old_groups))
app.group.delete_group_by_index(index)
new_groups = app.group.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
old_groups[index:index+1] = []
assert old_groups == new_groups
|
the-stack_106_24922
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import copy
class Solution:
def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:
if root is None: return []
result = []
        def dfs(node, path, curSum):
            path.append(node.val)
            curSum += node.val
            isLeaf = node.left is None and node.right is None
            if isLeaf and curSum == sum:
                # Store a copy: `path` keeps being mutated by the backtracking below.
                result.append(copy.deepcopy(path))
                path.pop()
                return
            if node.left: dfs(node.left, path, curSum)
            if node.right: dfs(node.right, path, curSum)
            # Backtrack: drop this node before returning to the parent.
            path.pop()
dfs(root, [], 0)
return result
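# Illustrative example (standard LeetCode 113 case, added for clarity):
# for the tree [5,4,8,11,null,13,4,7,2,null,null,5,1] and sum = 22,
# pathSum returns [[5, 4, 11, 2], [5, 8, 4, 5]].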
|
the-stack_106_24923
|
# -*- coding: utf-8 -*-
import pytest
from cottonformation.core import helpers
from cottonformation.tests.helpers import jprint
class TestAssumeRolePolicyBuilder:
def test_build(self):
assert helpers.iam.AssumeRolePolicyBuilder(
helpers.iam.ServicePrincipal.ec2(),
helpers.iam.ServicePrincipal.awslambda(),
helpers.iam.AccountPrincipal("111122223333", external_id="ext", mfa_auth=True),
).build() == {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
},
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
},
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::111122223333:root"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": "ext"
},
"Bool": {
"aws:MultiFactorAuthPresent": "true"
}
}
}
]
}
class TestAwsManagedPolicy:
def test(self):
_ = helpers.iam.AwsManagedPolicy.AmazonEC2FullAccess
_ = helpers.iam.AwsManagedPolicy.AWSLambdaBasicExecutionRole
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
|
the-stack_106_24927
|
def increment_string(strng):
    # Scan the string from the end: trailing digits go (in order) into `n`,
    # everything before them goes into `strf`.
    str2 = strng[::-1]
    strf = []
    n = []
    flag = True
    for s in str2:
        if s.isnumeric() and flag:
            n.insert(0, s)
        else:
            flag = False
            strf.insert(0, s)
    if len(n) == 0:
        # No trailing number at all: just append "1".
        return f'{strng}1'
    else:
        # Add one to the trailing number digit by digit, propagating the carry.
        # The width is kept (leading zeros preserved) unless the carry overflows
        # past the first digit, in which case an extra digit is prepended.
        i = -1
        while int(n[i]) + 1 == 10:
            n[i] = '0'
            if len(n) + i == 0:
                n.insert(0, '0')
            i -= 1
        n[i] = str(int(n[i]) + 1)
        return ''.join(strf) + ''.join(n)
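# Illustrative behaviour (added examples; derived from the logic above):
#   increment_string("foo")    -> "foo1"    (no trailing number: append 1)
#   increment_string("foo001") -> "foo002"  (leading zeros are preserved)
#   increment_string("foo99")  -> "foo100"  (the carry can grow the number)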
|
the-stack_106_24933
|
from flask import Blueprint, render_template
from flask import current_app
from critiquebrainz.db import users as db_users
from critiquebrainz.db.users import gravatar_url
moderators_bp = Blueprint('moderators', __name__)
@moderators_bp.route('/')
def mods_list():
mod_usernames = set(map(str.lower, current_app.config['ADMINS'])) # MusicBrainz usernames
mods_data = db_users.get_many_by_mb_username(list(mod_usernames))
mods = []
for mod_data in mods_data:
# Removing from `mod_usernames` to figure out which mods don't have a CB account afterwards
if mod_data["musicbrainz_username"].lower() in mod_usernames:
mod_usernames.remove(mod_data["musicbrainz_username"].lower())
mods.append({
'critiquebrainz_id': mod_data["id"],
'musicbrainz_username': mod_data["musicbrainz_username"],
'avatar_url': mod_data["avatar_url"],
})
for mod_username in mod_usernames: # The rest
mods.append({
'musicbrainz_username': mod_username,
'avatar_url': gravatar_url(mod_username, default="mm"),
})
mods = sorted(mods, key=lambda k: k['musicbrainz_username'].lower())
return render_template('moderators/moderators.html', moderators=mods)
|
the-stack_106_24935
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 In-Q-Tel, Inc, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on 25 July 2016
@author: kylez
"""
from urllib.parse import urljoin
from poseidon.baseClasses.Logger_Base import Logger
from poseidon.poseidonMonitor.NorthBoundControllerAbstraction.proxy.controllerproxy import ControllerProxy
module_logger = Logger.logger
class CookieAuthControllerProxy(ControllerProxy):
def __init__(self, base_uri, login_resource, auth, *args, **kwargs):
super(CookieAuthControllerProxy, self).__init__(
base_uri, *args, **kwargs)
self.login_resource = login_resource
self.auth = auth
r = self.session.post(
urljoin(self.base_uri, login_resource), json=auth, verify=False)
self.session.cookies = r.cookies
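# --- Illustrative usage (added; the values below are assumptions, not from the project) ---
#   proxy = CookieAuthControllerProxy('https://controller:8443/', 'login',
#                                     {'user': 'admin', 'password': 'secret'})
# On construction the proxy POSTs the auth payload as JSON to the login resource
# joined onto base_uri and keeps the returned cookies on its session, so later
# requests made through that session are authenticated.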
|
the-stack_106_24937
|
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from gtk import HBox,CheckButton,Button,Label
import gobject
from mdspluswidget import MDSplusWidget
from mdsplusxdbox import MDSplusXdBox
from mdspluserrormsg import MDSplusErrorMsg
import sys
try:
import glade
guibuilder=True
except:
guibuilder=False
class props(object):
__gproperties__= {
'putOnApply' : (gobject.TYPE_BOOLEAN, 'putOnApply','put when apply button pressed',True,gobject.PARAM_READWRITE),
'nidOffset' : (gobject.TYPE_INT, 'nidOffset','Offset of nid in tree',-1,100000,-1,gobject.PARAM_READWRITE),
'buttonLabel' : (gobject.TYPE_STRING, 'buttonLabel','Label on popup button','',gobject.PARAM_READWRITE),
}
class MDSplusOnOffXdBoxWidget(props,MDSplusWidget,HBox):
__gtype_name__ = 'MDSplusOnOffXdBoxWidget'
__gproperties__= props.__gproperties__
def reset(self):
self.node_state.set_active(self.node.on)
self.node_state.set_label('')
if hasattr(self,'xdbox'):
self.xdbox.reset()
def apply(self):
if self.putOnApply:
if self.node.on != self.node_state.get_active():
try:
self.node.on=self.node_state.get_active()
except Exception:
if self.node_state.get_active():
state='on'
else:
state='off'
MDSplusErrorMsg('Error setting node on/off state','Error turning node %s %s\n\n%s' % (self.node.minpath,state,sys.exc_info()))
raise
if hasattr(self,'xdbox'):
try:
if self.node.compare(self.xdbox.value) != 1:
self.node.record=self.xdbox.value
self.reset()
except Exception:
MDSplusErrorMsg('Error storing value','Error storing value in to %s\n\n%s' % (self.node.minpath,sys.exc_info()))
def xd_state_changed(self,button):
self.node_state.set_active(self.xdbox.on.get_active())
def node_state_changed(self,button):
self.xdbox.on.set_active(self.node_state.get_active())
def popupXd(self,button):
if not hasattr(self,'xdbox'):
self.xdbox=MDSplusXdBox(self.node)
self.xdbox.putOnApply=False
self.xdbox.on.connect('toggled',self.xd_state_changed)
self.node_state.connect('toggled',self.node_state_changed)
self.xdbox.node=self.getNode()
self.xdbox.set_title(self.buttonLabel)
self.xdbox.on.set_active(self.node_state.get_active())
self.xdbox.show()
def setButtonLabel(self,button):
self.button.set_label(self.buttonLabel)
def __init__(self):
HBox.__init__(self)
MDSplusWidget.__init__(self)
HBox.set_homogeneous(self,False)
self.node_state=CheckButton('')
self.button=Button()
HBox.pack_start(self,self.node_state,False,False,0)
HBox.pack_start(self,self.button,False,False,0)
HBox.pack_start(self,Label(''),True,False,0)
if not guibuilder:
self.button.connect("clicked",self.popupXd)
self.button.connect("realize",self.setButtonLabel)
def show(self):
self.show_all()
gobject.type_register(MDSplusOnOffXdBoxWidget)
if guibuilder:
class MDSplusOnOffXdboxWidgetAdaptor(glade.get_adaptor_for_type('GtkHBox')):
__gtype_name__='MDSplusOnOffXdBoxWidgetAdaptor'
def do_set_property(self,widget,prop,value):
if prop == 'nidOffset':
widget.nidOffset=value
elif prop == 'putOnApply':
widget.putOnApply=value
elif prop == 'buttonLabel':
widget.buttonLabel=value
widget.button.set_label(value)
|
the-stack_106_24940
|
import math
import pygtk
pygtk.require('2.0')
import gtk
class ConfigDialog(object):
def __init__(self, config, levels):
self.config = config
self.levels = levels
def cb_ball_num(self, widget):
self.config.ball['num'] = widget.get_value_as_int()
def cb_level(self, widget):
self.levels[widget.get_model()[widget.get_active()][0]].init(self.config)
def cb_hazard_size(self, widget, index):
if index == 0:
self.config.hazard['size'] = (widget.get_value(), self.config.hazard['size'][1])
elif index == 1:
self.config.hazard['size'] = (self.config.hazard['size'][0], widget.get_value())
def cb_player(self, widget, player):
for k, v in self.config.player['type_name'].iteritems():
if v == widget.get_model()[widget.get_active()][0]:
self.config.paddle['paddle_type'][player] = k
return
def cb_paddle_curvature(self, widget):
self.config.paddle['curvature'] = widget.get_value()
def cb_paddle_curvature_flatten(self, widget):
widget.set_value(0)
def show(self):
window = gtk.Window()
window.set_title('Configure ' + self.config.name)
window.set_icon_from_file(self.config.icon_file)
window.connect('delete_event', lambda widget, data=None: False)
window.connect('destroy', lambda widget, data=None: gtk.main_quit())
# set up boxes
vbox_outer = gtk.VBox(False, 10)
vbox_outer.set_border_width(10)
window.add(vbox_outer)
hbox = gtk.HBox(False, 10)
vbox_outer.pack_start(hbox)
hbox_bottom = gtk.HBox(False, 10)
vbox_outer.pack_start(hbox_bottom)
vbox_labels = gtk.VBox(True, 10)
hbox.pack_start(vbox_labels)
vbox_controls = gtk.VBox(True, 10)
hbox.pack_start(vbox_controls)
# set up widgets in boxes
vbox_labels.pack_start(gtk.Label('Balls'))
adj = gtk.Adjustment(self.config.ball['num'], 1, self.config.ball['num_max'], 1)
spin = gtk.SpinButton(adj)
spin.set_wrap(False)
spin.connect('value-changed', self.cb_ball_num)
vbox_controls.pack_start(spin)
vbox_labels.pack_start(gtk.Label('Level'))
liststore = gtk.ListStore(str)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell)
cbox.add_attribute(cell, 'text', 0)
for level_name in self.levels.iterkeys():
treeiter = liststore.append([level_name])
if level_name == self.config.level_name:
cbox.set_active_iter(treeiter)
cbox.connect('changed', self.cb_level)
vbox_controls.pack_start(cbox)
vbox_labels.pack_start(gtk.Label('Corner width'))
adj = gtk.Adjustment(self.config.hazard['size'][0], 0, math.floor((self.config.size[0] - self.config.paddle['size_horizontal'][0] - 2*self.config.pixel_margin)/2), 1)
scale = gtk.HScale(adj)
scale.connect('value-changed', self.cb_hazard_size, 0)
vbox_controls.pack_start(scale)
vbox_labels.pack_start(gtk.Label('Corner height'))
adj = gtk.Adjustment(self.config.hazard['size'][1], 0, math.floor((self.config.size[1] - self.config.paddle['size_vertical'][1] - 2*self.config.pixel_margin)/2), 1)
scale = gtk.HScale(adj)
scale.connect('value-changed', self.cb_hazard_size, 1)
vbox_controls.pack_start(scale)
for player in self.config.PLAYER_ALL:
vbox_labels.pack_start(gtk.Label(self.config.player['name'][player]))
liststore = gtk.ListStore(str)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell)
cbox.add_attribute(cell, 'text', 0)
for p_type in self.config.PLAYER_TYPES_ALL:
treeiter = liststore.append([self.config.player['type_name'][p_type]])
if p_type == self.config.paddle['paddle_type'][player]:
cbox.set_active_iter(treeiter)
cbox.connect('changed', self.cb_player, player)
vbox_controls.pack_start(cbox)
vbox_labels.pack_start(gtk.Label('Paddle curvature'))
curv_hbox = gtk.HBox(False)
range = float(self.config.paddle['curvature_range'][1]-self.config.paddle['curvature_range'][0])
num_steps = 1000
adj = gtk.Adjustment(self.config.paddle['curvature'], self.config.paddle['curvature_range'][0], self.config.paddle['curvature_range'][1], range/num_steps)
scale = gtk.HScale(adj)
scale.set_digits(int(max(0,-math.log(range/num_steps, 10))))
scale.connect('value-changed', self.cb_paddle_curvature)
curv_hbox.pack_start(scale)
but = gtk.Button('Flat')
but.connect_object('clicked', self.cb_paddle_curvature_flatten, scale)
curv_hbox.pack_start(but, expand=False)
vbox_controls.pack_start(curv_hbox)
but = gtk.Button(stock=gtk.STOCK_OK)
but.connect_object('clicked', gtk.Widget.destroy, window)
hbox_bottom.pack_end(but, expand=False)
window.show_all()
gtk.main()
|
the-stack_106_24941
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
def conv1x1(in_planes, out_planes, stride=1):
    "1x1 convolution"
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, strides, compress_layer=True):
self.inplanes = 32
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=strides[0], padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 32, layers[0], stride=strides[1])
self.layer2 = self._make_layer(block, 64, layers[1], stride=strides[2])
self.layer3 = self._make_layer(block, 128, layers[2], stride=strides[3])
self.layer4 = self._make_layer(block, 256, layers[3], stride=strides[4])
self.layer5 = self._make_layer(block, 512, layers[4], stride=strides[5])
self.compress_layer = compress_layer
if compress_layer:
# for handwritten
self.layer6 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=(3, 1), padding=(0, 0), stride=(1, 1)),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, multiscale=False):
out_features = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
tmp_shape = x.size()[2:]
x = self.layer1(x)
if x.size()[2:] != tmp_shape:
tmp_shape = x.size()[2:]
out_features.append(x)
x = self.layer2(x)
if x.size()[2:] != tmp_shape:
tmp_shape = x.size()[2:]
out_features.append(x)
x = self.layer3(x)
if x.size()[2:] != tmp_shape:
tmp_shape = x.size()[2:]
out_features.append(x)
x = self.layer4(x)
if x.size()[2:] != tmp_shape:
tmp_shape = x.size()[2:]
out_features.append(x)
x = self.layer5(x)
if not self.compress_layer:
out_features.append(x)
else:
if x.size()[2:] != tmp_shape:
tmp_shape = x.size()[2:]
out_features.append(x)
x = self.layer6(x)
out_features.append(x)
return out_features
def resnet45(strides, compress_layer):
model = ResNet(BasicBlock, [3, 4, 6, 6, 3], strides, compress_layer)
return model
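# --- Hedged usage sketch (added; not part of the original file) ---
# A minimal smoke test under assumed settings: the six per-stage strides and the
# input size below are illustrative assumptions; conv1 expects a 1-channel input.
if __name__ == '__main__':
    import torch
    model = resnet45(strides=[1, 1, 2, 2, 2, 2], compress_layer=False)
    features = model(torch.randn(1, 1, 32, 128))
    # One feature map is collected each time the spatial resolution changes.
    print([f.shape for f in features])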
|