ext: stringclasses (9 values)
sha: stringlengths (40 to 40)
content: stringlengths (3 to 1.04M)
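The three columns above (ext, sha, content) describe the records that follow: a file extension, a 40-character hash, and the file text. A minimal loading sketch, assuming this is a Hugging Face `datasets` table with exactly those columns; the dataset path "user/code-files" is a hypothetical placeholder, not the real dataset name.

# Sketch only: load the table, keep Python rows, and peek at a few records.
from datasets import load_dataset

ds = load_dataset("user/code-files", split="train")  # hypothetical path
python_rows = ds.filter(lambda row: row["ext"] == "py")
for row in python_rows.select(range(3)):
    print(row["ext"], row["sha"], len(row["content"]))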
py
b4146cfc0933242c94c587b4a769b6a6056c58f7
from django.core.validators import RegexValidator
from django.utils.translation import gettext_lazy as _

PASS_NUMBER_VALIDATOR = RegexValidator(
    r'[A-Z]{2}\d{7}',
    message=_('Passport number format is: XX1234567')
)

PASS_ID_NUMBER_VALIDATOR = RegexValidator(
    r'\d{7}[A-Z]\d{3}[A-Z]{2}\d',
    message=_('ID format is: 1234567X123XX1')
)
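A brief usage sketch for the validators above (an editorial illustration, not part of the stored file): a RegexValidator is a callable that raises ValidationError when the value does not match. It assumes a configured Django settings module, since the lazy-translated message is evaluated when the error is rendered; the sample values are illustrative only.

from django.core.exceptions import ValidationError

try:
    PASS_NUMBER_VALIDATOR('AB1234567')   # matches the XX1234567 pattern, no error
    PASS_NUMBER_VALIDATOR('1234567')     # no leading letters: raises ValidationError
except ValidationError as exc:
    print(exc.messages)  # ['Passport number format is: XX1234567']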
py
b4146d4fd0130045a1cd1ff30790c92eaa008895
""" WSGI config for tiendaenlinea project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tiendaenlinea.settings') application = get_wsgi_application()
py
b4146d8aa4f0b735869dc05a625b16c7d5070bee
from __future__ import absolute_import
from base64 import b64decode
from collections import namedtuple
from functools import partial
import logging
import os.path
import uuid

from six.moves.urllib.parse import urlsplit, urlunsplit, urlencode, urljoin
import formasaurus
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.settings import Settings
from scrapy.crawler import CrawlerRunner
from scrapy.exceptions import CloseSpider
from scrapy.utils.response import get_base_url
from scrapy_splash import SplashRequest
from twisted.internet.defer import inlineCallbacks, returnValue

from .middleware import get_cookiejar
from .app import app, db, server_path
from .login_keychain import get_domain


USERNAME_FIELD_TYPES = {'username', 'email', 'username or email'}
CHECK_CHECKBOXES = {'remember me checkbox'}
PASSWORD_FIELD_TYPES = {'password'}
CAPTCHA_FIELD_TYPES = {'captcha'}
SUBMIT_TYPES = {'submit button'}
DEFAULT_POST_HEADERS = {b'Content-Type': b'application/x-www-form-urlencoded'}

USER_AGENT = (
    'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 '
    '(KHTML, like Gecko) Ubuntu Chromium/43.0.2357.130 '
    'Chrome/43.0.2357.130 Safari/537.36'
)


base_settings = Settings(values=dict(
    TELNETCONSOLE_ENABLED = False,
    ROBOTSTXT_OBEY = False,
    DOWNLOAD_DELAY = 2.0,
    DEPTH_PRIORITY = 1,
    CONCURRENT_REQUESTS = 2,
    CONCURRENT_REQUESTS_PER_DOMAIN = 2,
    SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleFifoDiskQueue',
    SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.FifoMemoryQueue',
    # DOWNLOADER_MIDDLEWARES are set in get_settings
    USER_AGENT = USER_AGENT,
    DOWNLOADER_MIDDLEWARES = {
        'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
        # Placed before splash middleware
        'autologin.middleware.ProxyMiddleware': 720,
    },
))


def crawl_runner(extra_settings=None):
    settings = base_settings.copy()
    if extra_settings is not None:
        settings.update(extra_settings, priority='cmdline')
    if settings.get('SPLASH_URL'):
        settings['DUPEFILTER_CLASS'] = 'scrapy_splash.SplashAwareDupeFilter'
        settings.setdefault('DOWNLOADER_MIDDLEWARES', {}).update({
            'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression'
            '.HttpCompressionMiddleware': 810,
        })
    else:
        settings.setdefault('DOWNLOADER_MIDDLEWARES', {}).update({
            'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
            'autologin.middleware.ExposeCookiesMiddleware': 700,
        })
    return CrawlerRunner(settings)


def splash_request(lua_source, *args, **kwargs):
    kwargs['endpoint'] = 'execute'
    splash_args = kwargs.setdefault('args', {})
    splash_args['lua_source'] = lua_source
    splash_args['timeout'] = 60
    extra_js = kwargs.pop('extra_js', None)
    if extra_js:
        splash_args['extra_js'] = extra_js
    return SplashRequest(*args, **kwargs)


class BaseSpider(scrapy.Spider):
    """
    Base spider.
    It uses Splash for requests if SPLASH_URL is set (not None or empty).
    """
    lua_source = 'default.lua'

    def __init__(self, *args, **kwargs):
        self.extra_js = kwargs.pop('extra_js', None)
        super(BaseSpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        self._finish_init()
        for url in self.start_urls:
            yield self.request(url)

    def _finish_init(self):
        self.using_splash = bool(self.settings.get('SPLASH_URL'))
        if self.using_splash:
            with open(os.path.join(
                    os.path.dirname(__file__), 'directives', self.lua_source),
                    'rb') as f:
                lua_source = f.read().decode('utf-8')
            self.request = partial(
                splash_request, lua_source, extra_js=self.extra_js)
        else:
            if self.extra_js:
                raise ValueError(
                    '"extra_js" not supported without "SPLASH_URL"')
            self.request = scrapy.Request


class FormSpider(BaseSpider):
    """
    This spider crawls a website trying to find login and registration forms.
    When a form is found, its URL is saved to the database.
    """
    name = 'forms'
    custom_settings = {
        'DEPTH_LIMIT': 3,
        'CLOSESPIDER_PAGECOUNT': 2000,
        'DOWNLOAD_MAXSIZE': 1*1024*1024,  # 1MB
    }
    priority_patterns = [
        # Login links
        'login', 'log in', 'logon', 'signin', 'sign in', 'sign-in',
        # Registration links
        'signup', 'sign up', 'sign-up', 'register', 'registration',
        'account', 'join',
    ]

    def __init__(self, url, credentials, *args, **kwargs):
        self.credentials = credentials
        self.start_urls = [url]
        self.link_extractor = LinkExtractor(allow_domains=[get_domain(url)])
        self.found_login = False
        self.found_registration = False
        super(FormSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        self.logger.info(response.url)
        if response.text:
            for _, meta in formasaurus.extract_forms(response.text):
                form_type = meta['form']
                if form_type == 'login' and not self.found_login:
                    self.found_login = True
                    self.handle_login_form(response.url)
                elif form_type == 'registration' \
                        and not self.found_registration:
                    self.found_registration = True
                    self.handle_registration_form(response.url)
        if self.found_registration and self.found_login:
            raise CloseSpider('done')
        for link in self.link_extractor.extract_links(response):
            priority = 0
            text = ' '.join([relative_url(link.url), link.text]).lower()
            if any(pattern in text for pattern in self.priority_patterns):
                priority = 100
            yield self.request(link.url, self.parse, priority=priority)

    def handle_login_form(self, url):
        self.logger.info('Found login form at %s', url)
        with app.app_context():
            self.credentials.login_url = url
            db.session.add(self.credentials)
            db.session.commit()

    def handle_registration_form(self, url):
        self.logger.info('Found registration form at %s', url)
        with app.app_context():
            self.credentials.registration_url = url
            db.session.add(self.credentials)
            db.session.commit()


class LoginSpider(BaseSpider):
    """
    This spider tries to log in and returns an item with login cookies.
    """
    name = 'login'
    lua_source = 'login.lua'
    custom_settings = {
        'DEPTH_LIMIT': 0,  # retries are tracked explicitly
        'LOGIN_MAX_RETRIES': 10,
        'DECAPTCHA_DEATHBYCAPTCHA_USERNAME':
            os.environ.get('DEATHBYCAPTCHA_USERNAME'),
        'DECAPTCHA_DEATHBYCAPTCHA_PASSWORD':
            os.environ.get('DEATHBYCAPTCHA_PASSWORD'),
    }

    def __init__(self, url, username, password, *args, **kwargs):
        self.start_url = url
        self.start_urls = [self.start_url]
        self.username = username
        self.password = password
        self.solver = None
        self.retries_left = None
        self.attempted_captchas = []
        super(LoginSpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        self._finish_init()
        settings = self.crawler.settings
        self.solver = None
        try:
            import decaptcha
        except ImportError:
            self.logger.warning('Decaptcha not installed')
        else:
            from decaptcha.solvers.deathbycaptcha import DeathbycaptchaSolver
            if (settings.get('DECAPTCHA_DEATHBYCAPTCHA_USERNAME') and
                    settings.get('DECAPTCHA_DEATHBYCAPTCHA_PASSWORD')):
                self.solver = DeathbycaptchaSolver(self.crawler)
            else:
                self.logger.warning('DeathByCaptcha account not provided')
        self.retries_left = settings.getint('LOGIN_MAX_RETRIES')
        request_kwargs = {}
        if self.using_splash:
            request_kwargs['args'] = {'full_render': True}
        yield self.request(self.start_url, **request_kwargs)

    def retry(self, tried_login=False, retry_once=False):
        self.retries_left -= 1
        if retry_once:
            self.retries_left = min(1, self.retries_left)
        if self.retries_left:
            self.logger.debug('Retrying login')
            return self.request(
                self.start_url,
                callback=partial(self.parse, tried_login=tried_login),
                dont_filter=True)
        else:
            self.logger.debug('No retries left, giving up')

    @inlineCallbacks
    def parse(self, response, tried_login=False):
        initial_cookies = _response_cookies(response)
        page_forms = response.data.get('forms') if self.using_splash else None
        if page_forms:
            page_forms = _from_lua(page_forms)
        forminfo = get_login_form(response.text, page_forms=page_forms)
        if forminfo is None:
            if tried_login and initial_cookies:
                # If we can not find a login form on retry, then we must
                # have already logged in, but the cookies did not change,
                # so we did not detect our success.
                yield self.report_captchas()
                returnValue({
                    'ok': True,
                    'cookies': initial_cookies,
                    'start_url': response.url})
            returnValue({'ok': False, 'error': 'nologinform'})

        form_idx, form, meta = forminfo
        self.logger.info('found login form: %s' % meta)

        extra_fields = {}
        captcha_solved = False
        captcha_field = _get_captcha_field(meta)
        if captcha_field and page_forms and self.solver:
            captcha_value = yield self.solve_captcha(page_forms[form_idx])
            if captcha_value:
                captcha_solved = True
                extra_fields[captcha_field] = captcha_value
            else:
                returnValue(self.retry())

        params = login_params(
            url=get_base_url(response),
            username=self.username,
            password=self.password,
            form=form,
            meta=meta,
            extra_fields=extra_fields,
        )
        self.logger.debug('submit parameters: %s' % params)

        returnValue(self.request(
            params['url'],
            # If we did not try solving the captcha, retry just once
            # to check if the login form disappears (and we logged in).
            callback=partial(self.parse_login, retry_once=not captcha_solved),
            method=params['method'],
            headers=params['headers'],
            body=params['body'],
            meta={'initial_cookies': cookie_dicts(initial_cookies) or []},
            dont_filter=True,
        ))

    @inlineCallbacks
    def parse_login(self, response, retry_once=False):
        cookies = _response_cookies(response) or []
        old_cookies = set(_cookie_tuples(response.meta['initial_cookies']))
        new_cookies = set(_cookie_tuples(cookie_dicts(cookies)))
        if self.using_splash:
            self.debug_screenshot('page', b64decode(response.data['page']))
        if new_cookies <= old_cookies:  # no new or changed cookies
            fail = {'ok': False, 'error': 'badauth'}
            returnValue(
                self.retry(tried_login=True, retry_once=retry_once) or fail)
        yield self.report_captchas()
        returnValue(
            {'ok': True, 'cookies': cookies, 'start_url': response.url})

    @inlineCallbacks
    def solve_captcha(self, page_form):
        from decaptcha.exceptions import DecaptchaError
        form_screenshot = b64decode(page_form['screenshot'])
        self.debug_screenshot('captcha', form_screenshot)
        try:
            captcha_value = yield self.solver.solve(form_screenshot)
        except DecaptchaError as e:
            self.logger.error('captcha not solved', exc=e)
            returnValue(None)
        else:
            self.logger.debug('captcha solved: "%s"' % captcha_value)
            self.attempted_captchas.append(form_screenshot)
            returnValue(captcha_value)

    @inlineCallbacks
    def report_captchas(self):
        # We assume that if we have logged in, then all previous failed
        # attempts were due to incorrectly solved captchas.
        if self.attempted_captchas:
            for captcha_image in self.attempted_captchas[:-1]:
                yield self.solver.report(captcha_image)
            self.attempted_captchas = []

    def debug_screenshot(self, name, screenshot):
        if not self.logger.isEnabledFor(logging.DEBUG):
            return
        browser_dir = os.path.join(server_path, 'static/browser')
        filename = os.path.join(browser_dir, '{}.jpeg'.format(uuid.uuid4()))
        with open(filename, 'wb') as f:
            f.write(screenshot)
        self.logger.debug('saved %s screenshot to %s' % (name, filename))


def get_login_form(html_source, page_forms=None):
    matches = []
    Match = namedtuple('Match', ['idx', 'form', 'meta'])
    for idx, (form, meta) in enumerate(formasaurus.extract_forms(html_source)):
        if meta['form'] == 'login':
            matches.append(Match(idx, form, meta))
    if matches:
        if page_forms:
            return max(matches, key=lambda match: (
                _get_captcha_field(match.meta) is not None,
                _form_area(page_forms[match.idx])))
        else:
            return matches[0]


def _form_area(form_meta):
    region = form_meta['region']
    left, top, right, bottom = region
    return (right - left) * (bottom - top)


def _from_lua(table):
    return [table[str(idx + 1)] for idx in range(len(table))]


def _get_captcha_field(meta):
    for name, field_type in meta['fields'].items():
        if field_type in CAPTCHA_FIELD_TYPES:
            return name


def relative_url(url):
    parts = urlsplit(url)
    return urlunsplit(('', '') + parts[2:])


def login_params(url, username, password, form, meta, extra_fields=None):
    """
    Return ``{'url': url, 'method': method, 'body': body}`` with all required
    information for submitting a login form.
    """
    fields = list(meta['fields'].items())
    username_field = password_field = None
    for field_name, field_type in fields:
        if field_type in USERNAME_FIELD_TYPES:
            username_field = field_name
        elif field_type in PASSWORD_FIELD_TYPES:
            password_field = field_name
    if username_field is None or password_field is None:
        return

    for field_name, field_type in fields:
        if field_type in CHECK_CHECKBOXES:
            try:
                form.fields[field_name] = 'on'
            except ValueError:
                pass  # This could be not a checkbox after all
    form.fields[username_field] = username
    form.fields[password_field] = password
    if extra_fields:
        for k, v in extra_fields.items():
            form.fields[k] = v

    submit_values = form.form_values()
    for field_name, field_type in fields:
        if field_type in SUBMIT_TYPES:
            submit_values.append((field_name, form.fields[field_name]))

    return dict(
        url=form.action if url is None else urljoin(url, form.action),
        method=form.method,
        headers=DEFAULT_POST_HEADERS.copy() if form.method == 'POST' else {},
        body=urlencode(submit_values),
    )


def cookie_dicts(cookiejar):
    if cookiejar is None:
        return None
    return [c.__dict__ for c in cookiejar]


def _response_cookies(response):
    if hasattr(response, 'cookiejar'):
        # using splash
        return response.cookiejar
    else:
        # using ExposeCookiesMiddleware
        return get_cookiejar(response)


def _cookie_tuples(cookie_dicts_):
    return [(c['name'], c['value'], c['domain'], c['path'], c['port'])
            for c in cookie_dicts_]
py
b4146de0906e68e96de9668b060569de5a3b81d3
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base classes for Executor metrics.
"""

from abc import ABC, abstractmethod
from typing import Generic, List, Optional, Sequence, TypeVar

__all__ = [
    "Metric",
    "SimpleMetric",
    "StreamingMetric",
]

Input = TypeVar('Input')
Value = TypeVar('Value')


class Metric(Generic[Input, Value], ABC):
    r"""Base class of all metrics. You should not directly inherit this class,
    but inherit from :class:`SimpleMetric` or :class:`StreamingMetric` instead.

    Subclasses can override the class attributes to indicate their behaviors:

    - :attr:`higher_is_better`: If `True`, higher (comparison using the
      greater than operator ``>`` returns `True`) values are considered better
      metric values. If `False`, lower values are considered better.
      Defaults to `True`.
    - :attr:`requires_pred`: If `True`, predicted values are required to
      compute the metric value. Defaults to `True`.
    - :attr:`requires_label`: If `True`, labels are required to compute the
      metric value. Defaults to `True`.

    Keyword Args:
        pred_name (str, optional): Name of the predicted value. This will be
            used as the key to the dictionary returned by the model.
        label_name (str, optional): Name of the label. This will be used as
            the key to the batch object returned by the dataset. Defaults to
            ``"label"``.
        higher_is_better (bool, optional): If specified, the
            :attr:`higher_is_better` attribute for the instance is overwritten
            by the specified value.
    """
    higher_is_better: bool = True
    requires_pred: bool = True
    requires_label: bool = True

    def __init__(self, *, pred_name: Optional[str],
                 label_name: Optional[str] = "label",
                 higher_is_better: Optional[bool] = None):
        self.reset()
        if self.requires_pred and pred_name is None:
            raise ValueError(f"Metric {self.metric_name} requires a "
                             f"prediction name, but None is provided")
        if self.requires_label and label_name is None:
            raise ValueError(f"Metric {self.metric_name} requires a "
                             f"label name, but None is provided")

        if higher_is_better is not None:
            self.higher_is_better = higher_is_better
        if not self.requires_pred:
            pred_name = None
        if not self.requires_label:
            label_name = None
        self._pred_name = pred_name
        self._label_name = label_name

    @property
    def metric_name(self) -> str:
        r"""Name of the metric. By default, the class name is used."""
        return self.__class__.__name__

    @property
    def pred_name(self) -> Optional[str]:
        r"""Name of the predicted value. This will be used as the key to the
        dictionary returned by the model.
        """
        return self._pred_name

    @property
    def label_name(self) -> Optional[str]:
        r"""Name of the label (ground truth / gold value). This will be used
        as the key to the batch object returned by the dataset.
        """
        return self._label_name

    @abstractmethod
    def reset(self) -> None:
        r"""Reset the internal state of the metric, and erase all previously
        added data points.
        """
        raise NotImplementedError

    @abstractmethod
    def add(self, predicted: Sequence[Input], labels: Sequence[Input]) -> None:
        r"""Record a data batch in the metric.

        Args:
            predicted: The list of predicted values.
            labels: The list of labels.
        """
        raise NotImplementedError

    @abstractmethod
    def value(self) -> Value:
        r"""Compute the metric value.

        Returns:
            The metric value.
        """
        raise NotImplementedError

    def better(self, cur: Value, prev: Value) -> Optional[bool]:
        r"""Compare two metric values and return which is better.

        Args:
            cur: The "current" metric value.
            prev: The "previous" metric value.

        Returns:
            Return value is either a `bool` or `None`.

            - If `True`, the current metric value is considered better.
            - If `False`, the previous metric value is considered better.
            - If `None`, the two values are considered to be the same, or
              uncomparable.
        """
        result = (True if cur > prev else  # type: ignore
                  False if cur < prev else None)  # type: ignore
        if not self.higher_is_better and result is not None:
            result = not result
        return result


class SimpleMetric(Metric[Input, Value], ABC):
    r"""Base class of simple metrics. Simple metrics are metrics that do not
    support incremental computation. The value of the metric is computed only
    after all data points have been added.

    The default implementation of :meth:`add` simply stores the predicted
    values and labels into lists.
    """
    labels: List[Input]
    predicted: List[Input]
    _cached_value: Optional[Value]

    def reset(self) -> None:
        self.labels = []
        self.predicted = []
        self._cached_value = None

    def add(self, predicted: Sequence[Input], labels: Sequence[Input]):
        if len(predicted) != len(labels):
            raise ValueError(
                "Lists `predicted` and `labels` should have the same length")
        self.predicted.extend(predicted)
        self.labels.extend(labels)
        self._cached_value = None

    def value(self):
        if self._cached_value is not None:
            return self._cached_value
        self._cached_value = self._value()
        return self._cached_value

    def _value(self) -> Value:
        r"""Compute the metric value. This function is called in
        :meth:`texar.torch.run.metric.SimpleMetric.value` and the output is
        cached. This prevents recalculation of metrics which may be time
        consuming.

        Returns:
            The metric value.
        """
        raise NotImplementedError


class StreamingMetric(Metric[Input, Value], ABC):
    r"""Base class of streaming metrics. Streaming metrics are metrics that
    support incremental computation. The value of the metric may be queried
    before all data points have been added, and the computation should not be
    expensive.

    The default implementation of :meth:`add` only keeps track of the number
    of data points added. You should override this method.
    """
    count: int

    def reset(self) -> None:
        self.count = 0

    def add(self, predicted: Sequence[Input], labels: Sequence[Input]) -> None:
        if len(predicted) != len(labels):
            raise ValueError(
                "Lists `predicted` and `labels` should have the same length")
        self.count += len(predicted)
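As a brief illustration of the metric API defined above (a hedged sketch, not part of the stored file): a hypothetical StreamingAccuracy subclass of StreamingMetric, assuming the classes and the `Sequence` import above are in scope.

# Sketch only: a streaming accuracy metric on top of StreamingMetric.
class StreamingAccuracy(StreamingMetric[int, float]):
    correct: int

    def reset(self) -> None:
        super().reset()        # resets self.count
        self.correct = 0

    def add(self, predicted: Sequence[int], labels: Sequence[int]) -> None:
        super().add(predicted, labels)   # length check + count update
        self.correct += sum(int(p == y) for p, y in zip(predicted, labels))

    def value(self) -> float:
        # Guard against division by zero before any batch has been added.
        return self.correct / self.count if self.count > 0 else 0.0

# Usage sketch: pred_name/label_name are the keys the Executor would use.
metric = StreamingAccuracy(pred_name="preds", label_name="label")
metric.add([1, 0, 1], [1, 1, 1])
print(metric.value())  # 2 correct out of 3 -> about 0.67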
py
b4146eadf3f2b5e614c6984649a6b630868193dc
import numpy import numpy as np import datetime import pytest from numpy.testing import ( assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, assert_raises_regex, assert_array_equal, ) from numpy.compat import pickle # Use pytz to test out various time zones if available try: from pytz import timezone as tz _has_pytz = True except ImportError: _has_pytz = False try: RecursionError except NameError: RecursionError = RuntimeError # python < 3.5 class TestDateTime: def test_datetime_dtype_creation(self): for unit in ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'μs', # alias for us 'ns', 'ps', 'fs', 'as']: dt1 = np.dtype('M8[750%s]' % unit) assert_(dt1 == np.dtype('datetime64[750%s]' % unit)) dt2 = np.dtype('m8[%s]' % unit) assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) # Generic units shouldn't add [] to the end assert_equal(str(np.dtype("M8")), "datetime64") # Should be possible to specify the endianness assert_equal(np.dtype("=M8"), np.dtype("M8")) assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]")) assert_(np.dtype(">M8") == np.dtype("M8") or np.dtype("<M8") == np.dtype("M8")) assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or np.dtype("<M8[D]") == np.dtype("M8[D]")) assert_(np.dtype(">M8") != np.dtype("<M8")) assert_equal(np.dtype("=m8"), np.dtype("m8")) assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]")) assert_(np.dtype(">m8") == np.dtype("m8") or np.dtype("<m8") == np.dtype("m8")) assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or np.dtype("<m8[D]") == np.dtype("m8[D]")) assert_(np.dtype(">m8") != np.dtype("<m8")) # Check that the parser rejects bad datetime types assert_raises(TypeError, np.dtype, 'M8[badunit]') assert_raises(TypeError, np.dtype, 'm8[badunit]') assert_raises(TypeError, np.dtype, 'M8[YY]') assert_raises(TypeError, np.dtype, 'm8[YY]') assert_raises(TypeError, np.dtype, 'm4') assert_raises(TypeError, np.dtype, 'M7') assert_raises(TypeError, np.dtype, 'm7') assert_raises(TypeError, np.dtype, 'M16') assert_raises(TypeError, np.dtype, 'm16') def test_datetime_casting_rules(self): # Cannot cast safely/same_kind between timedelta and datetime assert_(not np.can_cast('m8', 'M8', casting='same_kind')) assert_(not np.can_cast('M8', 'm8', casting='same_kind')) assert_(not np.can_cast('m8', 'M8', casting='safe')) assert_(not np.can_cast('M8', 'm8', casting='safe')) # Can cast safely/same_kind from integer to timedelta assert_(np.can_cast('i8', 'm8', casting='same_kind')) assert_(np.can_cast('i8', 'm8', casting='safe')) assert_(np.can_cast('i4', 'm8', casting='same_kind')) assert_(np.can_cast('i4', 'm8', casting='safe')) assert_(np.can_cast('u4', 'm8', casting='same_kind')) assert_(np.can_cast('u4', 'm8', casting='safe')) # Cannot cast safely from unsigned integer of the same size, which # could overflow assert_(np.can_cast('u8', 'm8', casting='same_kind')) assert_(not np.can_cast('u8', 'm8', casting='safe')) # Cannot cast safely/same_kind from float to timedelta assert_(not np.can_cast('f4', 'm8', casting='same_kind')) assert_(not np.can_cast('f4', 'm8', casting='safe')) # Cannot cast safely/same_kind from integer to datetime assert_(not np.can_cast('i8', 'M8', casting='same_kind')) assert_(not np.can_cast('i8', 'M8', casting='safe')) # Cannot cast safely/same_kind from bool to datetime assert_(not np.can_cast('b1', 'M8', casting='same_kind')) assert_(not np.can_cast('b1', 'M8', casting='safe')) # Can cast safely/same_kind from bool to timedelta assert_(np.can_cast('b1', 'm8', casting='same_kind')) assert_(np.can_cast('b1', 'm8', casting='safe')) # Can 
cast datetime safely from months/years to days assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe')) assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe')) # Cannot cast timedelta safely from months/years to days assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe')) assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe')) # Can cast datetime same_kind from months/years to days assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind')) assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind')) # Can't cast timedelta same_kind from months/years to days assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind')) assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind')) # Can cast datetime same_kind across the date/time boundary assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind')) # Can cast timedelta same_kind across the date/time boundary assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind')) assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind')) # Cannot cast safely if the integer multiplier doesn't divide assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe')) assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe')) # But can cast same_kind assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind')) # Can cast safely if the integer multiplier does divide assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe')) # We can always cast types with generic units (corresponding to NaT) to # more specific types assert_(np.can_cast('m8', 'm8[h]', casting='same_kind')) assert_(np.can_cast('m8', 'm8[h]', casting='safe')) assert_(np.can_cast('M8', 'M8[h]', casting='same_kind')) assert_(np.can_cast('M8', 'M8[h]', casting='safe')) # but not the other way around assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind')) assert_(not np.can_cast('m8[h]', 'm8', casting='safe')) assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind')) assert_(not np.can_cast('M8[h]', 'M8', casting='safe')) def test_compare_generic_nat(self): # regression tests for gh-6452 assert_(np.datetime64('NaT') != np.datetime64('2000') + np.timedelta64('NaT')) assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us')) assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT')) @pytest.mark.parametrize("size", [ 3, 21, 217, 1000]) def test_datetime_nat_argsort_stability(self, size): # NaT < NaT should be False internally for # sort stability expected = np.arange(size) arr = np.tile(np.datetime64('NaT'), size) assert_equal(np.argsort(arr, kind='mergesort'), expected) @pytest.mark.parametrize("size", [ 3, 21, 217, 1000]) def test_timedelta_nat_argsort_stability(self, size): # NaT < NaT should be False internally for # sort stability expected = np.arange(size) arr = np.tile(np.timedelta64('NaT'), size) assert_equal(np.argsort(arr, kind='mergesort'), expected) @pytest.mark.parametrize("arr, expected", [ # the example provided in gh-12629 (['NaT', 1, 2, 3], [1, 2, 3, 'NaT']), # multiple NaTs (['NaT', 9, 'NaT', -707], [-707, 9, 'NaT', 'NaT']), # this sort explores another code path for NaT ([1, -2, 3, 'NaT'], [-2, 1, 3, 'NaT']), # 2-D array ([[51, -220, 'NaT'], [-17, 'NaT', -90]], [[-220, 51, 'NaT'], [-90, -17, 'NaT']]), ]) @pytest.mark.parametrize("dtype", [ 'M8[ns]', 'M8[us]', 'm8[ns]', 'm8[us]']) def test_datetime_timedelta_sort_nat(self, arr, expected, dtype): # fix for gh-12629 and gh-15063; NaT sorting to end of array arr = np.array(arr, dtype=dtype) expected = np.array(expected, dtype=dtype) arr.sort() assert_equal(arr, expected) def test_datetime_scalar_construction(self): # 
Construct with different units assert_equal(np.datetime64('1950-03-12', 'D'), np.datetime64('1950-03-12')) assert_equal(np.datetime64('1950-03-12T13', 's'), np.datetime64('1950-03-12T13', 'm')) # Default construction means NaT assert_equal(np.datetime64(), np.datetime64('NaT')) # Some basic strings and repr assert_equal(str(np.datetime64('NaT')), 'NaT') assert_equal(repr(np.datetime64('NaT')), "numpy.datetime64('NaT')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "numpy.datetime64('2011-02')") # None gets constructed as NaT assert_equal(np.datetime64(None), np.datetime64('NaT')) # Default construction of NaT is in generic units assert_equal(np.datetime64().dtype, np.dtype('M8')) assert_equal(np.datetime64('NaT').dtype, np.dtype('M8')) # Construction from integers requires a specified unit assert_raises(ValueError, np.datetime64, 17) # When constructing from a scalar or zero-dimensional array, # it either keeps the units or you can override them. a = np.datetime64('2000-03-18T16', 'h') b = np.array('2000-03-18T16', dtype='M8[h]') assert_equal(a.dtype, np.dtype('M8[h]')) assert_equal(b.dtype, np.dtype('M8[h]')) assert_equal(np.datetime64(a), a) assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]')) assert_equal(np.datetime64(b), a) assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]')) assert_equal(np.datetime64(a, 's'), a) assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]')) assert_equal(np.datetime64(b, 's'), a) assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]')) # Construction from datetime.date assert_equal(np.datetime64('1945-03-25'), np.datetime64(datetime.date(1945, 3, 25))) assert_equal(np.datetime64('2045-03-25', 'D'), np.datetime64(datetime.date(2045, 3, 25), 'D')) # Construction from datetime.datetime assert_equal(np.datetime64('1980-01-25T14:36:22.5'), np.datetime64(datetime.datetime(1980, 1, 25, 14, 36, 22, 500000))) # Construction with time units from a date is okay assert_equal(np.datetime64('1920-03-13', 'h'), np.datetime64('1920-03-13T00')) assert_equal(np.datetime64('1920-03', 'm'), np.datetime64('1920-03-01T00:00')) assert_equal(np.datetime64('1920', 's'), np.datetime64('1920-01-01T00:00:00')) assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'), np.datetime64('2045-03-25T00:00:00.000')) # Construction with date units from a datetime is also okay assert_equal(np.datetime64('1920-03-13T18', 'D'), np.datetime64('1920-03-13')) assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'), np.datetime64('1920-03')) assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'), np.datetime64('1920')) def test_datetime_scalar_construction_timezone(self): # verify that supplying an explicit timezone works, but is deprecated with assert_warns(DeprecationWarning): assert_equal(np.datetime64('2000-01-01T00Z'), np.datetime64('2000-01-01T00')) with assert_warns(DeprecationWarning): assert_equal(np.datetime64('2000-01-01T00-08'), np.datetime64('2000-01-01T08')) def test_datetime_array_find_type(self): dt = np.datetime64('1970-01-01', 'M') arr = np.array([dt]) assert_equal(arr.dtype, np.dtype('M8[M]')) # at the moment, we don't automatically convert these to datetime64 dt = datetime.date(1970, 1, 1) arr = np.array([dt]) assert_equal(arr.dtype, np.dtype('O')) dt = datetime.datetime(1970, 1, 1, 12, 30, 40) arr = np.array([dt]) assert_equal(arr.dtype, np.dtype('O')) # find "supertype" for non-dates and dates b = np.bool_(True) dm = np.datetime64('1970-01-01', 'M') d = datetime.date(1970, 1, 1) dt = 
datetime.datetime(1970, 1, 1, 12, 30, 40) arr = np.array([b, dm]) assert_equal(arr.dtype, np.dtype('O')) arr = np.array([b, d]) assert_equal(arr.dtype, np.dtype('O')) arr = np.array([b, dt]) assert_equal(arr.dtype, np.dtype('O')) arr = np.array([d, d]).astype('datetime64') assert_equal(arr.dtype, np.dtype('M8[D]')) arr = np.array([dt, dt]).astype('datetime64') assert_equal(arr.dtype, np.dtype('M8[us]')) @pytest.mark.parametrize("unit", [ # test all date / time units and use # "generic" to select generic unit ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"), ("s"), ("ms"), ("us"), ("ns"), ("ps"), ("fs"), ("as"), ("generic") ]) def test_timedelta_np_int_construction(self, unit): # regression test for gh-7617 if unit != "generic": assert_equal(np.timedelta64(np.int64(123), unit), np.timedelta64(123, unit)) else: assert_equal(np.timedelta64(np.int64(123)), np.timedelta64(123)) def test_timedelta_scalar_construction(self): # Construct with different units assert_equal(np.timedelta64(7, 'D'), np.timedelta64(1, 'W')) assert_equal(np.timedelta64(120, 's'), np.timedelta64(2, 'm')) # Default construction means 0 assert_equal(np.timedelta64(), np.timedelta64(0)) # None gets constructed as NaT assert_equal(np.timedelta64(None), np.timedelta64('NaT')) # Some basic strings and repr assert_equal(str(np.timedelta64('NaT')), 'NaT') assert_equal(repr(np.timedelta64('NaT')), "numpy.timedelta64('NaT')") assert_equal(str(np.timedelta64(3, 's')), '3 seconds') assert_equal(repr(np.timedelta64(-3, 's')), "numpy.timedelta64(-3,'s')") assert_equal(repr(np.timedelta64(12)), "numpy.timedelta64(12)") # Construction from an integer produces generic units assert_equal(np.timedelta64(12).dtype, np.dtype('m8')) # When constructing from a scalar or zero-dimensional array, # it either keeps the units or you can override them. 
a = np.timedelta64(2, 'h') b = np.array(2, dtype='m8[h]') assert_equal(a.dtype, np.dtype('m8[h]')) assert_equal(b.dtype, np.dtype('m8[h]')) assert_equal(np.timedelta64(a), a) assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]')) assert_equal(np.timedelta64(b), a) assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]')) assert_equal(np.timedelta64(a, 's'), a) assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]')) assert_equal(np.timedelta64(b, 's'), a) assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]')) # Construction from datetime.timedelta assert_equal(np.timedelta64(5, 'D'), np.timedelta64(datetime.timedelta(days=5))) assert_equal(np.timedelta64(102347621, 's'), np.timedelta64(datetime.timedelta(seconds=102347621))) assert_equal(np.timedelta64(-10234760000, 'us'), np.timedelta64(datetime.timedelta( microseconds=-10234760000))) assert_equal(np.timedelta64(10234760000, 'us'), np.timedelta64(datetime.timedelta( microseconds=10234760000))) assert_equal(np.timedelta64(1023476, 'ms'), np.timedelta64(datetime.timedelta(milliseconds=1023476))) assert_equal(np.timedelta64(10, 'm'), np.timedelta64(datetime.timedelta(minutes=10))) assert_equal(np.timedelta64(281, 'h'), np.timedelta64(datetime.timedelta(hours=281))) assert_equal(np.timedelta64(28, 'W'), np.timedelta64(datetime.timedelta(weeks=28))) # Cannot construct across nonlinear time unit boundaries a = np.timedelta64(3, 's') assert_raises(TypeError, np.timedelta64, a, 'M') assert_raises(TypeError, np.timedelta64, a, 'Y') a = np.timedelta64(6, 'M') assert_raises(TypeError, np.timedelta64, a, 'D') assert_raises(TypeError, np.timedelta64, a, 'h') a = np.timedelta64(1, 'Y') assert_raises(TypeError, np.timedelta64, a, 'D') assert_raises(TypeError, np.timedelta64, a, 'm') a = datetime.timedelta(seconds=3) assert_raises(TypeError, np.timedelta64, a, 'M') assert_raises(TypeError, np.timedelta64, a, 'Y') a = datetime.timedelta(weeks=3) assert_raises(TypeError, np.timedelta64, a, 'M') assert_raises(TypeError, np.timedelta64, a, 'Y') a = datetime.timedelta() assert_raises(TypeError, np.timedelta64, a, 'M') assert_raises(TypeError, np.timedelta64, a, 'Y') def test_timedelta_object_array_conversion(self): # Regression test for gh-11096 inputs = [datetime.timedelta(28), datetime.timedelta(30), datetime.timedelta(31)] expected = np.array([28, 30, 31], dtype='timedelta64[D]') actual = np.array(inputs, dtype='timedelta64[D]') assert_equal(expected, actual) def test_timedelta_0_dim_object_array_conversion(self): # Regression test for gh-11151 test = np.array(datetime.timedelta(seconds=20)) actual = test.astype(np.timedelta64) # expected value from the array constructor workaround # described in above issue expected = np.array(datetime.timedelta(seconds=20), np.timedelta64) assert_equal(actual, expected) def test_timedelta_nat_format(self): # gh-17552 assert_equal('NaT', '{0}'.format(np.timedelta64('nat'))) def test_timedelta_scalar_construction_units(self): # String construction detecting units assert_equal(np.datetime64('2010').dtype, np.dtype('M8[Y]')) assert_equal(np.datetime64('2010-03').dtype, np.dtype('M8[M]')) assert_equal(np.datetime64('2010-03-12').dtype, np.dtype('M8[D]')) assert_equal(np.datetime64('2010-03-12T17').dtype, np.dtype('M8[h]')) assert_equal(np.datetime64('2010-03-12T17:15').dtype, np.dtype('M8[m]')) assert_equal(np.datetime64('2010-03-12T17:15:08').dtype, np.dtype('M8[s]')) assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype, np.dtype('M8[ms]')) assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype, 
np.dtype('M8[ms]')) assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype, np.dtype('M8[ms]')) assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype, np.dtype('M8[us]')) assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype, np.dtype('M8[us]')) assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype, np.dtype('M8[us]')) assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype, np.dtype('M8[ns]')) assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype, np.dtype('M8[ns]')) assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype, np.dtype('M8[ns]')) assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype, np.dtype('M8[ps]')) assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype, np.dtype('M8[ps]')) assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype, np.dtype('M8[ps]')) assert_equal(np.datetime64( '1970-01-01T00:00:02.1234567890123').dtype, np.dtype('M8[fs]')) assert_equal(np.datetime64( '1970-01-01T00:00:02.12345678901234').dtype, np.dtype('M8[fs]')) assert_equal(np.datetime64( '1970-01-01T00:00:02.123456789012345').dtype, np.dtype('M8[fs]')) assert_equal(np.datetime64( '1970-01-01T00:00:02.1234567890123456').dtype, np.dtype('M8[as]')) assert_equal(np.datetime64( '1970-01-01T00:00:02.12345678901234567').dtype, np.dtype('M8[as]')) assert_equal(np.datetime64( '1970-01-01T00:00:02.123456789012345678').dtype, np.dtype('M8[as]')) # Python date object assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype, np.dtype('M8[D]')) # Python datetime object assert_equal(np.datetime64( datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype, np.dtype('M8[us]')) # 'today' special value assert_equal(np.datetime64('today').dtype, np.dtype('M8[D]')) # 'now' special value assert_equal(np.datetime64('now').dtype, np.dtype('M8[s]')) def test_datetime_nat_casting(self): a = np.array('NaT', dtype='M8[D]') b = np.datetime64('NaT', '[D]') # Arrays assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]')) assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]')) assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]')) assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]')) assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]')) # Scalars -> Scalars assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]')) assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]')) assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]')) assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]')) assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]')) # Arrays -> Scalars assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]')) assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]')) assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]')) assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]')) assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]')) # NaN -> NaT nan = np.array([np.nan] * 8) fnan = nan.astype('f') lnan = nan.astype('g') cnan = nan.astype('D') cfnan = nan.astype('F') clnan = nan.astype('G') nat = np.array([np.datetime64('NaT')] * 8) assert_equal(nan.astype('M8[ns]'), nat) assert_equal(fnan.astype('M8[ns]'), nat) assert_equal(lnan.astype('M8[ns]'), nat) assert_equal(cnan.astype('M8[ns]'), nat) assert_equal(cfnan.astype('M8[ns]'), nat) assert_equal(clnan.astype('M8[ns]'), nat) nat = np.array([np.timedelta64('NaT')] * 8) assert_equal(nan.astype('timedelta64[ns]'), nat) 
assert_equal(fnan.astype('timedelta64[ns]'), nat) assert_equal(lnan.astype('timedelta64[ns]'), nat) assert_equal(cnan.astype('timedelta64[ns]'), nat) assert_equal(cfnan.astype('timedelta64[ns]'), nat) assert_equal(clnan.astype('timedelta64[ns]'), nat) def test_days_creation(self): assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), (1600-1970)*365 - (1972-1600)/4 + 3 - 365) assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), (1600-1970)*365 - (1972-1600)/4 + 3) assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), (1600-1970)*365 - (1972-1600)/4 + 3 + 366) assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), (1900-1970)*365 - (1970-1900)//4) assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), (1900-1970)*365 - (1970-1900)//4 + 365) assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365) assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), (2000 - 1970)*365 + (2000 - 1972)//4) assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), (2000 - 1970)*365 + (2000 - 1972)//4 + 366) assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), (2400 - 1970)*365 + (2400 - 1972)//4 - 3) assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) def test_days_to_pydate(self): assert_equal(np.array('1599', dtype='M8[D]').astype('O'), datetime.date(1599, 1, 1)) assert_equal(np.array('1600', dtype='M8[D]').astype('O'), datetime.date(1600, 1, 1)) assert_equal(np.array('1601', dtype='M8[D]').astype('O'), datetime.date(1601, 1, 1)) assert_equal(np.array('1900', dtype='M8[D]').astype('O'), datetime.date(1900, 1, 1)) assert_equal(np.array('1901', dtype='M8[D]').astype('O'), datetime.date(1901, 1, 1)) assert_equal(np.array('2000', dtype='M8[D]').astype('O'), datetime.date(2000, 1, 1)) assert_equal(np.array('2001', dtype='M8[D]').astype('O'), datetime.date(2001, 1, 1)) assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), datetime.date(1600, 2, 29)) assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), datetime.date(1600, 3, 1)) assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), datetime.date(2001, 3, 22)) def test_dtype_comparison(self): assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) def test_pydatetime_creation(self): a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') 
assert_equal(a[0], a[1]) a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') assert_equal(a[0], a[1]) a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') assert_equal(a[0], a[1]) # Will fail if the date changes during the exact right moment a = np.array(['today', datetime.date.today()], dtype='M8[D]') assert_equal(a[0], a[1]) # datetime.datetime.now() returns local time, not UTC #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') #assert_equal(a[0], a[1]) # we can give a datetime.date time units assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), np.array(np.datetime64('1960-03-12T00:00:00'))) def test_datetime_string_conversion(self): a = ['2011-03-16', '1920-01-01', '2013-05-19'] str_a = np.array(a, dtype='S') uni_a = np.array(a, dtype='U') dt_a = np.array(a, dtype='M') # String to datetime assert_equal(dt_a, str_a.astype('M')) assert_equal(dt_a.dtype, str_a.astype('M').dtype) dt_b = np.empty_like(dt_a) dt_b[...] = str_a assert_equal(dt_a, dt_b) # Datetime to string assert_equal(str_a, dt_a.astype('S0')) str_b = np.empty_like(str_a) str_b[...] = dt_a assert_equal(str_a, str_b) # Unicode to datetime assert_equal(dt_a, uni_a.astype('M')) assert_equal(dt_a.dtype, uni_a.astype('M').dtype) dt_b = np.empty_like(dt_a) dt_b[...] = uni_a assert_equal(dt_a, dt_b) # Datetime to unicode assert_equal(uni_a, dt_a.astype('U')) uni_b = np.empty_like(uni_a) uni_b[...] = dt_a assert_equal(uni_a, uni_b) # Datetime to long string - gh-9712 assert_equal(str_a, dt_a.astype((np.string_, 128))) str_b = np.empty(str_a.shape, dtype=(np.string_, 128)) str_b[...] = dt_a assert_equal(str_a, str_b) @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) def test_time_byteswapping(self, time_dtype): times = np.array(["2017", "NaT"], dtype=time_dtype) times_swapped = times.astype(times.dtype.newbyteorder()) assert_array_equal(times, times_swapped) unswapped = times_swapped.view(np.int64).newbyteorder() assert_array_equal(unswapped, times.view(np.int64)) @pytest.mark.parametrize(["time1", "time2"], [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")]) def test_time_byteswapped_cast(self, time1, time2): dtype1 = np.dtype(time1) dtype2 = np.dtype(time2) times = np.array(["2017", "NaT"], dtype=dtype1) expected = times.astype(dtype2) # Test that every byte-swapping combination also returns the same # results (previous tests check that this comparison works fine). 
res = times.astype(dtype1.newbyteorder()).astype(dtype2) assert_array_equal(res, expected) res = times.astype(dtype2.newbyteorder()) assert_array_equal(res, expected) res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder()) assert_array_equal(res, expected) @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) @pytest.mark.parametrize("str_dtype", ["U", "S"]) def test_datetime_conversions_byteorders(self, str_dtype, time_dtype): times = np.array(["2017", "NaT"], dtype=time_dtype) # Unfortunately, timedelta does not roundtrip: from_strings = np.array(["2017", "NaT"], dtype=str_dtype) to_strings = times.astype(str_dtype) # assume this is correct # Check that conversion from times to string works if src is swapped: times_swapped = times.astype(times.dtype.newbyteorder()) res = times_swapped.astype(str_dtype) assert_array_equal(res, to_strings) # And also if both are swapped: res = times_swapped.astype(to_strings.dtype.newbyteorder()) assert_array_equal(res, to_strings) # only destination is swapped: res = times.astype(to_strings.dtype.newbyteorder()) assert_array_equal(res, to_strings) # Check that conversion from string to times works if src is swapped: from_strings_swapped = from_strings.astype( from_strings.dtype.newbyteorder()) res = from_strings_swapped.astype(time_dtype) assert_array_equal(res, times) # And if both are swapped: res = from_strings_swapped.astype(times.dtype.newbyteorder()) assert_array_equal(res, times) # Only destination is swapped: res = from_strings.astype(times.dtype.newbyteorder()) assert_array_equal(res, times) def test_datetime_array_str(self): a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']") a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') assert_equal(np.array2string(a, separator=', ', formatter={'datetime': lambda x: "'%s'" % np.datetime_as_string(x, timezone='UTC')}), "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") # Check that one NaT doesn't corrupt subsequent entries a = np.array(['2010', 'NaT', '2030']).astype('M') assert_equal(str(a), "['2010' 'NaT' '2030']") def test_timedelta_array_str(self): a = np.array([-1, 0, 100], dtype='m') assert_equal(str(a), "[ -1 0 100]") a = np.array(['NaT', 'NaT'], dtype='m') assert_equal(str(a), "['NaT' 'NaT']") # Check right-alignment with NaTs a = np.array([-1, 'NaT', 0], dtype='m') assert_equal(str(a), "[ -1 'NaT' 0]") a = np.array([-1, 'NaT', 1234567], dtype='m') assert_equal(str(a), "[ -1 'NaT' 1234567]") # Test with other byteorder: a = np.array([-1, 'NaT', 1234567], dtype='>m') assert_equal(str(a), "[ -1 'NaT' 1234567]") a = np.array([-1, 'NaT', 1234567], dtype='<m') assert_equal(str(a), "[ -1 'NaT' 1234567]") def test_pickle(self): # Check that pickle roundtripping works for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): dt = np.dtype('M8[7D]') assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt) dt = np.dtype('M8[W]') assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt) scalar = np.datetime64('2016-01-01T00:00:00.000000000') assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)), scalar) delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000') assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)), delta) # Check that loading pickles from 1.6 works pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \ b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." 
assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]')) pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \ b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." assert_equal(pickle.loads(pkl), np.dtype('<M8[W]')) pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) def test_dtype_promotion(self): # datetime <op> datetime computes the metadata gcd # timedelta <op> timedelta computes the metadata gcd for mM in ['m', 'M']: assert_equal( np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), np.dtype(mM+'8[2Y]')) assert_equal( np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), np.dtype(mM+'8[3Y]')) assert_equal( np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), np.dtype(mM+'8[2M]')) assert_equal( np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), np.dtype(mM+'8[1D]')) assert_equal( np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), np.dtype(mM+'8[s]')) assert_equal( np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), np.dtype(mM+'8[7s]')) # timedelta <op> timedelta raises when there is no reasonable gcd assert_raises(TypeError, np.promote_types, np.dtype('m8[Y]'), np.dtype('m8[D]')) assert_raises(TypeError, np.promote_types, np.dtype('m8[M]'), np.dtype('m8[W]')) # timedelta and float cannot be safely cast with each other assert_raises(TypeError, np.promote_types, "float32", "m8") assert_raises(TypeError, np.promote_types, "m8", "float32") assert_raises(TypeError, np.promote_types, "uint64", "m8") assert_raises(TypeError, np.promote_types, "m8", "uint64") # timedelta <op> timedelta may overflow with big unit ranges assert_raises(OverflowError, np.promote_types, np.dtype('m8[W]'), np.dtype('m8[fs]')) assert_raises(OverflowError, np.promote_types, np.dtype('m8[s]'), np.dtype('m8[as]')) def test_cast_overflow(self): # gh-4486 def cast(): numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]") assert_raises(OverflowError, cast) def cast2(): numpy.datetime64("2014").astype("<M8[fs]") assert_raises(OverflowError, cast2) def test_pyobject_roundtrip(self): # All datetime types should be able to roundtrip through object a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176], dtype=np.int64) # With date units for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']: b = a.copy().view(dtype=unit) b[0] = '-0001-01-01' b[1] = '-0001-12-31' b[2] = '0000-01-01' b[3] = '0001-01-01' b[4] = '1969-12-31' b[5] = '1970-01-01' b[6] = '9999-12-31' b[7] = '10000-01-01' b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, "Error roundtripping unit %s" % unit) # With time units for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]', 'M8[300as]', 'M8[20us]']: b = a.copy().view(dtype=unit) b[0] = '-0001-01-01T00' b[1] = '-0001-12-31T00' b[2] = '0000-01-01T00' b[3] = '0001-01-01T00' b[4] = '1969-12-31T23:59:59.999999' b[5] = '1970-01-01T00' b[6] = '9999-12-31T23:59:59.999999' b[7] 
= '10000-01-01T00' b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, "Error roundtripping unit %s" % unit) def test_month_truncation(self): # Make sure that months are truncating correctly assert_equal(np.array('1945-03-01', dtype='M8[M]'), np.array('1945-03-31', dtype='M8[M]')) assert_equal(np.array('1969-11-01', dtype='M8[M]'), np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]')) assert_equal(np.array('1969-12-01', dtype='M8[M]'), np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]')) assert_equal(np.array('1970-01-01', dtype='M8[M]'), np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]')) assert_equal(np.array('1980-02-01', dtype='M8[M]'), np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]')) def test_different_unit_comparison(self): # Check some years with date units for unit1 in ['Y', 'M', 'D']: dt1 = np.dtype('M8[%s]' % unit1) for unit2 in ['Y', 'M', 'D']: dt2 = np.dtype('M8[%s]' % unit2) assert_equal(np.array('1945', dtype=dt1), np.array('1945', dtype=dt2)) assert_equal(np.array('1970', dtype=dt1), np.array('1970', dtype=dt2)) assert_equal(np.array('9999', dtype=dt1), np.array('9999', dtype=dt2)) assert_equal(np.array('10000', dtype=dt1), np.array('10000-01-01', dtype=dt2)) assert_equal(np.datetime64('1945', unit1), np.datetime64('1945', unit2)) assert_equal(np.datetime64('1970', unit1), np.datetime64('1970', unit2)) assert_equal(np.datetime64('9999', unit1), np.datetime64('9999', unit2)) assert_equal(np.datetime64('10000', unit1), np.datetime64('10000-01-01', unit2)) # Check some datetimes with time units for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']: dt1 = np.dtype('M8[%s]' % unit1) for unit2 in ['h', 'm', 's', 'ms', 'us']: dt2 = np.dtype('M8[%s]' % unit2) assert_equal(np.array('1945-03-12T18', dtype=dt1), np.array('1945-03-12T18', dtype=dt2)) assert_equal(np.array('1970-03-12T18', dtype=dt1), np.array('1970-03-12T18', dtype=dt2)) assert_equal(np.array('9999-03-12T18', dtype=dt1), np.array('9999-03-12T18', dtype=dt2)) assert_equal(np.array('10000-01-01T00', dtype=dt1), np.array('10000-01-01T00', dtype=dt2)) assert_equal(np.datetime64('1945-03-12T18', unit1), np.datetime64('1945-03-12T18', unit2)) assert_equal(np.datetime64('1970-03-12T18', unit1), np.datetime64('1970-03-12T18', unit2)) assert_equal(np.datetime64('9999-03-12T18', unit1), np.datetime64('9999-03-12T18', unit2)) assert_equal(np.datetime64('10000-01-01T00', unit1), np.datetime64('10000-01-01T00', unit2)) # Check some days with units that won't overflow for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']: dt1 = np.dtype('M8[%s]' % unit1) for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']: dt2 = np.dtype('M8[%s]' % unit2) assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1), np.array('1932-02-17T00:00:00', dtype='M').astype(dt2), casting='unsafe')) assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1), np.array('10000-04-27T00:00:00', dtype='M').astype(dt2), casting='unsafe')) # Shouldn't be able to compare datetime and timedelta # TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by # default is needed to properly catch this kind of thing... 
a = np.array('2012-12-21', dtype='M8[D]') b = np.array(3, dtype='m8[D]') #assert_raises(TypeError, np.less, a, b) assert_raises(TypeError, np.less, a, b, casting='same_kind') def test_datetime_like(self): a = np.array([3], dtype='m8[4D]') b = np.array(['2012-12-21'], dtype='M8[D]') assert_equal(np.ones_like(a).dtype, a.dtype) assert_equal(np.zeros_like(a).dtype, a.dtype) assert_equal(np.empty_like(a).dtype, a.dtype) assert_equal(np.ones_like(b).dtype, b.dtype) assert_equal(np.zeros_like(b).dtype, b.dtype) assert_equal(np.empty_like(b).dtype, b.dtype) def test_datetime_unary(self): for tda, tdb, tdzero, tdone, tdmone in \ [ # One-dimensional arrays (np.array([3], dtype='m8[D]'), np.array([-3], dtype='m8[D]'), np.array([0], dtype='m8[D]'), np.array([1], dtype='m8[D]'), np.array([-1], dtype='m8[D]')), # NumPy scalars (np.timedelta64(3, '[D]'), np.timedelta64(-3, '[D]'), np.timedelta64(0, '[D]'), np.timedelta64(1, '[D]'), np.timedelta64(-1, '[D]'))]: # negative ufunc assert_equal(-tdb, tda) assert_equal((-tdb).dtype, tda.dtype) assert_equal(np.negative(tdb), tda) assert_equal(np.negative(tdb).dtype, tda.dtype) # positive ufunc assert_equal(np.positive(tda), tda) assert_equal(np.positive(tda).dtype, tda.dtype) assert_equal(np.positive(tdb), tdb) assert_equal(np.positive(tdb).dtype, tdb.dtype) # absolute ufunc assert_equal(np.absolute(tdb), tda) assert_equal(np.absolute(tdb).dtype, tda.dtype) # sign ufunc assert_equal(np.sign(tda), tdone) assert_equal(np.sign(tdb), tdmone) assert_equal(np.sign(tdzero), tdzero) assert_equal(np.sign(tda).dtype, tda.dtype) # The ufuncs always produce native-endian results assert_ def test_datetime_add(self): for dta, dtb, dtc, dtnat, tda, tdb, tdc in \ [ # One-dimensional arrays (np.array(['2012-12-21'], dtype='M8[D]'), np.array(['2012-12-24'], dtype='M8[D]'), np.array(['2012-12-21T11'], dtype='M8[h]'), np.array(['NaT'], dtype='M8[D]'), np.array([3], dtype='m8[D]'), np.array([11], dtype='m8[h]'), np.array([3*24 + 11], dtype='m8[h]')), # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.datetime64('2012-12-24', '[D]'), np.datetime64('2012-12-21T11', '[h]'), np.datetime64('NaT', '[D]'), np.timedelta64(3, '[D]'), np.timedelta64(11, '[h]'), np.timedelta64(3*24 + 11, '[h]'))]: # m8 + m8 assert_equal(tda + tdb, tdc) assert_equal((tda + tdb).dtype, np.dtype('m8[h]')) # m8 + bool assert_equal(tdb + True, tdb + 1) assert_equal((tdb + True).dtype, np.dtype('m8[h]')) # m8 + int assert_equal(tdb + 3*24, tdc) assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]')) # bool + m8 assert_equal(False + tdb, tdb) assert_equal((False + tdb).dtype, np.dtype('m8[h]')) # int + m8 assert_equal(3*24 + tdb, tdc) assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]')) # M8 + bool assert_equal(dta + True, dta + 1) assert_equal(dtnat + True, dtnat) assert_equal((dta + True).dtype, np.dtype('M8[D]')) # M8 + int assert_equal(dta + 3, dtb) assert_equal(dtnat + 3, dtnat) assert_equal((dta + 3).dtype, np.dtype('M8[D]')) # bool + M8 assert_equal(False + dta, dta) assert_equal(False + dtnat, dtnat) assert_equal((False + dta).dtype, np.dtype('M8[D]')) # int + M8 assert_equal(3 + dta, dtb) assert_equal(3 + dtnat, dtnat) assert_equal((3 + dta).dtype, np.dtype('M8[D]')) # M8 + m8 assert_equal(dta + tda, dtb) assert_equal(dtnat + tda, dtnat) assert_equal((dta + tda).dtype, np.dtype('M8[D]')) # m8 + M8 assert_equal(tda + dta, dtb) assert_equal(tda + dtnat, dtnat) assert_equal((tda + dta).dtype, np.dtype('M8[D]')) # In M8 + m8, the result goes to higher precision assert_equal(np.add(dta, tdb, casting='unsafe'), 
dtc) assert_equal(np.add(dta, tdb, casting='unsafe').dtype, np.dtype('M8[h]')) assert_equal(np.add(tdb, dta, casting='unsafe'), dtc) assert_equal(np.add(tdb, dta, casting='unsafe').dtype, np.dtype('M8[h]')) # M8 + M8 assert_raises(TypeError, np.add, dta, dtb) def test_datetime_subtract(self): for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \ [ # One-dimensional arrays (np.array(['2012-12-21'], dtype='M8[D]'), np.array(['2012-12-24'], dtype='M8[D]'), np.array(['1940-12-24'], dtype='M8[D]'), np.array(['1940-12-24T00'], dtype='M8[h]'), np.array(['1940-12-23T13'], dtype='M8[h]'), np.array(['NaT'], dtype='M8[D]'), np.array([3], dtype='m8[D]'), np.array([11], dtype='m8[h]'), np.array([3*24 - 11], dtype='m8[h]')), # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.datetime64('2012-12-24', '[D]'), np.datetime64('1940-12-24', '[D]'), np.datetime64('1940-12-24T00', '[h]'), np.datetime64('1940-12-23T13', '[h]'), np.datetime64('NaT', '[D]'), np.timedelta64(3, '[D]'), np.timedelta64(11, '[h]'), np.timedelta64(3*24 - 11, '[h]'))]: # m8 - m8 assert_equal(tda - tdb, tdc) assert_equal((tda - tdb).dtype, np.dtype('m8[h]')) assert_equal(tdb - tda, -tdc) assert_equal((tdb - tda).dtype, np.dtype('m8[h]')) # m8 - bool assert_equal(tdc - True, tdc - 1) assert_equal((tdc - True).dtype, np.dtype('m8[h]')) # m8 - int assert_equal(tdc - 3*24, -tdb) assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]')) # int - m8 assert_equal(False - tdb, -tdb) assert_equal((False - tdb).dtype, np.dtype('m8[h]')) # int - m8 assert_equal(3*24 - tdb, tdc) assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]')) # M8 - bool assert_equal(dtb - True, dtb - 1) assert_equal(dtnat - True, dtnat) assert_equal((dtb - True).dtype, np.dtype('M8[D]')) # M8 - int assert_equal(dtb - 3, dta) assert_equal(dtnat - 3, dtnat) assert_equal((dtb - 3).dtype, np.dtype('M8[D]')) # M8 - m8 assert_equal(dtb - tda, dta) assert_equal(dtnat - tda, dtnat) assert_equal((dtb - tda).dtype, np.dtype('M8[D]')) # In M8 - m8, the result goes to higher precision assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte) assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype, np.dtype('M8[h]')) # M8 - M8 with different goes to higher precision assert_equal(np.subtract(dtc, dtd, casting='unsafe'), np.timedelta64(0, 'h')) assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype, np.dtype('m8[h]')) assert_equal(np.subtract(dtd, dtc, casting='unsafe'), np.timedelta64(0, 'h')) assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype, np.dtype('m8[h]')) # m8 - M8 assert_raises(TypeError, np.subtract, tda, dta) # bool - M8 assert_raises(TypeError, np.subtract, False, dta) # int - M8 assert_raises(TypeError, np.subtract, 3, dta) def test_datetime_multiply(self): for dta, tda, tdb, tdc in \ [ # One-dimensional arrays (np.array(['2012-12-21'], dtype='M8[D]'), np.array([6], dtype='m8[h]'), np.array([9], dtype='m8[h]'), np.array([12], dtype='m8[h]')), # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.timedelta64(6, '[h]'), np.timedelta64(9, '[h]'), np.timedelta64(12, '[h]'))]: # m8 * int assert_equal(tda * 2, tdc) assert_equal((tda * 2).dtype, np.dtype('m8[h]')) # int * m8 assert_equal(2 * tda, tdc) assert_equal((2 * tda).dtype, np.dtype('m8[h]')) # m8 * float assert_equal(tda * 1.5, tdb) assert_equal((tda * 1.5).dtype, np.dtype('m8[h]')) # float * m8 assert_equal(1.5 * tda, tdb) assert_equal((1.5 * tda).dtype, np.dtype('m8[h]')) # m8 * m8 assert_raises(TypeError, np.multiply, tda, tdb) # m8 * M8 assert_raises(TypeError, np.multiply, dta, tda) # M8 * m8 
assert_raises(TypeError, np.multiply, tda, dta) # M8 * int assert_raises(TypeError, np.multiply, dta, 2) # int * M8 assert_raises(TypeError, np.multiply, 2, dta) # M8 * float assert_raises(TypeError, np.multiply, dta, 1.5) # float * M8 assert_raises(TypeError, np.multiply, 1.5, dta) # NaTs with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in multiply") nat = np.timedelta64('NaT') def check(a, b, res): assert_equal(a * b, res) assert_equal(b * a, res) for tp in (int, float): check(nat, tp(2), nat) check(nat, tp(0), nat) for f in (float('inf'), float('nan')): check(np.timedelta64(1), f, nat) check(np.timedelta64(0), f, nat) check(nat, f, nat) @pytest.mark.parametrize("op1, op2, exp", [ # m8 same units round down (np.timedelta64(7, 's'), np.timedelta64(4, 's'), 1), # m8 same units round down with negative (np.timedelta64(7, 's'), np.timedelta64(-4, 's'), -2), # m8 same units negative no round down (np.timedelta64(8, 's'), np.timedelta64(-4, 's'), -2), # m8 different units (np.timedelta64(1, 'm'), np.timedelta64(31, 's'), 1), # m8 generic units (np.timedelta64(1890), np.timedelta64(31), 60), # Y // M works (np.timedelta64(2, 'Y'), np.timedelta64('13', 'M'), 1), # handle 1D arrays (np.array([1, 2, 3], dtype='m8'), np.array([2], dtype='m8'), np.array([0, 1, 1], dtype=np.int64)), ]) def test_timedelta_floor_divide(self, op1, op2, exp): assert_equal(op1 // op2, exp) @pytest.mark.parametrize("op1, op2", [ # div by 0 (np.timedelta64(10, 'us'), np.timedelta64(0, 'us')), # div with NaT (np.timedelta64('NaT'), np.timedelta64(50, 'us')), # special case for int64 min # in integer floor division (np.timedelta64(np.iinfo(np.int64).min), np.timedelta64(-1)), ]) def test_timedelta_floor_div_warnings(self, op1, op2): with assert_warns(RuntimeWarning): actual = op1 // op2 assert_equal(actual, 0) assert_equal(actual.dtype, np.int64) @pytest.mark.parametrize("val1, val2", [ # the smallest integer that can't be represented # exactly in a double should be preserved if we avoid # casting to double in floordiv operation (9007199254740993, 1), # stress the alternate floordiv code path where # operand signs don't match and remainder isn't 0 (9007199254740999, -2), ]) def test_timedelta_floor_div_precision(self, val1, val2): op1 = np.timedelta64(val1) op2 = np.timedelta64(val2) actual = op1 // op2 # Python reference integer floor expected = val1 // val2 assert_equal(actual, expected) @pytest.mark.parametrize("val1, val2", [ # years and months sometimes can't be unambiguously # divided for floor division operation (np.timedelta64(7, 'Y'), np.timedelta64(3, 's')), (np.timedelta64(7, 'M'), np.timedelta64(1, 'D')), ]) def test_timedelta_floor_div_error(self, val1, val2): with assert_raises_regex(TypeError, "common metadata divisor"): val1 // val2 @pytest.mark.parametrize("op1, op2", [ # reuse the test cases from floordiv (np.timedelta64(7, 's'), np.timedelta64(4, 's')), # m8 same units round down with negative (np.timedelta64(7, 's'), np.timedelta64(-4, 's')), # m8 same units negative no round down (np.timedelta64(8, 's'), np.timedelta64(-4, 's')), # m8 different units (np.timedelta64(1, 'm'), np.timedelta64(31, 's')), # m8 generic units (np.timedelta64(1890), np.timedelta64(31)), # Y // M works (np.timedelta64(2, 'Y'), np.timedelta64('13', 'M')), # handle 1D arrays (np.array([1, 2, 3], dtype='m8'), np.array([2], dtype='m8')), ]) def test_timedelta_divmod(self, op1, op2): expected = (op1 // op2, op1 % op2) assert_equal(divmod(op1, op2), expected) @pytest.mark.parametrize("op1, op2", [ # 
reuse cases from floordiv # div by 0 (np.timedelta64(10, 'us'), np.timedelta64(0, 'us')), # div with NaT (np.timedelta64('NaT'), np.timedelta64(50, 'us')), # special case for int64 min # in integer floor division (np.timedelta64(np.iinfo(np.int64).min), np.timedelta64(-1)), ]) def test_timedelta_divmod_warnings(self, op1, op2): with assert_warns(RuntimeWarning): expected = (op1 // op2, op1 % op2) with assert_warns(RuntimeWarning): actual = divmod(op1, op2) assert_equal(actual, expected) def test_datetime_divide(self): for dta, tda, tdb, tdc, tdd in \ [ # One-dimensional arrays (np.array(['2012-12-21'], dtype='M8[D]'), np.array([6], dtype='m8[h]'), np.array([9], dtype='m8[h]'), np.array([12], dtype='m8[h]'), np.array([6], dtype='m8[m]')), # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.timedelta64(6, '[h]'), np.timedelta64(9, '[h]'), np.timedelta64(12, '[h]'), np.timedelta64(6, '[m]'))]: # m8 / int assert_equal(tdc / 2, tda) assert_equal((tdc / 2).dtype, np.dtype('m8[h]')) # m8 / float assert_equal(tda / 0.5, tdc) assert_equal((tda / 0.5).dtype, np.dtype('m8[h]')) # m8 / m8 assert_equal(tda / tdb, 6.0 / 9.0) assert_equal(np.divide(tda, tdb), 6.0 / 9.0) assert_equal(np.true_divide(tda, tdb), 6.0 / 9.0) assert_equal(tdb / tda, 9.0 / 6.0) assert_equal((tda / tdb).dtype, np.dtype('f8')) assert_equal(tda / tdd, 60.0) assert_equal(tdd / tda, 1.0 / 60.0) # int / m8 assert_raises(TypeError, np.divide, 2, tdb) # float / m8 assert_raises(TypeError, np.divide, 0.5, tdb) # m8 / M8 assert_raises(TypeError, np.divide, dta, tda) # M8 / m8 assert_raises(TypeError, np.divide, tda, dta) # M8 / int assert_raises(TypeError, np.divide, dta, 2) # int / M8 assert_raises(TypeError, np.divide, 2, dta) # M8 / float assert_raises(TypeError, np.divide, dta, 1.5) # float / M8 assert_raises(TypeError, np.divide, 1.5, dta) # NaTs with suppress_warnings() as sup: sup.filter(RuntimeWarning, r".*encountered in true\_divide") nat = np.timedelta64('NaT') for tp in (int, float): assert_equal(np.timedelta64(1) / tp(0), nat) assert_equal(np.timedelta64(0) / tp(0), nat) assert_equal(nat / tp(0), nat) assert_equal(nat / tp(2), nat) # Division by inf assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0)) assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0)) assert_equal(nat / float('inf'), nat) # Division by nan assert_equal(np.timedelta64(1) / float('nan'), nat) assert_equal(np.timedelta64(0) / float('nan'), nat) assert_equal(nat / float('nan'), nat) def test_datetime_compare(self): # Test all the comparison operators a = np.datetime64('2000-03-12T18:00:00.000000') b = np.array(['2000-03-12T18:00:00.000000', '2000-03-12T17:59:59.999999', '2000-03-12T18:00:00.000001', '1970-01-11T12:00:00.909090', '2016-01-11T12:00:00.909090'], dtype='datetime64[us]') assert_equal(np.equal(a, b), [1, 0, 0, 0, 0]) assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1]) assert_equal(np.less(a, b), [0, 0, 1, 0, 1]) assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1]) assert_equal(np.greater(a, b), [0, 1, 0, 1, 0]) assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0]) def test_datetime_compare_nat(self): dt_nat = np.datetime64('NaT', 'D') dt_other = np.datetime64('2000-01-01') td_nat = np.timedelta64('NaT', 'h') td_other = np.timedelta64(1, 'h') for op in [np.equal, np.less, np.less_equal, np.greater, np.greater_equal]: assert_(not op(dt_nat, dt_nat)) assert_(not op(dt_nat, dt_other)) assert_(not op(dt_other, dt_nat)) assert_(not op(td_nat, td_nat)) assert_(not op(td_nat, td_other)) assert_(not op(td_other, td_nat)) 
assert_(np.not_equal(dt_nat, dt_nat)) assert_(np.not_equal(dt_nat, dt_other)) assert_(np.not_equal(dt_other, dt_nat)) assert_(np.not_equal(td_nat, td_nat)) assert_(np.not_equal(td_nat, td_other)) assert_(np.not_equal(td_other, td_nat)) def test_datetime_minmax(self): # The metadata of the result should become the GCD # of the operand metadata a = np.array('1999-03-12T13', dtype='M8[2m]') b = np.array('1999-03-12T12', dtype='M8[s]') assert_equal(np.minimum(a, b), b) assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]')) assert_equal(np.fmin(a, b), b) assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]')) assert_equal(np.maximum(a, b), a) assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]')) assert_equal(np.fmax(a, b), a) assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]')) # Viewed as integers, the comparison is opposite because # of the units chosen assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8')) # Interaction with NaT a = np.array('1999-03-12T13', dtype='M8[2m]') dtnat = np.array('NaT', dtype='M8[h]') assert_equal(np.minimum(a, dtnat), dtnat) assert_equal(np.minimum(dtnat, a), dtnat) assert_equal(np.maximum(a, dtnat), dtnat) assert_equal(np.maximum(dtnat, a), dtnat) assert_equal(np.fmin(dtnat, a), a) assert_equal(np.fmin(a, dtnat), a) assert_equal(np.fmax(dtnat, a), a) assert_equal(np.fmax(a, dtnat), a) # Also do timedelta a = np.array(3, dtype='m8[h]') b = np.array(3*3600 - 3, dtype='m8[s]') assert_equal(np.minimum(a, b), b) assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]')) assert_equal(np.fmin(a, b), b) assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]')) assert_equal(np.maximum(a, b), a) assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]')) assert_equal(np.fmax(a, b), a) assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]')) # Viewed as integers, the comparison is opposite because # of the units chosen assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8')) # should raise between datetime and timedelta # # TODO: Allowing unsafe casting by # default in ufuncs strikes again... 
:( a = np.array(3, dtype='m8[h]') b = np.array('1999-03-12T12', dtype='M8[s]') #assert_raises(TypeError, np.minimum, a, b) #assert_raises(TypeError, np.maximum, a, b) #assert_raises(TypeError, np.fmin, a, b) #assert_raises(TypeError, np.fmax, a, b) assert_raises(TypeError, np.minimum, a, b, casting='same_kind') assert_raises(TypeError, np.maximum, a, b, casting='same_kind') assert_raises(TypeError, np.fmin, a, b, casting='same_kind') assert_raises(TypeError, np.fmax, a, b, casting='same_kind') def test_hours(self): t = np.ones(3, dtype='M8[s]') t[0] = 60*60*24 + 60*60*10 assert_(t[0].item().hour == 10) def test_divisor_conversion_year(self): assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]')) assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]')) assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]')) def test_divisor_conversion_month(self): assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]')) assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]')) assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]')) def test_divisor_conversion_week(self): assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]')) assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]')) assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]')) def test_divisor_conversion_day(self): assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]')) assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]')) assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]')) def test_divisor_conversion_hour(self): assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]')) assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]')) def test_divisor_conversion_minute(self): assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]')) assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]')) def test_divisor_conversion_second(self): assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]')) assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]')) def test_divisor_conversion_fs(self): assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]')) assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]')) def test_divisor_conversion_as(self): assert_raises(ValueError, lambda: np.dtype('M8[as/10]')) def test_string_parser_variants(self): # Allow space instead of 'T' between date and time assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')), np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]'))) # Allow positive years assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')), np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]'))) # Allow negative years assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')), np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]'))) # UTC specifier with assert_warns(DeprecationWarning): assert_equal( np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')), np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]'))) with assert_warns(DeprecationWarning): assert_equal( np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')), np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]'))) # Time zone offset with assert_warns(DeprecationWarning): assert_equal( np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')), np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]'))) with assert_warns(DeprecationWarning): assert_equal( np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')), np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]'))) with assert_warns(DeprecationWarning): assert_equal( np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')), np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]'))) with assert_warns(DeprecationWarning): 
assert_equal(np.datetime64('1977-03-02T12:30-0230'), np.datetime64('1977-03-02T15:00')) def test_string_parser_error_check(self): # Arbitrary bad string assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]')) # Character after year must be '-' assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]')) # Cannot have trailing '-' assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]')) # Month must be in range [1,12] assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]')) # Month must have two digits assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]')) # 'Mor' is not a valid month assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]')) # Cannot have trailing '-' assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]')) # Day must be in range [1,len(month)] assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]')) # Cannot have trailing characters assert_raises(ValueError, np.array, ['1980-02-03%'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-02-03 q'], np.dtype('M8[us]')) # Hours must be in range [0, 23] assert_raises(ValueError, np.array, ['1980-02-03 25'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-02-03T25'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-02-03 24:01'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-02-03T24:01'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-02-03 -1'], np.dtype('M8[us]')) # No trailing ':' assert_raises(ValueError, np.array, ['1980-02-03 01:'], np.dtype('M8[us]')) # Minutes must be in range [0, 59] assert_raises(ValueError, np.array, ['1980-02-03 01:-1'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-02-03 01:60'], np.dtype('M8[us]')) # No trailing ':' assert_raises(ValueError, np.array, ['1980-02-03 01:60:'], np.dtype('M8[us]')) # Seconds must be in range [0, 59] assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'], np.dtype('M8[us]')) assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'], np.dtype('M8[us]')) # Timezone offset must within a reasonable range with assert_warns(DeprecationWarning): assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'], np.dtype('M8[us]')) with assert_warns(DeprecationWarning): assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'], np.dtype('M8[us]')) with 
assert_warns(DeprecationWarning): assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'], np.dtype('M8[us]')) with assert_warns(DeprecationWarning): assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'], np.dtype('M8[us]')) with assert_warns(DeprecationWarning): assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'], np.dtype('M8[us]')) def test_creation_overflow(self): date = '1980-03-23 20:00:00' timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64) for unit in ['ms', 'us', 'ns']: timesteps *= 1000 x = np.array([date], dtype='datetime64[%s]' % unit) assert_equal(timesteps, x[0].astype(np.int64), err_msg='Datetime conversion error for unit %s' % unit) assert_equal(x[0].astype(np.int64), 322689600000000000) # gh-13062 with pytest.raises(OverflowError): np.datetime64(2**64, 'D') with pytest.raises(OverflowError): np.timedelta64(2**64, 'D') def test_datetime_as_string(self): # Check all the units with default string conversion date = '1959-10-13' datetime = '1959-10-13T12:34:56.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')), '1959') assert_equal(np.datetime_as_string(np.datetime64(date, 'M')), '1959-10') assert_equal(np.datetime_as_string(np.datetime64(date, 'D')), '1959-10-13') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')), '1959-10-13T12') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')), '1959-10-13T12:34') assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')), '1959-10-13T12:34:56') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')), '1959-10-13T12:34:56.789') for us in ['us', 'μs', b'us']: # check non-ascii and bytes too assert_equal(np.datetime_as_string(np.datetime64(datetime, us)), '1959-10-13T12:34:56.789012') datetime = '1969-12-31T23:34:56.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')), '1969-12-31T23:34:56.789012345') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')), '1969-12-31T23:34:56.789012345678') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')), '1969-12-31T23:34:56.789012345678901') datetime = '1969-12-31T23:59:57.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')), datetime) datetime = '1970-01-01T00:34:56.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')), '1970-01-01T00:34:56.789012345') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')), '1970-01-01T00:34:56.789012345678') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')), '1970-01-01T00:34:56.789012345678901') datetime = '1970-01-01T00:00:05.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')), datetime) # String conversion with the unit= parameter a = np.datetime64('2032-07-18T12:23:34.123456', 'us') assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'), '2032') assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'), '2032-07') assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'), '2032-07-18') assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'), '2032-07-18') assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12') assert_equal(np.datetime_as_string(a, unit='m'), '2032-07-18T12:23') assert_equal(np.datetime_as_string(a, unit='s'), '2032-07-18T12:23:34') assert_equal(np.datetime_as_string(a, unit='ms'), '2032-07-18T12:23:34.123') assert_equal(np.datetime_as_string(a, unit='us'), '2032-07-18T12:23:34.123456') 
assert_equal(np.datetime_as_string(a, unit='ns'), '2032-07-18T12:23:34.123456000') assert_equal(np.datetime_as_string(a, unit='ps'), '2032-07-18T12:23:34.123456000000') assert_equal(np.datetime_as_string(a, unit='fs'), '2032-07-18T12:23:34.123456000000000') assert_equal(np.datetime_as_string(a, unit='as'), '2032-07-18T12:23:34.123456000000000000') # unit='auto' parameter assert_equal(np.datetime_as_string( np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'), '2032-07-18T12:23:34.123456') assert_equal(np.datetime_as_string( np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'), '2032-07-18T12:23:34.120') assert_equal(np.datetime_as_string( np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'), '2032-07-18T12:23:34') assert_equal(np.datetime_as_string( np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'), '2032-07-18T12:23') # 'auto' doesn't split up hour and minute assert_equal(np.datetime_as_string( np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'), '2032-07-18T12:00') assert_equal(np.datetime_as_string( np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'), '2032-07-18') # 'auto' doesn't split up the date assert_equal(np.datetime_as_string( np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'), '2032-07-01') assert_equal(np.datetime_as_string( np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' a = np.datetime64('2010-03-15T06:30', 'm') assert_equal(np.datetime_as_string(a), '2010-03-15T06:30') assert_equal(np.datetime_as_string(a, timezone='naive'), '2010-03-15T06:30') assert_equal(np.datetime_as_string(a, timezone='UTC'), '2010-03-15T06:30Z') assert_(np.datetime_as_string(a, timezone='local') != '2010-03-15T06:30') b = np.datetime64('2010-02-15T06:30', 'm') assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), '2010-03-15T01:30-0500') assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), '2010-03-15T02:30-0400') assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')), '2010-03-14T23:30-0700') assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), '2010-02-15T00:30-0600') assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), '2010-02-15T01:30-0500') assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), '2010-02-14T22:30-0800') # Dates to strings with a timezone attached is disabled by default assert_raises(TypeError, np.datetime_as_string, a, unit='D', timezone=tz('US/Pacific')) # Check that we can print out the date in the specified time zone assert_equal(np.datetime_as_string(a, unit='D', timezone=tz('US/Pacific'), casting='unsafe'), '2010-03-14') assert_equal(np.datetime_as_string(b, unit='D', timezone=tz('US/Central'), casting='unsafe'), '2010-02-15') def test_datetime_arange(self): # With two datetimes provided as strings a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]') assert_equal(a.dtype, np.dtype('M8[D]')) assert_equal(a, np.array(['2010-01-05', '2010-01-06', '2010-01-07', '2010-01-08', '2010-01-09'], dtype='M8[D]')) a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]') assert_equal(a.dtype, np.dtype('M8[D]')) assert_equal(a, np.array(['1950-02-10', '1950-02-09', '1950-02-08', '1950-02-07'], dtype='M8[D]')) # Unit should be detected as months here a = np.arange('1969-05', '1970-05', 2, dtype='M8') assert_equal(a.dtype, np.dtype('M8[M]')) assert_equal(a, np.datetime64('1969-05') + 
np.arange(12, step=2)) # datetime, integer|timedelta works as well # produces arange (start, start + stop) in this case a = np.arange('1969', 18, 3, dtype='M8') assert_equal(a.dtype, np.dtype('M8[Y]')) assert_equal(a, np.datetime64('1969') + np.arange(18, step=3)) a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8') assert_equal(a.dtype, np.dtype('M8[D]')) assert_equal(a, np.datetime64('1969-12-19') + np.arange(22, step=2)) # Step of 0 is disallowed assert_raises(ValueError, np.arange, np.datetime64('today'), np.datetime64('today') + 3, 0) # Promotion across nonlinear unit boundaries is disallowed assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'), np.timedelta64(5, 'M')) assert_raises(TypeError, np.arange, np.datetime64('2012-02-03T14', 's'), np.timedelta64(5, 'Y')) def test_datetime_arange_no_dtype(self): d = np.array('2010-01-04', dtype="M8[D]") assert_equal(np.arange(d, d + 1), d) assert_raises(ValueError, np.arange, d) def test_timedelta_arange(self): a = np.arange(3, 10, dtype='m8') assert_equal(a.dtype, np.dtype('m8')) assert_equal(a, np.timedelta64(0) + np.arange(3, 10)) a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8') assert_equal(a.dtype, np.dtype('m8[s]')) assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2)) # Step of 0 is disallowed assert_raises(ValueError, np.arange, np.timedelta64(0), np.timedelta64(5), 0) # Promotion across nonlinear unit boundaries is disallowed assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'), np.timedelta64(5, 'M')) assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'), np.timedelta64(5, 'D')) @pytest.mark.parametrize("val1, val2, expected", [ # case from gh-12092 (np.timedelta64(7, 's'), np.timedelta64(3, 's'), np.timedelta64(1, 's')), # negative value cases (np.timedelta64(3, 's'), np.timedelta64(-2, 's'), np.timedelta64(-1, 's')), (np.timedelta64(-3, 's'), np.timedelta64(2, 's'), np.timedelta64(1, 's')), # larger value cases (np.timedelta64(17, 's'), np.timedelta64(22, 's'), np.timedelta64(17, 's')), (np.timedelta64(22, 's'), np.timedelta64(17, 's'), np.timedelta64(5, 's')), # different units (np.timedelta64(1, 'm'), np.timedelta64(57, 's'), np.timedelta64(3, 's')), (np.timedelta64(1, 'us'), np.timedelta64(727, 'ns'), np.timedelta64(273, 'ns')), # NaT is propagated (np.timedelta64('NaT'), np.timedelta64(50, 'ns'), np.timedelta64('NaT')), # Y % M works (np.timedelta64(2, 'Y'), np.timedelta64(22, 'M'), np.timedelta64(2, 'M')), ]) def test_timedelta_modulus(self, val1, val2, expected): assert_equal(val1 % val2, expected) @pytest.mark.parametrize("val1, val2", [ # years and months sometimes can't be unambiguously # divided for modulus operation (np.timedelta64(7, 'Y'), np.timedelta64(3, 's')), (np.timedelta64(7, 'M'), np.timedelta64(1, 'D')), ]) def test_timedelta_modulus_error(self, val1, val2): with assert_raises_regex(TypeError, "common metadata divisor"): val1 % val2 def test_timedelta_modulus_div_by_zero(self): with assert_warns(RuntimeWarning): actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') assert_equal(actual, np.timedelta64('NaT')) @pytest.mark.parametrize("val1, val2", [ # cases where one operand is not # timedelta64 (np.timedelta64(7, 'Y'), 15,), (7.5, np.timedelta64(1, 'D')), ]) def test_timedelta_modulus_type_resolution(self, val1, val2): # NOTE: some of the operations may be supported # in the future with assert_raises_regex(TypeError, "'remainder' cannot use operands with types"): val1 % val2 def test_timedelta_arange_no_dtype(self): d = np.array(5, dtype="m8[D]") 
assert_equal(np.arange(d, d + 1), d) assert_equal(np.arange(d), np.arange(0, d)) def test_datetime_maximum_reduce(self): a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]') assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]')) assert_equal(np.maximum.reduce(a), np.datetime64('2010-01-02')) a = np.array([1, 4, 0, 7, 2], dtype='m8[s]') assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]')) assert_equal(np.maximum.reduce(a), np.timedelta64(7, 's')) def test_datetime_busday_offset(self): # First Monday in June assert_equal( np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'), np.datetime64('2011-06-06')) # Last Monday in June assert_equal( np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'), np.datetime64('2011-06-27')) assert_equal( np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'), np.datetime64('2011-06-27')) # Default M-F business days, different roll modes assert_equal(np.busday_offset('2010-08', 0, roll='backward'), np.datetime64('2010-07-30')) assert_equal(np.busday_offset('2010-08', 0, roll='preceding'), np.datetime64('2010-07-30')) assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'), np.datetime64('2010-08-02')) assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'), np.datetime64('2010-08-02')) assert_equal(np.busday_offset('2010-08', 0, roll='forward'), np.datetime64('2010-08-02')) assert_equal(np.busday_offset('2010-08', 0, roll='following'), np.datetime64('2010-08-02')) assert_equal(np.busday_offset('2010-10-30', 0, roll='following'), np.datetime64('2010-11-01')) assert_equal( np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'), np.datetime64('2010-10-29')) assert_equal( np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'), np.datetime64('2010-10-29')) assert_equal( np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'), np.datetime64('2010-10-18')) assert_equal( np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'), np.datetime64('2010-10-15')) # roll='raise' by default assert_raises(ValueError, np.busday_offset, '2011-06-04', 0) # Bigger offset values assert_equal(np.busday_offset('2006-02-01', 25), np.datetime64('2006-03-08')) assert_equal(np.busday_offset('2006-03-08', -25), np.datetime64('2006-02-01')) assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'), np.datetime64('2007-04-07')) assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'), np.datetime64('2007-02-25')) # NaT values when roll is not raise assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'), np.datetime64('NaT')) assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'), np.datetime64('NaT')) assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'), np.datetime64('NaT')) def test_datetime_busdaycalendar(self): # Check that it removes NaT, duplicates, and weekends # and sorts the result. bdd = np.busdaycalendar( holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT', '2011-12-26', '2011-05-30', '2011-01-17']) assert_equal(bdd.holidays, np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8')) # Default M-F weekmask assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?')) # Check string weekmask with varying whitespace. 
bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri") assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?')) # Check length 7 0/1 string bdd = np.busdaycalendar(weekmask="0011001") assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?')) # Check length 7 string weekmask. bdd = np.busdaycalendar(weekmask="Mon Tue") assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?')) # All-zeros weekmask should raise assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0]) # weekday names must be correct case assert_raises(ValueError, np.busdaycalendar, weekmask="satsun") # All-zeros weekmask should raise assert_raises(ValueError, np.busdaycalendar, weekmask="") # Invalid weekday name codes should raise assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We") assert_raises(ValueError, np.busdaycalendar, weekmask="Max") assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue") def test_datetime_busday_holidays_offset(self): # With exactly one holiday assert_equal( np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']), np.datetime64('2011-11-14')) assert_equal( np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']), np.datetime64('2011-11-14')) assert_equal( np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']), np.datetime64('2011-11-18')) assert_equal( np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']), np.datetime64('2011-11-10')) assert_equal( np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']), np.datetime64('2011-11-10')) assert_equal( np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']), np.datetime64('2011-11-04')) # With the holiday appearing twice assert_equal( np.busday_offset('2011-11-10', 1, holidays=['2011-11-11', '2011-11-11']), np.datetime64('2011-11-14')) assert_equal( np.busday_offset('2011-11-14', -1, holidays=['2011-11-11', '2011-11-11']), np.datetime64('2011-11-10')) # With a NaT holiday assert_equal( np.busday_offset('2011-11-10', 1, holidays=['2011-11-11', 'NaT']), np.datetime64('2011-11-14')) assert_equal( np.busday_offset('2011-11-14', -1, holidays=['NaT', '2011-11-11']), np.datetime64('2011-11-10')) # With another holiday after assert_equal( np.busday_offset('2011-11-10', 1, holidays=['2011-11-11', '2011-11-24']), np.datetime64('2011-11-14')) assert_equal( np.busday_offset('2011-11-14', -1, holidays=['2011-11-11', '2011-11-24']), np.datetime64('2011-11-10')) # With another holiday before assert_equal( np.busday_offset('2011-11-10', 1, holidays=['2011-10-10', '2011-11-11']), np.datetime64('2011-11-14')) assert_equal( np.busday_offset('2011-11-14', -1, holidays=['2011-10-10', '2011-11-11']), np.datetime64('2011-11-10')) # With another holiday before and after assert_equal( np.busday_offset('2011-11-10', 1, holidays=['2011-10-10', '2011-11-11', '2011-11-24']), np.datetime64('2011-11-14')) assert_equal( np.busday_offset('2011-11-14', -1, holidays=['2011-10-10', '2011-11-11', '2011-11-24']), np.datetime64('2011-11-10')) # A bigger forward jump across more than one week/holiday holidays = ['2011-10-10', '2011-11-11', '2011-11-24', '2011-12-25', '2011-05-30', '2011-02-21', '2011-12-26', '2012-01-02'] bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays) assert_equal( np.busday_offset('2011-10-03', 4, holidays=holidays), np.busday_offset('2011-10-03', 4)) assert_equal( np.busday_offset('2011-10-03', 5, holidays=holidays), np.busday_offset('2011-10-03', 5 + 1)) assert_equal( np.busday_offset('2011-10-03', 27, holidays=holidays), 
np.busday_offset('2011-10-03', 27 + 1)) assert_equal( np.busday_offset('2011-10-03', 28, holidays=holidays), np.busday_offset('2011-10-03', 28 + 2)) assert_equal( np.busday_offset('2011-10-03', 35, holidays=holidays), np.busday_offset('2011-10-03', 35 + 2)) assert_equal( np.busday_offset('2011-10-03', 36, holidays=holidays), np.busday_offset('2011-10-03', 36 + 3)) assert_equal( np.busday_offset('2011-10-03', 56, holidays=holidays), np.busday_offset('2011-10-03', 56 + 3)) assert_equal( np.busday_offset('2011-10-03', 57, holidays=holidays), np.busday_offset('2011-10-03', 57 + 4)) assert_equal( np.busday_offset('2011-10-03', 60, holidays=holidays), np.busday_offset('2011-10-03', 60 + 4)) assert_equal( np.busday_offset('2011-10-03', 61, holidays=holidays), np.busday_offset('2011-10-03', 61 + 5)) assert_equal( np.busday_offset('2011-10-03', 61, busdaycal=bdd), np.busday_offset('2011-10-03', 61 + 5)) # A bigger backward jump across more than one week/holiday assert_equal( np.busday_offset('2012-01-03', -1, holidays=holidays), np.busday_offset('2012-01-03', -1 - 1)) assert_equal( np.busday_offset('2012-01-03', -4, holidays=holidays), np.busday_offset('2012-01-03', -4 - 1)) assert_equal( np.busday_offset('2012-01-03', -5, holidays=holidays), np.busday_offset('2012-01-03', -5 - 2)) assert_equal( np.busday_offset('2012-01-03', -25, holidays=holidays), np.busday_offset('2012-01-03', -25 - 2)) assert_equal( np.busday_offset('2012-01-03', -26, holidays=holidays), np.busday_offset('2012-01-03', -26 - 3)) assert_equal( np.busday_offset('2012-01-03', -33, holidays=holidays), np.busday_offset('2012-01-03', -33 - 3)) assert_equal( np.busday_offset('2012-01-03', -34, holidays=holidays), np.busday_offset('2012-01-03', -34 - 4)) assert_equal( np.busday_offset('2012-01-03', -56, holidays=holidays), np.busday_offset('2012-01-03', -56 - 4)) assert_equal( np.busday_offset('2012-01-03', -57, holidays=holidays), np.busday_offset('2012-01-03', -57 - 5)) assert_equal( np.busday_offset('2012-01-03', -57, busdaycal=bdd), np.busday_offset('2012-01-03', -57 - 5)) # Can't supply both a weekmask/holidays and busdaycal assert_raises(ValueError, np.busday_offset, '2012-01-03', -15, weekmask='1111100', busdaycal=bdd) assert_raises(ValueError, np.busday_offset, '2012-01-03', -15, holidays=holidays, busdaycal=bdd) # Roll with the holidays assert_equal( np.busday_offset('2011-12-25', 0, roll='forward', holidays=holidays), np.datetime64('2011-12-27')) assert_equal( np.busday_offset('2011-12-26', 0, roll='forward', holidays=holidays), np.datetime64('2011-12-27')) assert_equal( np.busday_offset('2011-12-26', 0, roll='backward', holidays=holidays), np.datetime64('2011-12-23')) assert_equal( np.busday_offset('2012-02-27', 0, roll='modifiedfollowing', holidays=['2012-02-27', '2012-02-26', '2012-02-28', '2012-03-01', '2012-02-29']), np.datetime64('2012-02-24')) assert_equal( np.busday_offset('2012-03-06', 0, roll='modifiedpreceding', holidays=['2012-03-02', '2012-03-03', '2012-03-01', '2012-03-05', '2012-03-07', '2012-03-06']), np.datetime64('2012-03-08')) def test_datetime_busday_holidays_count(self): holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24', '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17', '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30', '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10'] bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays) # Validate against busday_offset broadcast against # a range of offsets dates = np.busday_offset('2011-01-01', np.arange(366), roll='forward', 
busdaycal=bdd) assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd), np.arange(366)) # Returns negative value when reversed assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd), -np.arange(366)) dates = np.busday_offset('2011-12-31', -np.arange(366), roll='forward', busdaycal=bdd) assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd), np.arange(366)) # Returns negative value when reversed assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd), -np.arange(366)) # Can't supply both a weekmask/holidays and busdaycal assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03', weekmask='1111100', busdaycal=bdd) assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03', holidays=holidays, busdaycal=bdd) # Number of Mondays in March 2011 assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4) # Returns negative value when reversed assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4) def test_datetime_is_busday(self): holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24', '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17', '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30', '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10', 'NaT'] bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays) # Weekend/weekday tests assert_equal(np.is_busday('2011-01-01'), False) assert_equal(np.is_busday('2011-01-02'), False) assert_equal(np.is_busday('2011-01-03'), True) # All the holidays are not business days assert_equal(np.is_busday(holidays, busdaycal=bdd), np.zeros(len(holidays), dtype='?')) def test_datetime_y2038(self): # Test parsing on either side of the Y2038 boundary a = np.datetime64('2038-01-19T03:14:07') assert_equal(a.view(np.int64), 2**31 - 1) a = np.datetime64('2038-01-19T03:14:08') assert_equal(a.view(np.int64), 2**31) # Test parsing on either side of the Y2038 boundary with # a manually specified timezone offset with assert_warns(DeprecationWarning): a = np.datetime64('2038-01-19T04:14:07+0100') assert_equal(a.view(np.int64), 2**31 - 1) with assert_warns(DeprecationWarning): a = np.datetime64('2038-01-19T04:14:08+0100') assert_equal(a.view(np.int64), 2**31) # Test parsing a date after Y2038 a = np.datetime64('2038-01-20T13:21:14') assert_equal(str(a), '2038-01-20T13:21:14') def test_isnat(self): assert_(np.isnat(np.datetime64('NaT', 'ms'))) assert_(np.isnat(np.datetime64('NaT', 'ns'))) assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07'))) assert_(np.isnat(np.timedelta64('NaT', "ms"))) assert_(not np.isnat(np.timedelta64(34, "ms"))) res = np.array([False, False, True]) for unit in ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as']: arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit) assert_equal(np.isnat(arr), res) arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit) assert_equal(np.isnat(arr), res) arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit) assert_equal(np.isnat(arr), res) arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit) assert_equal(np.isnat(arr), res) def test_isnat_error(self): # Test that only datetime dtype arrays are accepted for t in np.typecodes["All"]: if t in np.typecodes["Datetime"]: continue assert_raises(TypeError, np.isnat, np.zeros(10, t)) def test_isfinite_scalar(self): assert_(not np.isfinite(np.datetime64('NaT', 'ms'))) assert_(not np.isfinite(np.datetime64('NaT', 'ns'))) assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07'))) assert_(not 
np.isfinite(np.timedelta64('NaT', "ms"))) assert_(np.isfinite(np.timedelta64(34, "ms"))) @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as']) @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]', '<timedelta64[%s]', '>timedelta64[%s]']) def test_isfinite_isinf_isnan_units(self, unit, dstr): '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes ''' arr_val = [123, -321, "NaT"] arr = np.array(arr_val, dtype= dstr % unit) pos = np.array([True, True, False]) neg = np.array([False, False, True]) false = np.array([False, False, False]) assert_equal(np.isfinite(arr), pos) assert_equal(np.isinf(arr), false) assert_equal(np.isnan(arr), neg) def test_assert_equal(self): assert_raises(AssertionError, assert_equal, np.datetime64('nat'), np.timedelta64('nat')) def test_corecursive_input(self): # construct a co-recursive list a, b = [], [] a.append(b) b.append(a) obj_arr = np.array([None]) obj_arr[0] = a # At some point this caused a stack overflow (gh-11154). Now raises # ValueError since the nested list cannot be converted to a datetime. assert_raises(ValueError, obj_arr.astype, 'M8') assert_raises(ValueError, obj_arr.astype, 'm8') @pytest.mark.parametrize("shape", [(), (1,)]) def test_discovery_from_object_array(self, shape): arr = np.array("2020-10-10", dtype=object).reshape(shape) res = np.array("2020-10-10", dtype="M8").reshape(shape) assert res.dtype == np.dtype("M8[D]") assert_equal(arr.astype("M8"), res) arr[...] = np.bytes_("2020-10-10") # try a numpy string type assert_equal(arr.astype("M8"), res) arr = arr.astype("S") assert_equal(arr.astype("S").astype("M8"), res) @pytest.mark.parametrize("time_unit", [ "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", # compound units "10D", "2M", ]) def test_limit_symmetry(self, time_unit): """ Dates should have symmetric limits around the unix epoch at +/-np.int64 """ epoch = np.datetime64(0, time_unit) latest = np.datetime64(np.iinfo(np.int64).max, time_unit) earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) # above should not have overflowed assert earliest < epoch < latest @pytest.mark.parametrize("time_unit", [ "Y", "M", pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")), ]) @pytest.mark.parametrize("sign", [-1, 1]) def test_limit_str_roundtrip(self, time_unit, sign): """ Limits should roundtrip when converted to strings. This tests the conversion to and from npy_datetimestruct. """ # TODO: add absolute (gold standard) time span limit strings limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) # Convert to string and back. Explicit unit needed since the day and # week reprs are not distinguishable. limit_via_str = np.datetime64(str(limit), time_unit) assert limit_via_str == limit class TestDateTimeData: def test_basic(self): a = np.array(['1980-03-23'], dtype=np.datetime64) assert_equal(np.datetime_data(a.dtype), ('D', 1)) def test_bytes(self): # byte units are converted to unicode dt = np.datetime64('2000', (b'ms', 5)) assert np.datetime_data(dt.dtype) == ('ms', 5) dt = np.datetime64('2000', b'5ms') assert np.datetime_data(dt.dtype) == ('ms', 5) def test_non_ascii(self): # μs is normalized to μ dt = np.datetime64('2000', ('μs', 5)) assert np.datetime_data(dt.dtype) == ('us', 5) dt = np.datetime64('2000', '5μs') assert np.datetime_data(dt.dtype) == ('us', 5)
py
b4146fd3ed1167d4dee9b3d0001f655d810dc60b
import argparse
import logging
import time
import resource

import numpy as np

import faiss

from preprocess_datasets import DATASETS, get_dataset


def knn_ground_truth(X, k):
    print("knn_ground_truth queries size %s k=%d" % (X.shape, k))
    t0 = time.time()
    _, d = X.shape
    index = faiss.IndexFlat(d, faiss.METRIC_L2)
    index.add(X)
    index.train(X)

    D, I = index.search(X, k)

    return D, I


def usbin_write(ids, dist, fname):
    ids = np.ascontiguousarray(ids, dtype="int32")
    dist = np.ascontiguousarray(dist, dtype="float32")
    assert ids.shape == dist.shape
    f = open(fname, "wb")
    n, d = dist.shape
    np.array([n, d], dtype='int32').tofile(f)
    ids.tofile(f)
    dist.tofile(f)


def write_data(X, fname):
    f = open(fname, "wb")
    X = np.ascontiguousarray(X, dtype="float64")
    X.tofile(f)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    def aa(*args, **kwargs):
        group.add_argument(*args, **kwargs)

    group = parser.add_argument_group('dataset options')
    aa('--dataset', choices=DATASETS.keys(), required=True)

    group = parser.add_argument_group('computation options')
    # determined from ds
    # aa('--range_search', action="store_true", help="do range search instead of kNN search")
    aa('--k', default=100, type=int, help="number of nearest kNN neighbors to search")

    args = parser.parse_args()

    ds = get_dataset(args.dataset, "gaussian")
    print(ds)

    for query_type in ('train', 'test', 'validation'):
        D, I = knn_ground_truth(np.array(ds[query_type]).astype(np.float32), k=args.k)
        print(f"writing index matrix of size {I.shape}")
        # write in the usbin format
        usbin_write(I, D, args.dataset + f".{query_type}.knn")
        write_data(np.array(ds[query_type]).astype(np.float64), args.dataset + f".{query_type}.data")
py
b4146fea07066fbaee27533e61c44195a8fa6956
""" WSGI config for havresac project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'havresac.settings') application = get_wsgi_application()
py
b4146ff49c3cb379e605d2cc8ed4287b596c8567
from collections import OrderedDict import numpy as np from gym.spaces import Dict , Box from metaworld.envs.env_util import get_stat_in_paths, \ create_stats_ordered_dict, get_asset_full_path from metaworld.core.multitask_env import MultitaskEnv from metaworld.envs.mujoco.ur3_xyz.ur3_base import UR3XYZEnv from metaworld.envs.mujoco.utils.rotation import euler2quat from metaworld.envs.env_util import quat_to_zangle, zangle_to_quat, quat_create, quat_mul class UR3PickAndPlaceEnv(UR3XYZEnv): def __init__( self, obj_low=None, obj_high=None, random_init=False, tasks = [{'goal': np.array([0.1, 0.8, 0.2]), 'obj_init_pos':np.array([0, 0.6, 0.02]), 'obj_init_angle': 0.3}], goal_low=None, goal_high=None, hand_init_pos = (0, 0.6, 0.2), liftThresh = 0.04, rewMode = 'orig', rotMode='rotz',#'fixed', **kwargs ): self.quick_init(locals()) # hand_low=(-0.5, 0.40, 0.05) # hand_high=(0.5, 1, 0.5) # obj_low=(-0.5, 0.40, 0.05) # obj_high=(0.5, 1, 0.5) hand_low=(-0.3, 0.35, 0.05) hand_high=(0.3, 0.65, 0.3) obj_low=(-0.3, 0.35, 0.05) obj_high=(0.3, 0.65, 0.3) UR3XYZEnv.__init__( self, frame_skip=5, action_scale=1./100, hand_low=hand_low, hand_high=hand_high, model_name=self.model_name, **kwargs ) if obj_low is None: obj_low = self.hand_low if goal_low is None: goal_low = self.hand_low if obj_high is None: obj_high = self.hand_high if goal_high is None: goal_high = self.hand_high self.random_init = random_init self.liftThresh = liftThresh self.max_path_length = 200#150 self.tasks = tasks self.num_tasks = len(tasks) self.rewMode = rewMode self.rotMode = rotMode self.hand_init_pos = np.array(hand_init_pos) if rotMode == 'fixed': self.action_space = Box( np.array([-1, -1, -1, -1]), np.array([1, 1, 1, 1]), ) elif rotMode == 'rotz': self.action_rot_scale = 1./50 self.action_space = Box( np.array([-1, -1, -1, -np.pi, -1]), np.array([1, 1, 1, np.pi, 1]), ) elif rotMode == 'quat': self.action_rot_scale = 1./10 #dscho mod self.action_space = Box( np.array([-1, -1, -1, 0, -1, -1, -1, -1]), np.array([1, 1, 1, 2*np.pi, 1, 1, 1, 1]), ) else: self.action_rot_scale = 1./10 #dscho mod self.action_space = Box( np.array([-1, -1, -1, -np.pi/2, -np.pi/2, 0, -1]), np.array([1, 1, 1, np.pi/2, np.pi/2, np.pi*2, 1]), ) self.hand_and_obj_space = Box( np.hstack((self.hand_low, obj_low)), np.hstack((self.hand_high, obj_high)), ) self.goal_space = Box(goal_low, goal_high) self.observation_space = Box( np.hstack((self.hand_low, obj_low, obj_low)), np.hstack((self.hand_high, obj_high, obj_high)), ) # self.observation_space = Dict([ # ('state_observation', self.hand_and_obj_space), # ('state_desired_goal', self.goal_space), # ('state_achieved_goal', self.goal_space), # ]) def get_goal(self): return { 'state_desired_goal': self._state_goal, } @property def model_name(self): return get_asset_full_path('ur3_xyz/ur3_pick_and_place.xml') def viewer_setup(self): # top view # self.viewer.cam.trackbodyid = 0 # self.viewer.cam.lookat[0] = 0 # self.viewer.cam.lookat[1] = 1.0 # self.viewer.cam.lookat[2] = 0.5 # self.viewer.cam.distance = 0.6 # self.viewer.cam.elevation = -45 # self.viewer.cam.azimuth = 270 # self.viewer.cam.trackbodyid = -1 # side view # self.viewer.cam.trackbodyid = 0 # self.viewer.cam.lookat[0] = 0.2 # self.viewer.cam.lookat[1] = 0.75 # self.viewer.cam.lookat[2] = 0.4 # self.viewer.cam.distance = 0.4 # self.viewer.cam.elevation = -55 # self.viewer.cam.azimuth = 180 # self.viewer.cam.trackbodyid = -1 v = self.viewer v.cam.trackbodyid = 0 v.cam.distance = self.model.stat.extent * 0.7 def step(self, action): # 
self.set_xyz_action_rot(action[:7]) if self.rotMode == 'euler': #ee pos xyz control + xyz rotation by euler action_ = np.zeros(7) action_[:3] = action[:3] action_[3:] = euler2quat(action[3:6]) self.set_xyz_action_rot(action_) elif self.rotMode == 'fixed': self.set_xyz_action(action[:3]) #ee pos xyz control elif self.rotMode == 'rotz': self.set_xyz_action_rotz(action[:4]) #ee pos xyz control + z rotation else: self.set_xyz_action_rot(action[:7]) #ee pos xyz control + xyz rotation by quat? 불확실 # self.do_simulation([action[-1], -action[-1]]) #gripper 여닫는거인듯 self.do_simulation([action[-1], action[-1]]) #gripper 여닫는거인듯 # The marker seems to get reset every time you do a simulation self._set_goal_marker(self._state_goal) ob = self._get_obs() obs_dict = self._get_obs_dict() reward , reachRew, reachDist, pickRew, placeRew , placingDist = self.compute_reward(action, obs_dict, mode = self.rewMode) self.curr_path_length +=1 #info = self._get_info() if self.curr_path_length == self.max_path_length: done = True else: done = False return ob, reward, done, { 'reachRew':reachRew, 'reachDist': reachDist, 'pickRew':pickRew, 'placeRew': placeRew, 'epRew' : reward, 'placingDist': placingDist} def _get_obs(self): hand = self.get_endeff_pos() objPos = self.data.get_geom_xpos('objGeom') flat_obs = np.concatenate((hand, objPos)) return np.concatenate([ flat_obs, self._state_goal ]) def _get_obs_dict(self): hand = self.get_endeff_pos() objPos = self.data.get_geom_xpos('objGeom') flat_obs = np.concatenate((hand, objPos)) return dict( state_observation=flat_obs, state_desired_goal=self._state_goal, state_achieved_goal=objPos, ) def _get_info(self): pass def _set_goal_marker(self, goal): """ This should be use ONLY for visualization. Use self._state_goal for logging, learning, etc. """ self.data.site_xpos[self.model.site_name2id('goal')] = ( goal[:3] ) def _set_objCOM_marker(self): """ This should be use ONLY for visualization. Use self._state_goal for logging, learning, etc. 
""" objPos = self.data.get_geom_xpos('objGeom') self.data.site_xpos[self.model.site_name2id('objSite')] = ( objPos ) def _set_obj_xyz(self, pos): # qpos = self.data.qpos.flat.copy() # qvel = self.data.qvel.flat.copy() # qpos[9:12] = pos.copy() #0 ~ 6 sawyer, 7~8 gripper, 9~11 object pos, 12~15 object quat # qvel[9:15] = 0 #0~6 sawyer, 7~8 gripper, 9~15 object vel, angvel # self.set_state(qpos, qvel) #dscho modified qpos = self.data.qpos.flat.copy() qvel = self.data.qvel.flat.copy() qpos[14:17] = pos.copy() #0 ~ 5 ur3, 6~9 grip_r, 10~13 grip_l, 14~16 object pos, 17~20 object quat qvel[14:20] = 0 #0~5 ur3, 6~9 grip_r, 10~13 grip_l, 14~19 object vel, angvel self.set_state(qpos, qvel) def sample_goals(self, batch_size): #Required by HER-TD3 goals = [] for i in range(batch_size): task = self.tasks[np.random.randint(0, self.num_tasks)] goals.append(task['goal']) return { 'state_desired_goal': goals, } def sample_task(self): task_idx = np.random.randint(0, self.num_tasks) return self.tasks[task_idx] def adjust_initObjPos(self, orig_init_pos): #This is to account for meshes for the geom and object are not aligned #If this is not done, the object could be initialized in an extreme position diff = self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2] adjustedPos = orig_init_pos[:2] + diff #The convention we follow is that body_com[2] is always 0, and geom_pos[2] is the object height return [adjustedPos[0], adjustedPos[1],self.data.get_geom_xpos('objGeom')[-1]] def reset_model(self): self._reset_hand() task = self.sample_task() self._state_goal = np.array(task['goal']) self.obj_init_pos = self.adjust_initObjPos(task['obj_init_pos']) self.obj_init_angle = task['obj_init_angle'] self.objHeight = self.data.get_geom_xpos('objGeom')[2] self.heightTarget = self.objHeight + self.liftThresh if self.random_init: goal_pos = np.random.uniform( self.hand_and_obj_space.low, self.hand_and_obj_space.high, size=(self.hand_and_obj_space.low.size), ) while np.linalg.norm(goal_pos[:3] - goal_pos[-3:]) < 0.1: goal_pos = np.random.uniform( self.hand_and_obj_space.low, self.hand_and_obj_space.high, size=(self.hand_and_obj_space.low.size), ) self._state_goal = goal_pos[:3] self.obj_init_pos = np.concatenate((goal_pos[-3:-1], np.array([self.obj_init_pos[-1]]))) self._set_goal_marker(self._state_goal) self._set_obj_xyz(self.obj_init_pos) #self._set_obj_xyz_quat(self.obj_init_pos, self.obj_init_angle) self.curr_path_length = 0 self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._state_goal)) + self.heightTarget #Can try changing this return self._get_obs() def _reset_hand(self): #10번씩 하는건 gripper 닫는 시간때문 for _ in range(10): self.data.set_mocap_pos('mocap', self.hand_init_pos) # self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0])) #w v 순인듯 # self.data.set_mocap_quat('mocap', np.array([1, 0, 0, 0])) #w v 순인듯 quat = quat_mul(quat_create(np.array([1., 0, 0]), np.pi) ,quat_create(np.array([0, 0, 1.]), np.pi/2)) #ref 기준 x축 180, z축 90순 # quat = quat_create(np.array([1., 0, 0]), -np.pi) self.data.set_mocap_quat('mocap', quat) #w v 순인듯 # self.do_simulation([-1,1], self.frame_skip) self.do_simulation([0,0], self.frame_skip) rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector') self.init_fingerCOM = (rightFinger + leftFinger)/2 self.pickCompleted = False def get_site_pos(self, siteName): _id = self.model.site_names.index(siteName) return self.data.site_xpos[_id].copy() def compute_rewards(self, 
actions, obsBatch): #Required by HER-TD3 assert isinstance(obsBatch, dict) == True obsList = obsBatch['state_observation'] rewards = [self.compute_reward(action, obs)[0] for action, obs in zip(actions, obsList)] return np.array(rewards) def compute_reward(self, actions, obs, mode = 'general'): if isinstance(obs, dict): obs = obs['state_observation'] objPos = obs[3:6] rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector') fingerCOM = (rightFinger + leftFinger)/2 heightTarget = self.heightTarget placingGoal = self._state_goal reachDist = np.linalg.norm(objPos - fingerCOM) placingDist = np.linalg.norm(objPos - placingGoal) def reachReward(): reachRew = -reachDist# + min(actions[-1], -1)/50 reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1]) zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1]) if reachDistxy < 0.05: #0.02 reachRew = -reachDist else: reachRew = -reachDistxy - 2*zRew #incentive to close fingers when reachDist is small if reachDist < 0.05: reachRew = -reachDist + max(actions[-1],0)/50 return reachRew , reachDist def pickCompletionCriteria(): tolerance = 0.01 if objPos[2] >= (heightTarget- tolerance): return True else: return False if pickCompletionCriteria(): self.pickCompleted = True def objDropped(): return (objPos[2] < (self.objHeight + 0.005)) and (placingDist >0.02) and (reachDist > 0.02) # Object on the ground, far away from the goal, and from the gripper #Can tweak the margin limits def objGrasped(thresh = 0): sensorData = self.data.sensordata return (sensorData[0]>thresh) and (sensorData[1]> thresh) def orig_pickReward(): # hScale = 50 hScale = 100 if self.pickCompleted and not(objDropped()): return hScale*heightTarget # elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) : elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) : return hScale* min(heightTarget, objPos[2]) else: return 0 def general_pickReward(): hScale = 50 if self.pickCompleted and objGrasped(): return hScale*heightTarget elif objGrasped() and (objPos[2]> (self.objHeight + 0.005)): return hScale* min(heightTarget, objPos[2]) else: return 0 def placeReward(): # c1 = 1000 ; c2 = 0.03 ; c3 = 0.003 c1 = 1000 ; c2 = 0.01 ; c3 = 0.001 if mode == 'general': cond = self.pickCompleted and objGrasped() else: cond = self.pickCompleted and (reachDist < 0.1) and not(objDropped()) if cond: placeRew = 1000*(self.maxPlacingDist - placingDist) + c1*(np.exp(-(placingDist**2)/c2) + np.exp(-(placingDist**2)/c3)) placeRew = max(placeRew,0) return [placeRew , placingDist] else: return [0 , placingDist] reachRew, reachDist = reachReward() if mode == 'general': pickRew = general_pickReward() else: pickRew = orig_pickReward() placeRew , placingDist = placeReward() assert ((placeRew >=0) and (pickRew>=0)) reward = reachRew + pickRew + placeRew return [reward, reachRew, reachDist, pickRew, placeRew, placingDist] def get_diagnostics(self, paths, prefix=''): statistics = OrderedDict() return statistics def log_diagnostics(self, paths = None, logger = None): pass
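# A compact, self-contained illustration of the staged shaping used in
# compute_reward() above: a reach term always pulls the gripper toward the
# object, a pick term pays out once the object has been lifted near
# heightTarget, and a place term is enabled only after a successful pick.
# The constants and distances below are made up for illustration and are not
# the tuned values used by the environment.
def staged_reward_sketch(reach_dist, obj_height, place_dist,
                         height_target=0.1, max_place_dist=0.5):
    reach_rew = -reach_dist                           # always active
    picked = obj_height >= height_target - 0.01       # pick completion criterion
    pick_rew = 100 * min(height_target, obj_height) if picked else 0.0
    place_rew = max(1000 * (max_place_dist - place_dist), 0.0) if picked else 0.0
    return reach_rew + pick_rew + place_rew

# e.g. a lifted object 0.2 m from the goal scores far higher than one still on the table
print(staged_reward_sketch(0.05, 0.12, 0.20), staged_reward_sketch(0.30, 0.0, 0.45))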
py
b4147078af5101a67dbf63a2671f781084bdcec0
def square_sum(numbers):
    return sum(a ** 2 for a in numbers)
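# Example usage of square_sum: the sum of squares of any iterable of numbers.
assert square_sum([1, 2, 2]) == 9
assert square_sum([]) == 0
assert square_sum([-3, 0.5]) == 9.25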
py
b414713ba4a7c1eee29831ff24909d77b6401ca6
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: proto/core/node/common/action/get_set_property.proto """Generated protocol buffer code.""" # third party from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() # syft absolute from syft.proto.core.common import ( common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2, ) from syft.proto.core.io import address_pb2 as proto_dot_core_dot_io_dot_address__pb2 from syft.proto.core.pointer import ( pointer_pb2 as proto_dot_core_dot_pointer_dot_pointer__pb2, ) DESCRIPTOR = _descriptor.FileDescriptor( name="proto/core/node/common/action/get_set_property.proto", package="syft.core.node.common.action", syntax="proto3", serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n4proto/core/node/common/action/get_set_property.proto\x12\x1csyft.core.node.common.action\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto\x1a proto/core/pointer/pointer.proto"\xe7\x03\n\x16GetOrSetPropertyAction\x12\x0c\n\x04path\x18\x01 \x01(\t\x12)\n\x05_self\x18\x02 \x01(\x0b\x32\x1a.syft.core.pointer.Pointer\x12(\n\x04\x61rgs\x18\x03 \x03(\x0b\x32\x1a.syft.core.pointer.Pointer\x12P\n\x06kwargs\x18\x04 \x03(\x0b\x32@.syft.core.node.common.action.GetOrSetPropertyAction.KwargsEntry\x12-\n\x0eid_at_location\x18\x05 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x06 \x01(\x0b\x32\x15.syft.core.io.Address\x12+\n\x07set_arg\x18\x07 \x01(\x0b\x32\x1a.syft.core.pointer.Pointer\x12%\n\x06msg_id\x18\x08 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x0e\n\x06\x61\x63tion\x18\t \x01(\x05\x12\x12\n\nmap_to_dyn\x18\n \x01(\x08\x1aI\n\x0bKwargsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.syft.core.pointer.Pointer:\x02\x38\x01\x62\x06proto3', dependencies=[ proto_dot_core_dot_common_dot_common__object__pb2.DESCRIPTOR, proto_dot_core_dot_io_dot_address__pb2.DESCRIPTOR, proto_dot_core_dot_pointer_dot_pointer__pb2.DESCRIPTOR, ], ) _GETORSETPROPERTYACTION_KWARGSENTRY = _descriptor.Descriptor( name="KwargsEntry", full_name="syft.core.node.common.action.GetOrSetPropertyAction.KwargsEntry", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", full_name="syft.core.node.common.action.GetOrSetPropertyAction.KwargsEntry.key", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", full_name="syft.core.node.common.action.GetOrSetPropertyAction.KwargsEntry.value", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=603, serialized_end=676, 
) _GETORSETPROPERTYACTION = _descriptor.Descriptor( name="GetOrSetPropertyAction", full_name="syft.core.node.common.action.GetOrSetPropertyAction", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="path", full_name="syft.core.node.common.action.GetOrSetPropertyAction.path", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="_self", full_name="syft.core.node.common.action.GetOrSetPropertyAction._self", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="args", full_name="syft.core.node.common.action.GetOrSetPropertyAction.args", index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="kwargs", full_name="syft.core.node.common.action.GetOrSetPropertyAction.kwargs", index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="id_at_location", full_name="syft.core.node.common.action.GetOrSetPropertyAction.id_at_location", index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="address", full_name="syft.core.node.common.action.GetOrSetPropertyAction.address", index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="set_arg", full_name="syft.core.node.common.action.GetOrSetPropertyAction.set_arg", index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="msg_id", full_name="syft.core.node.common.action.GetOrSetPropertyAction.msg_id", index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="action", 
full_name="syft.core.node.common.action.GetOrSetPropertyAction.action", index=8, number=9, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="map_to_dyn", full_name="syft.core.node.common.action.GetOrSetPropertyAction.map_to_dyn", index=9, number=10, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[ _GETORSETPROPERTYACTION_KWARGSENTRY, ], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=189, serialized_end=676, ) _GETORSETPROPERTYACTION_KWARGSENTRY.fields_by_name[ "value" ].message_type = proto_dot_core_dot_pointer_dot_pointer__pb2._POINTER _GETORSETPROPERTYACTION_KWARGSENTRY.containing_type = _GETORSETPROPERTYACTION _GETORSETPROPERTYACTION.fields_by_name[ "_self" ].message_type = proto_dot_core_dot_pointer_dot_pointer__pb2._POINTER _GETORSETPROPERTYACTION.fields_by_name[ "args" ].message_type = proto_dot_core_dot_pointer_dot_pointer__pb2._POINTER _GETORSETPROPERTYACTION.fields_by_name[ "kwargs" ].message_type = _GETORSETPROPERTYACTION_KWARGSENTRY _GETORSETPROPERTYACTION.fields_by_name[ "id_at_location" ].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID _GETORSETPROPERTYACTION.fields_by_name[ "address" ].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS _GETORSETPROPERTYACTION.fields_by_name[ "set_arg" ].message_type = proto_dot_core_dot_pointer_dot_pointer__pb2._POINTER _GETORSETPROPERTYACTION.fields_by_name[ "msg_id" ].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID DESCRIPTOR.message_types_by_name["GetOrSetPropertyAction"] = _GETORSETPROPERTYACTION _sym_db.RegisterFileDescriptor(DESCRIPTOR) GetOrSetPropertyAction = _reflection.GeneratedProtocolMessageType( "GetOrSetPropertyAction", (_message.Message,), { "KwargsEntry": _reflection.GeneratedProtocolMessageType( "KwargsEntry", (_message.Message,), { "DESCRIPTOR": _GETORSETPROPERTYACTION_KWARGSENTRY, "__module__": "proto.core.node.common.action.get_set_property_pb2" # @@protoc_insertion_point(class_scope:syft.core.node.common.action.GetOrSetPropertyAction.KwargsEntry) }, ), "DESCRIPTOR": _GETORSETPROPERTYACTION, "__module__": "proto.core.node.common.action.get_set_property_pb2" # @@protoc_insertion_point(class_scope:syft.core.node.common.action.GetOrSetPropertyAction) }, ) _sym_db.RegisterMessage(GetOrSetPropertyAction) _sym_db.RegisterMessage(GetOrSetPropertyAction.KwargsEntry) _GETORSETPROPERTYACTION_KWARGSENTRY._options = None # @@protoc_insertion_point(module_scope)
py
b414714914c69c774db6c77b520c0750c788aa25
# encoding: utf-8 """Unit-test suite for pptx.chart.axis module.""" import pytest from pptx.chart.axis import ( AxisTitle, _BaseAxis, CategoryAxis, DateAxis, Gridlines, TickLabels, ValueAxis, ) from pptx.dml.chtfmt import ChartFormat from pptx.enum.chart import ( XL_AXIS_CROSSES, XL_CATEGORY_TYPE, XL_TICK_LABEL_POSITION as XL_TICK_LBL_POS, XL_TICK_MARK, ) from pptx.text.text import Font from ..unitutil.cxml import element, xml from ..unitutil.mock import class_mock, instance_mock class Describe_BaseAxis(object): """Unit-test suite for `pptx.chart.axis._BaseAxis` objects.""" def it_provides_access_to_its_title(self, title_fixture): axis, AxisTitle_, axis_title_ = title_fixture axis_title = axis.axis_title AxisTitle_.assert_called_once_with(axis._element.title) assert axis_title is axis_title_ def it_provides_access_to_its_format(self, format_fixture): axis, ChartFormat_, format_ = format_fixture format = axis.format ChartFormat_.assert_called_once_with(axis._xAx) assert format is format_ def it_knows_whether_it_has_major_gridlines(self, major_gridlines_get_fixture): base_axis, expected_value = major_gridlines_get_fixture assert base_axis.has_major_gridlines is expected_value def it_can_change_whether_it_has_major_gridlines(self, major_gridlines_set_fixture): base_axis, new_value, expected_xml = major_gridlines_set_fixture base_axis.has_major_gridlines = new_value assert base_axis._element.xml == expected_xml def it_knows_whether_it_has_minor_gridlines(self, minor_gridlines_get_fixture): base_axis, expected_value = minor_gridlines_get_fixture assert base_axis.has_minor_gridlines is expected_value def it_can_change_whether_it_has_minor_gridlines(self, minor_gridlines_set_fixture): base_axis, new_value, expected_xml = minor_gridlines_set_fixture base_axis.has_minor_gridlines = new_value assert base_axis._element.xml == expected_xml def it_knows_whether_it_has_a_title(self, has_title_get_fixture): axis, expected_value = has_title_get_fixture assert axis.has_title is expected_value def it_can_change_whether_it_has_a_title(self, has_title_set_fixture): axis, new_value, expected_xml = has_title_set_fixture axis.has_title = new_value assert axis._element.xml == expected_xml def it_provides_access_to_its_major_gridlines(self, maj_grdlns_fixture): axis, MajorGridlines_, xAx, major_gridlines_ = maj_grdlns_fixture major_gridlines = axis.major_gridlines MajorGridlines_.assert_called_once_with(xAx) assert major_gridlines is major_gridlines_ def it_knows_its_major_tick_setting(self, major_tick_get_fixture): axis, expected_value = major_tick_get_fixture assert axis.major_tick_mark == expected_value def it_can_change_its_major_tick_mark(self, major_tick_set_fixture): axis, new_value, expected_xml = major_tick_set_fixture axis.major_tick_mark = new_value assert axis._element.xml == expected_xml def it_knows_its_maximum_scale(self, maximum_scale_get_fixture): axis, expected_value = maximum_scale_get_fixture assert axis.maximum_scale == expected_value def it_can_change_its_maximum_scale(self, maximum_scale_set_fixture): axis, new_value, expected_xml = maximum_scale_set_fixture axis.maximum_scale = new_value assert axis._element.xml == expected_xml def it_knows_its_minimum_scale(self, minimum_scale_get_fixture): axis, expected_value = minimum_scale_get_fixture assert axis.minimum_scale == expected_value def it_can_change_its_minimum_scale(self, minimum_scale_set_fixture): axis, new_value, expected_xml = minimum_scale_set_fixture axis.minimum_scale = new_value assert axis._element.xml == expected_xml def 
it_knows_its_minor_tick_setting(self, minor_tick_get_fixture): axis, expected_value = minor_tick_get_fixture assert axis.minor_tick_mark == expected_value def it_can_change_its_minor_tick_mark(self, minor_tick_set_fixture): axis, new_value, expected_xml = minor_tick_set_fixture axis.minor_tick_mark = new_value assert axis._element.xml == expected_xml def it_knows_whether_it_renders_in_reverse_order(self, reverse_order_get_fixture): xAx, expected_value = reverse_order_get_fixture assert _BaseAxis(xAx).reverse_order == expected_value def it_can_change_whether_it_renders_in_reverse_order( self, reverse_order_set_fixture ): xAx, new_value, expected_xml = reverse_order_set_fixture axis = _BaseAxis(xAx) axis.reverse_order = new_value assert axis._element.xml == expected_xml def it_knows_its_tick_label_position(self, tick_lbl_pos_get_fixture): axis, expected_value = tick_lbl_pos_get_fixture assert axis.tick_label_position == expected_value def it_can_change_its_tick_label_position(self, tick_lbl_pos_set_fixture): axis, new_value, expected_xml = tick_lbl_pos_set_fixture axis.tick_label_position = new_value assert axis._element.xml == expected_xml def it_provides_access_to_its_title(self, title_fixture): axis, AxisTitle_, axis_title_ = title_fixture axis_title = axis.axis_title AxisTitle_.assert_called_once_with(axis._element.title) assert axis_title is axis_title_ def it_provides_access_to_its_format(self, format_fixture): axis, ChartFormat_, format_ = format_fixture format = axis.format ChartFormat_.assert_called_once_with(axis._xAx) assert format is format_ def it_provides_access_to_its_major_gridlines(self, maj_grdlns_fixture): axis, MajorGridlines_, xAx, major_gridlines_ = maj_grdlns_fixture major_gridlines = axis.major_gridlines MajorGridlines_.assert_called_once_with(xAx, 'major') assert major_gridlines is major_gridlines_ def it_provides_access_to_its_minor_gridlines(self, min_grdlns_fixture): axis, MinorGridlines_, xAx, minor_gridlines_ = min_grdlns_fixture minor_gridlines = axis.minor_gridlines MinorGridlines_.assert_called_once_with(xAx, 'minor') assert minor_gridlines is minor_gridlines_ def it_provides_access_to_the_tick_labels(self, tick_labels_fixture): axis, tick_labels_, TickLabels_, xAx = tick_labels_fixture tick_labels = axis.tick_labels TickLabels_.assert_called_once_with(xAx) assert tick_labels is tick_labels_ def it_knows_whether_it_is_visible(self, visible_get_fixture): axis, expected_bool_value = visible_get_fixture assert axis.visible is expected_bool_value def it_can_change_whether_it_is_visible(self, visible_set_fixture): axis, new_value, expected_xml = visible_set_fixture axis.visible = new_value assert axis._element.xml == expected_xml def but_it_raises_on_assign_non_bool_to_visible(self): axis = _BaseAxis(None) with pytest.raises(ValueError): axis.visible = "foobar" # fixtures ------------------------------------------------------- @pytest.fixture(params=["c:catAx", "c:dateAx", "c:valAx"]) def format_fixture(self, request, ChartFormat_, format_): xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, ChartFormat_, format_ @pytest.fixture( params=[ ("c:catAx", False), ("c:catAx/c:title", True), ("c:dateAx", False), ("c:dateAx/c:title", True), ("c:valAx", False), ("c:valAx/c:title", True), ] ) def has_title_get_fixture(self, request): xAx_cxml, expected_value = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, expected_value @pytest.fixture( params=[ ("c:catAx", True, "c:catAx/c:title/(c:layout,c:overlay{val=0})"), ("c:catAx/c:title", 
True, "c:catAx/c:title"), ("c:catAx/c:title", False, "c:catAx"), ("c:catAx", False, "c:catAx"), ("c:dateAx", True, "c:dateAx/c:title/(c:layout,c:overlay{val=0})"), ("c:dateAx/c:title", True, "c:dateAx/c:title"), ("c:dateAx/c:title", False, "c:dateAx"), ("c:dateAx", False, "c:dateAx"), ("c:valAx", True, "c:valAx/c:title/(c:layout,c:overlay{val=0})"), ("c:valAx/c:title", True, "c:valAx/c:title"), ("c:valAx/c:title", False, "c:valAx"), ("c:valAx", False, "c:valAx"), ] ) def has_title_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return axis, new_value, expected_xml @pytest.fixture(params=["c:catAx", "c:dateAx", "c:valAx"]) def maj_grdlns_fixture(self, request, MajorGridlines_, major_gridlines_): xAx_cxml = request.param xAx = element(xAx_cxml) axis = _BaseAxis(xAx) return axis, MajorGridlines_, xAx, major_gridlines_ @pytest.fixture(params=["c:catAx", "c:dateAx", "c:valAx"]) def min_grdlns_fixture(self, request, MinorGridlines_, minor_gridlines_): xAx_cxml = request.param xAx = element(xAx_cxml) axis = _BaseAxis(xAx) return axis, MinorGridlines_, xAx, minor_gridlines_ @pytest.fixture( params=[ ("c:catAx", False), ("c:catAx/c:majorGridlines", True), ("c:dateAx", False), ("c:dateAx/c:majorGridlines", True), ("c:valAx", False), ("c:valAx/c:majorGridlines", True), ] ) def major_gridlines_get_fixture(self, request): xAx_cxml, expected_value = request.param base_axis = _BaseAxis(element(xAx_cxml)) return base_axis, expected_value @pytest.fixture( params=[ ("c:catAx", True, "c:catAx/c:majorGridlines"), ("c:catAx/c:majorGridlines", True, "c:catAx/c:majorGridlines"), ("c:catAx/c:majorGridlines", False, "c:catAx"), ("c:catAx", False, "c:catAx"), ("c:dateAx", True, "c:dateAx/c:majorGridlines"), ("c:dateAx/c:majorGridlines", True, "c:dateAx/c:majorGridlines"), ("c:dateAx/c:majorGridlines", False, "c:dateAx"), ("c:dateAx", False, "c:dateAx"), ("c:valAx", True, "c:valAx/c:majorGridlines"), ("c:valAx/c:majorGridlines", True, "c:valAx/c:majorGridlines"), ("c:valAx/c:majorGridlines", False, "c:valAx"), ("c:valAx", False, "c:valAx"), ] ) def major_gridlines_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param base_axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return base_axis, new_value, expected_xml @pytest.fixture( params=[ ("c:catAx", XL_TICK_MARK.CROSS), ("c:catAx/c:majorTickMark", XL_TICK_MARK.CROSS), ("c:catAx/c:majorTickMark{val=out}", XL_TICK_MARK.OUTSIDE), ("c:dateAx", XL_TICK_MARK.CROSS), ("c:dateAx/c:majorTickMark", XL_TICK_MARK.CROSS), ("c:dateAx/c:majorTickMark{val=out}", XL_TICK_MARK.OUTSIDE), ("c:valAx", XL_TICK_MARK.CROSS), ("c:valAx/c:majorTickMark", XL_TICK_MARK.CROSS), ("c:valAx/c:majorTickMark{val=in}", XL_TICK_MARK.INSIDE), ] ) def major_tick_get_fixture(self, request): xAx_cxml, expected_value = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, expected_value @pytest.fixture( params=[ ("c:catAx", XL_TICK_MARK.INSIDE, "c:catAx/c:majorTickMark{val=in}"), ( "c:catAx/c:majorTickMark{val=in}", XL_TICK_MARK.OUTSIDE, "c:catAx/c:majorTickMark{val=out}", ), ("c:catAx/c:majorTickMark{val=out}", XL_TICK_MARK.CROSS, "c:catAx"), ("c:catAx", XL_TICK_MARK.CROSS, "c:catAx"), ("c:catAx/c:majorTickMark{val=cross}", XL_TICK_MARK.CROSS, "c:catAx"), ("c:dateAx", XL_TICK_MARK.INSIDE, "c:dateAx/c:majorTickMark{val=in}"), ( "c:dateAx/c:majorTickMark{val=in}", XL_TICK_MARK.OUTSIDE, "c:dateAx/c:majorTickMark{val=out}", ), 
("c:dateAx/c:majorTickMark{val=out}", XL_TICK_MARK.CROSS, "c:dateAx"), ("c:dateAx", XL_TICK_MARK.CROSS, "c:dateAx"), ("c:dateAx/c:majorTickMark{val=cross}", XL_TICK_MARK.CROSS, "c:dateAx"), ("c:valAx", XL_TICK_MARK.INSIDE, "c:valAx/c:majorTickMark{val=in}"), ( "c:valAx/c:majorTickMark{val=in}", XL_TICK_MARK.OUTSIDE, "c:valAx/c:majorTickMark{val=out}", ), ("c:valAx/c:majorTickMark{val=out}", XL_TICK_MARK.CROSS, "c:valAx"), ("c:valAx", XL_TICK_MARK.CROSS, "c:valAx"), ("c:valAx/c:majorTickMark{val=cross}", XL_TICK_MARK.CROSS, "c:valAx"), ] ) def major_tick_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return axis, new_value, expected_xml @pytest.fixture( params=[ ("c:catAx/c:scaling", None), ("c:catAx/c:scaling/c:max{val=12.34}", 12.34), ("c:dateAx/c:scaling", None), ("c:dateAx/c:scaling/c:max{val=42.24}", 42.24), ("c:valAx/c:scaling", None), ("c:valAx/c:scaling/c:max{val=23.45}", 23.45), ] ) def maximum_scale_get_fixture(self, request): xAx_cxml, expected_value = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, expected_value @pytest.fixture( params=[ ("c:catAx/c:scaling", 34.56, "c:catAx/c:scaling/c:max{val=34.56}"), ( "c:catAx/c:scaling/c:max{val=34.56}", 42.42, "c:catAx/c:scaling/c:max{val=42.42}", ), ("c:catAx/c:scaling/c:max{val=42.42}", None, "c:catAx/c:scaling"), ("c:catAx/c:scaling", None, "c:catAx/c:scaling"), ("c:dateAx/c:scaling", 45.67, "c:dateAx/c:scaling/c:max{val=45.67}"), ( "c:dateAx/c:scaling/c:max{val=45.67}", 42.42, "c:dateAx/c:scaling/c:max{val=42.42}", ), ("c:dateAx/c:scaling/c:max{val=42.42}", None, "c:dateAx/c:scaling"), ("c:dateAx/c:scaling", None, "c:dateAx/c:scaling"), ("c:valAx/c:scaling", 56.78, "c:valAx/c:scaling/c:max{val=56.78}"), ( "c:valAx/c:scaling/c:max{val=56.78}", 42.42, "c:valAx/c:scaling/c:max{val=42.42}", ), ("c:valAx/c:scaling/c:max{val=42.42}", None, "c:valAx/c:scaling"), ("c:valAx/c:scaling", None, "c:valAx/c:scaling"), ] ) def maximum_scale_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return axis, new_value, expected_xml @pytest.fixture( params=[ ("c:catAx/c:scaling", None), ("c:catAx/c:scaling/c:min{val=12.34}", 12.34), ("c:dateAx/c:scaling", None), ("c:dateAx/c:scaling/c:min{val=42.24}", 42.24), ("c:valAx/c:scaling", None), ("c:valAx/c:scaling/c:min{val=23.45}", 23.45), ] ) def minimum_scale_get_fixture(self, request): xAx_cxml, expected_value = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, expected_value @pytest.fixture( params=[ ("c:catAx/c:scaling", 34.56, "c:catAx/c:scaling/c:min{val=34.56}"), ( "c:catAx/c:scaling/c:min{val=34.56}", 42.42, "c:catAx/c:scaling/c:min{val=42.42}", ), ("c:catAx/c:scaling/c:min{val=42.42}", None, "c:catAx/c:scaling"), ("c:catAx/c:scaling", None, "c:catAx/c:scaling"), ("c:dateAx/c:scaling", 45.67, "c:dateAx/c:scaling/c:min{val=45.67}"), ( "c:dateAx/c:scaling/c:min{val=45.67}", 42.42, "c:dateAx/c:scaling/c:min{val=42.42}", ), ("c:dateAx/c:scaling/c:min{val=42.42}", None, "c:dateAx/c:scaling"), ("c:dateAx/c:scaling", None, "c:dateAx/c:scaling"), ("c:valAx/c:scaling", 56.78, "c:valAx/c:scaling/c:min{val=56.78}"), ( "c:valAx/c:scaling/c:min{val=56.78}", 42.42, "c:valAx/c:scaling/c:min{val=42.42}", ), ("c:valAx/c:scaling/c:min{val=42.42}", None, "c:valAx/c:scaling"), ("c:valAx/c:scaling", None, "c:valAx/c:scaling"), ] ) def minimum_scale_set_fixture(self, request): 
xAx_cxml, new_value, expected_xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return axis, new_value, expected_xml @pytest.fixture( params=[ ("c:catAx", False), ("c:catAx/c:minorGridlines", True), ("c:dateAx", False), ("c:dateAx/c:minorGridlines", True), ("c:valAx", False), ("c:valAx/c:minorGridlines", True), ] ) def minor_gridlines_get_fixture(self, request): xAx_cxml, expected_value = request.param base_axis = _BaseAxis(element(xAx_cxml)) return base_axis, expected_value @pytest.fixture( params=[ ("c:catAx", True, "c:catAx/c:minorGridlines"), ("c:catAx/c:minorGridlines", True, "c:catAx/c:minorGridlines"), ("c:catAx/c:minorGridlines", False, "c:catAx"), ("c:catAx", False, "c:catAx"), ("c:dateAx", True, "c:dateAx/c:minorGridlines"), ("c:dateAx/c:minorGridlines", True, "c:dateAx/c:minorGridlines"), ("c:dateAx/c:minorGridlines", False, "c:dateAx"), ("c:dateAx", False, "c:dateAx"), ("c:valAx", True, "c:valAx/c:minorGridlines"), ("c:valAx/c:minorGridlines", True, "c:valAx/c:minorGridlines"), ("c:valAx/c:minorGridlines", False, "c:valAx"), ("c:valAx", False, "c:valAx"), ] ) def minor_gridlines_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param base_axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return base_axis, new_value, expected_xml @pytest.fixture( params=[ ("c:catAx", XL_TICK_MARK.CROSS), ("c:catAx/c:minorTickMark", XL_TICK_MARK.CROSS), ("c:catAx/c:minorTickMark{val=out}", XL_TICK_MARK.OUTSIDE), ("c:dateAx", XL_TICK_MARK.CROSS), ("c:dateAx/c:minorTickMark", XL_TICK_MARK.CROSS), ("c:dateAx/c:minorTickMark{val=out}", XL_TICK_MARK.OUTSIDE), ("c:valAx", XL_TICK_MARK.CROSS), ("c:valAx/c:minorTickMark", XL_TICK_MARK.CROSS), ("c:valAx/c:minorTickMark{val=in}", XL_TICK_MARK.INSIDE), ] ) def minor_tick_get_fixture(self, request): xAx_cxml, expected_value = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, expected_value @pytest.fixture( params=[ ("c:catAx", XL_TICK_MARK.INSIDE, "c:catAx/c:minorTickMark{val=in}"), ( "c:catAx/c:minorTickMark{val=in}", XL_TICK_MARK.OUTSIDE, "c:catAx/c:minorTickMark{val=out}", ), ("c:catAx/c:minorTickMark{val=out}", XL_TICK_MARK.CROSS, "c:catAx"), ("c:catAx", XL_TICK_MARK.CROSS, "c:catAx"), ("c:catAx/c:minorTickMark{val=cross}", XL_TICK_MARK.CROSS, "c:catAx"), ("c:dateAx", XL_TICK_MARK.INSIDE, "c:dateAx/c:minorTickMark{val=in}"), ( "c:dateAx/c:minorTickMark{val=in}", XL_TICK_MARK.OUTSIDE, "c:dateAx/c:minorTickMark{val=out}", ), ("c:dateAx/c:minorTickMark{val=out}", XL_TICK_MARK.CROSS, "c:dateAx"), ("c:dateAx", XL_TICK_MARK.CROSS, "c:dateAx"), ("c:dateAx/c:minorTickMark{val=cross}", XL_TICK_MARK.CROSS, "c:dateAx"), ("c:valAx", XL_TICK_MARK.INSIDE, "c:valAx/c:minorTickMark{val=in}"), ( "c:valAx/c:minorTickMark{val=in}", XL_TICK_MARK.OUTSIDE, "c:valAx/c:minorTickMark{val=out}", ), ("c:valAx/c:minorTickMark{val=out}", XL_TICK_MARK.CROSS, "c:valAx"), ("c:valAx", XL_TICK_MARK.CROSS, "c:valAx"), ("c:valAx/c:minorTickMark{val=cross}", XL_TICK_MARK.CROSS, "c:valAx"), ] ) def minor_tick_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return axis, new_value, expected_xml @pytest.fixture( params=[ ("c:catAx/c:scaling", False), ("c:valAx/c:scaling/c:orientation", False), ("c:catAx/c:scaling/c:orientation{val=minMax}", False), ("c:valAx/c:scaling/c:orientation{val=maxMin}", True), ] ) def reverse_order_get_fixture(self, request): xAx_cxml, expected_value 
= request.param return element(xAx_cxml), expected_value @pytest.fixture( params=[ ("c:catAx/c:scaling", False, "c:catAx/c:scaling"), ("c:catAx/c:scaling", True, "c:catAx/c:scaling/c:orientation{val=maxMin}"), ("c:valAx/c:scaling/c:orientation", False, "c:valAx/c:scaling"), ( "c:valAx/c:scaling/c:orientation", True, "c:valAx/c:scaling/c:orientation{val=maxMin}", ), ( "c:dateAx/c:scaling/c:orientation{val=minMax}", False, "c:dateAx/c:scaling", ), ( "c:dateAx/c:scaling/c:orientation{val=minMax}", True, "c:dateAx/c:scaling/c:orientation{val=maxMin}", ), ( "c:catAx/c:scaling/c:orientation{val=maxMin}", False, "c:catAx/c:scaling", ), ( "c:catAx/c:scaling/c:orientation{val=maxMin}", True, "c:catAx/c:scaling/c:orientation{val=maxMin}", ), ] ) def reverse_order_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param xAx, expected_xml = element(xAx_cxml), xml(expected_xAx_cxml) return xAx, new_value, expected_xml @pytest.fixture(params=["c:catAx", "c:dateAx", "c:valAx"]) def tick_labels_fixture(self, request, TickLabels_, tick_labels_): xAx_cxml = request.param xAx = element(xAx_cxml) axis = _BaseAxis(xAx) return axis, tick_labels_, TickLabels_, xAx @pytest.fixture( params=[ ("c:catAx", XL_TICK_LBL_POS.NEXT_TO_AXIS), ("c:catAx/c:tickLblPos", XL_TICK_LBL_POS.NEXT_TO_AXIS), ("c:catAx/c:tickLblPos{val=high}", XL_TICK_LBL_POS.HIGH), ("c:dateAx", XL_TICK_LBL_POS.NEXT_TO_AXIS), ("c:dateAx/c:tickLblPos", XL_TICK_LBL_POS.NEXT_TO_AXIS), ("c:dateAx/c:tickLblPos{val=low}", XL_TICK_LBL_POS.LOW), ("c:valAx", XL_TICK_LBL_POS.NEXT_TO_AXIS), ("c:valAx/c:tickLblPos", XL_TICK_LBL_POS.NEXT_TO_AXIS), ("c:valAx/c:tickLblPos{val=none}", XL_TICK_LBL_POS.NONE), ] ) def tick_lbl_pos_get_fixture(self, request): xAx_cxml, expected_value = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, expected_value @pytest.fixture( params=[ ("c:catAx", XL_TICK_LBL_POS.HIGH, "c:catAx/c:tickLblPos{val=high}"), ( "c:catAx/c:tickLblPos{val=high}", XL_TICK_LBL_POS.LOW, "c:catAx/c:tickLblPos{val=low}", ), ("c:catAx/c:tickLblPos{val=low}", None, "c:catAx/c:tickLblPos"), ("c:catAx", None, "c:catAx/c:tickLblPos"), ( "c:dateAx", XL_TICK_LBL_POS.NEXT_TO_AXIS, "c:dateAx/c:tickLblPos{val=nextTo}", ), ( "c:dateAx/c:tickLblPos{val=nextTo}", XL_TICK_LBL_POS.NONE, "c:dateAx/c:tickLblPos{val=none}", ), ("c:dateAx/c:tickLblPos{val=none}", None, "c:dateAx/c:tickLblPos"), ("c:valAx", XL_TICK_LBL_POS.HIGH, "c:valAx/c:tickLblPos{val=high}"), ( "c:valAx/c:tickLblPos{val=high}", XL_TICK_LBL_POS.LOW, "c:valAx/c:tickLblPos{val=low}", ), ("c:valAx/c:tickLblPos{val=low}", None, "c:valAx/c:tickLblPos"), ] ) def tick_lbl_pos_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return axis, new_value, expected_xml @pytest.fixture(params=["c:catAx", "c:dateAx", "c:valAx"]) def title_fixture(self, request, AxisTitle_, axis_title_): xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, AxisTitle_, axis_title_ @pytest.fixture( params=[ ("c:catAx", False), ("c:catAx/c:delete", False), ("c:catAx/c:delete{val=0}", True), ("c:catAx/c:delete{val=1}", False), ("c:catAx/c:delete{val=false}", True), ("c:dateAx", False), ("c:dateAx/c:delete", False), ("c:dateAx/c:delete{val=0}", True), ("c:dateAx/c:delete{val=1}", False), ("c:dateAx/c:delete{val=false}", True), ("c:valAx", False), ("c:valAx/c:delete", False), ("c:valAx/c:delete{val=0}", True), ("c:valAx/c:delete{val=1}", False), ("c:valAx/c:delete{val=false}", True), ] ) 
def visible_get_fixture(self, request): xAx_cxml, expected_bool_value = request.param axis = _BaseAxis(element(xAx_cxml)) return axis, expected_bool_value @pytest.fixture( params=[ ("c:catAx", False, "c:catAx/c:delete"), ("c:catAx/c:delete", True, "c:catAx/c:delete{val=0}"), ("c:catAx/c:delete{val=1}", True, "c:catAx/c:delete{val=0}"), ("c:catAx/c:delete{val=0}", False, "c:catAx/c:delete"), ("c:catAx", True, "c:catAx/c:delete{val=0}"), ("c:dateAx", False, "c:dateAx/c:delete"), ("c:dateAx/c:delete", True, "c:dateAx/c:delete{val=0}"), ("c:dateAx/c:delete{val=0}", False, "c:dateAx/c:delete"), ("c:dateAx", True, "c:dateAx/c:delete{val=0}"), ("c:valAx/c:delete", True, "c:valAx/c:delete{val=0}"), ("c:valAx/c:delete{val=1}", False, "c:valAx/c:delete"), ("c:valAx/c:delete{val=0}", True, "c:valAx/c:delete{val=0}"), ] ) def visible_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param axis = _BaseAxis(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return axis, new_value, expected_xml # fixture components --------------------------------------------- @pytest.fixture def AxisTitle_(self, request, axis_title_): return class_mock( request, "pptx.chart.axis.AxisTitle", return_value=axis_title_ ) @pytest.fixture def axis_title_(self, request): return instance_mock(request, AxisTitle) @pytest.fixture def ChartFormat_(self, request, format_): return class_mock(request, "pptx.chart.axis.ChartFormat", return_value=format_) @pytest.fixture def format_(self, request): return instance_mock(request, ChartFormat) @pytest.fixture def MajorGridlines_(self, request, major_gridlines_): return class_mock( request, "pptx.chart.axis.Gridlines", return_value=major_gridlines_ ) @pytest.fixture def major_gridlines_(self, request): return instance_mock(request, Gridlines) @pytest.fixture def MinorGridlines_(self, request, minor_gridlines_): return class_mock( request, "pptx.chart.axis.Gridlines", return_value=minor_gridlines_ ) @pytest.fixture def minor_gridlines_(self, request): return instance_mock(request, Gridlines) @pytest.fixture def TickLabels_(self, request, tick_labels_): return class_mock( request, "pptx.chart.axis.TickLabels", return_value=tick_labels_ ) @pytest.fixture def tick_labels_(self, request): return instance_mock(request, TickLabels) class DescribeAxisTitle(object): def it_knows_whether_it_has_a_text_frame(self, has_tf_get_fixture): axis_title, expected_value = has_tf_get_fixture value = axis_title.has_text_frame assert value is expected_value def it_can_change_whether_it_has_a_text_frame(self, has_tf_set_fixture): axis_title, value, expected_xml = has_tf_set_fixture axis_title.has_text_frame = value assert axis_title._element.xml == expected_xml def it_provides_access_to_its_format(self, format_fixture): axis_title, ChartFormat_, format_ = format_fixture format = axis_title.format ChartFormat_.assert_called_once_with(axis_title._element) assert format is format_ def it_provides_access_to_its_text_frame(self, text_frame_fixture): axis_title, TextFrame_, text_frame_ = text_frame_fixture text_frame = axis_title.text_frame TextFrame_.assert_called_once_with(axis_title._element.tx.rich, axis_title) assert text_frame is text_frame_ # fixtures ------------------------------------------------------- @pytest.fixture def format_fixture(self, request, ChartFormat_, format_): axis_title = AxisTitle(element("c:title")) return axis_title, ChartFormat_, format_ @pytest.fixture( params=[ ("c:title", False), ("c:title/c:tx", False), ("c:title/c:tx/c:strRef", False), 
("c:title/c:tx/c:rich", True), ] ) def has_tf_get_fixture(self, request): title_cxml, expected_value = request.param axis_title = AxisTitle(element(title_cxml)) return axis_title, expected_value @pytest.fixture( params=[ ( "c:title{a:b=c}", True, "c:title{a:b=c}/c:tx/c:rich/(a:bodyPr,a:lstStyle,a:p/a:pPr/a:defRPr" ")", ), ( "c:title{a:b=c}/c:tx", True, "c:title{a:b=c}/c:tx/c:rich/(a:bodyPr,a:lstStyle,a:p/a:pPr/a:defRPr" ")", ), ( "c:title{a:b=c}/c:tx/c:strRef", True, "c:title{a:b=c}/c:tx/c:rich/(a:bodyPr,a:lstStyle,a:p/a:pPr/a:defRPr" ")", ), ("c:title/c:tx/c:rich", True, "c:title/c:tx/c:rich"), ("c:title", False, "c:title"), ("c:title/c:tx", False, "c:title"), ("c:title/c:tx/c:rich", False, "c:title"), ("c:title/c:tx/c:strRef", False, "c:title"), ] ) def has_tf_set_fixture(self, request): title_cxml, value, expected_cxml = request.param axis_title = AxisTitle(element(title_cxml)) expected_xml = xml(expected_cxml) return axis_title, value, expected_xml @pytest.fixture def text_frame_fixture(self, request, TextFrame_): axis_title = AxisTitle(element("c:title")) text_frame_ = TextFrame_.return_value return axis_title, TextFrame_, text_frame_ # fixture components --------------------------------------------- @pytest.fixture def ChartFormat_(self, request, format_): return class_mock(request, "pptx.chart.axis.ChartFormat", return_value=format_) @pytest.fixture def format_(self, request): return instance_mock(request, ChartFormat) @pytest.fixture def TextFrame_(self, request): return class_mock(request, "pptx.chart.axis.TextFrame") class DescribeCategoryAxis(object): def it_knows_its_category_type(self, cat_type_get_fixture): category_axis, expected_value = cat_type_get_fixture assert category_axis.category_type is expected_value # fixtures ------------------------------------------------------- @pytest.fixture def cat_type_get_fixture(self): category_axis = CategoryAxis(None) expected_value = XL_CATEGORY_TYPE.CATEGORY_SCALE return category_axis, expected_value class DescribeDateAxis(object): def it_knows_its_category_type(self, cat_type_get_fixture): date_axis, expected_value = cat_type_get_fixture assert date_axis.category_type is expected_value # fixtures ------------------------------------------------------- @pytest.fixture def cat_type_get_fixture(self): date_axis = DateAxis(None) expected_value = XL_CATEGORY_TYPE.TIME_SCALE return date_axis, expected_value class DescribeGridlines(object): def it_provides_access_to_its_format(self, format_fixture): gridlines, expected_xml, ChartFormat_, format_ = format_fixture format = gridlines.format assert gridlines._xAx.xml == expected_xml ChartFormat_.assert_called_once_with( gridlines._xAx.xpath("c:majorGridlines")[0] ) assert format is format_ # fixtures ------------------------------------------------------- @pytest.fixture( params=[ ("c:valAx", "c:valAx/c:majorGridlines"), ("c:catAx/c:majorGridlines", "c:catAx/c:majorGridlines"), ] ) def format_fixture(self, request, ChartFormat_, format_): xAx_cxml, expected_cxml = request.param gridlines = Gridlines(element(xAx_cxml), 'major') expected_xml = xml(expected_cxml) return gridlines, expected_xml, ChartFormat_, format_ # fixture components --------------------------------------------- @pytest.fixture def ChartFormat_(self, request, format_): return class_mock(request, "pptx.chart.axis.ChartFormat", return_value=format_) @pytest.fixture def format_(self, request): return instance_mock(request, ChartFormat) class DescribeTickLabels(object): def it_provides_access_to_its_font(self, font_fixture): 
tick_labels, Font_, defRPr, font_ = font_fixture font = tick_labels.font Font_.assert_called_once_with(defRPr) assert font is font_ def it_adds_a_txPr_to_help_font(self, txPr_fixture): tick_labels, expected_xml = txPr_fixture tick_labels.font assert tick_labels._element.xml == expected_xml def it_knows_its_number_format(self, number_format_get_fixture): tick_labels, expected_value = number_format_get_fixture assert tick_labels.number_format == expected_value def it_can_change_its_number_format(self, number_format_set_fixture): tick_labels, new_value, expected_xml = number_format_set_fixture tick_labels.number_format = new_value assert tick_labels._element.xml == expected_xml def it_knows_whether_its_number_format_is_linked( self, number_format_is_linked_get_fixture ): tick_labels, expected_value = number_format_is_linked_get_fixture assert tick_labels.number_format_is_linked is expected_value def it_can_change_whether_its_number_format_is_linked( self, number_format_is_linked_set_fixture ): tick_labels, new_value, expected_xml = number_format_is_linked_set_fixture tick_labels.number_format_is_linked = new_value assert tick_labels._element.xml == expected_xml def it_knows_its_offset(self, offset_get_fixture): tick_labels, expected_value = offset_get_fixture assert tick_labels.offset == expected_value def it_can_change_its_offset(self, offset_set_fixture): tick_labels, new_value, expected_xml = offset_set_fixture tick_labels.offset = new_value assert tick_labels._element.xml == expected_xml # fixtures ------------------------------------------------------- @pytest.fixture def font_fixture(self, Font_, font_): catAx = element("c:catAx/c:txPr/a:p/a:pPr/a:defRPr") defRPr = catAx.xpath(".//a:defRPr")[0] tick_labels = TickLabels(catAx) return tick_labels, Font_, defRPr, font_ @pytest.fixture( params=[ ("c:catAx", "General"), ("c:valAx/c:numFmt{formatCode=General}", "General"), ] ) def number_format_get_fixture(self, request): xAx_cxml, expected_value = request.param tick_labels = TickLabels(element(xAx_cxml)) return tick_labels, expected_value @pytest.fixture( params=[ ( "c:catAx", "General", "c:catAx/c:numFmt{formatCode=General,sourceLinked=0}", ), ( "c:valAx/c:numFmt{formatCode=General}", "00.00", "c:valAx/c:numFmt{formatCode=00.00,sourceLinked=0}", ), ] ) def number_format_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param tick_labels = TickLabels(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return tick_labels, new_value, expected_xml @pytest.fixture( params=[ ("c:catAx", False), ("c:valAx/c:numFmt", True), ("c:valAx/c:numFmt{sourceLinked=0}", False), ("c:catAx/c:numFmt{sourceLinked=1}", True), ] ) def number_format_is_linked_get_fixture(self, request): xAx_cxml, expected_value = request.param tick_labels = TickLabels(element(xAx_cxml)) return tick_labels, expected_value @pytest.fixture( params=[ ("c:valAx", True, "c:valAx/c:numFmt{sourceLinked=1}"), ("c:catAx", False, "c:catAx/c:numFmt{sourceLinked=0}"), ("c:valAx", None, "c:valAx/c:numFmt"), ("c:catAx/c:numFmt", True, "c:catAx/c:numFmt{sourceLinked=1}"), ( "c:valAx/c:numFmt{sourceLinked=1}", False, "c:valAx/c:numFmt{sourceLinked=0}", ), ] ) def number_format_is_linked_set_fixture(self, request): xAx_cxml, new_value, expected_xAx_cxml = request.param tick_labels = TickLabels(element(xAx_cxml)) expected_xml = xml(expected_xAx_cxml) return tick_labels, new_value, expected_xml @pytest.fixture( params=[ ("c:catAx", 100), ("c:catAx/c:lblOffset", 100), ("c:catAx/c:lblOffset{val=420}", 420), 
("c:catAx/c:lblOffset{val=004}", 4), ("c:catAx/c:lblOffset{val=42%}", 42), ("c:catAx/c:lblOffset{val=02%}", 2), ] ) def offset_get_fixture(self, request): catAx_cxml, expected_value = request.param tick_labels = TickLabels(element(catAx_cxml)) return tick_labels, expected_value @pytest.fixture( params=[ ("c:catAx", 420, "c:catAx/c:lblOffset{val=420}"), ("c:catAx/c:lblOffset{val=420}", 100, "c:catAx"), ] ) def offset_set_fixture(self, request): catAx_cxml, new_value, expected_catAx_cxml = request.param tick_labels = TickLabels(element(catAx_cxml)) expected_xml = xml(expected_catAx_cxml) return tick_labels, new_value, expected_xml @pytest.fixture( params=[ ( "c:valAx{a:b=c}", "c:valAx{a:b=c}/c:txPr/(a:bodyPr,a:lstStyle,a:p/a:pPr/a:defRPr)", ), ( "c:valAx{a:b=c}/c:txPr/(a:bodyPr,a:p)", "c:valAx{a:b=c}/c:txPr/(a:bodyPr,a:p/a:pPr/a:defRPr)", ), ( "c:valAx{a:b=c}/c:txPr/(a:bodyPr,a:p/a:pPr)", "c:valAx{a:b=c}/c:txPr/(a:bodyPr,a:p/a:pPr/a:defRPr)", ), ] ) def txPr_fixture(self, request): xAx_cxml, expected_cxml = request.param tick_labels = TickLabels(element(xAx_cxml)) expected_xml = xml(expected_cxml) return tick_labels, expected_xml # fixture components --------------------------------------------- @pytest.fixture def Font_(self, request, font_): return class_mock(request, "pptx.chart.axis.Font", return_value=font_) @pytest.fixture def font_(self, request): return instance_mock(request, Font) class DescribeValueAxis(object): def it_knows_the_other_axis_crossing_type(self, crosses_get_fixture): value_axis, expected_value = crosses_get_fixture assert value_axis.crosses == expected_value def it_can_change_the_other_axis_crossing_type(self, crosses_set_fixture): value_axis, new_value, plotArea, expected_xml = crosses_set_fixture value_axis.crosses = new_value assert plotArea.xml == expected_xml def it_knows_the_other_axis_crossing_value(self, crosses_at_get_fixture): value_axis, expected_value = crosses_at_get_fixture assert value_axis.crosses_at == expected_value def it_can_change_the_other_axis_crossing_value(self, crosses_at_set_fixture): value_axis, new_value, plotArea, expected_xml = crosses_at_set_fixture value_axis.crosses_at = new_value assert plotArea.xml == expected_xml def it_knows_its_major_unit(self, major_unit_get_fixture): value_axis, expected_value = major_unit_get_fixture assert value_axis.major_unit == expected_value def it_can_change_its_major_unit(self, major_unit_set_fixture): value_axis, new_value, expected_xml = major_unit_set_fixture value_axis.major_unit = new_value assert value_axis._element.xml == expected_xml def it_knows_its_minor_unit(self, minor_unit_get_fixture): value_axis, expected_value = minor_unit_get_fixture assert value_axis.minor_unit == expected_value def it_can_change_its_minor_unit(self, minor_unit_set_fixture): value_axis, new_value, expected_xml = minor_unit_set_fixture value_axis.minor_unit = new_value assert value_axis._element.xml == expected_xml # fixtures ------------------------------------------------------- @pytest.fixture( params=[ ("c:plotArea/(c:valAx/c:axId{val=42},c:valAx/c:crossAx{val=42})", None), ( "c:plotArea/(c:catAx/(c:axId{val=42},c:crossesAt{val=2.4}),c:valAx/" "c:crossAx{val=42})", 2.4, ), ( "c:plotArea/(c:dateAx/(c:axId{val=42},c:crossesAt{val=-1.2}),c:valA" "x/c:crossAx{val=42})", -1.2, ), ] ) def crosses_at_get_fixture(self, request): plotArea_cxml, expected_value = request.param plotArea = element(plotArea_cxml) valAx = plotArea.xpath('c:valAx[c:crossAx/@val="42"]')[0] value_axis = ValueAxis(valAx) return value_axis, 
expected_value @pytest.fixture( params=[ ( "c:plotArea/(c:valAx/c:axId{val=42},c:valAx/c:crossAx{val=42})", 2.4, "c:plotArea/(c:valAx/(c:axId{val=42},c:crossesAt{val=2.4}),c:valAx/" "c:crossAx{val=42})", ), ( "c:plotArea/(c:catAx/(c:axId{val=42},c:crosses{val=min}),c:valAx/c:" "crossAx{val=42})", 1.5, "c:plotArea/(c:catAx/(c:axId{val=42},c:crossesAt{val=1.5}),c:valAx/" "c:crossAx{val=42})", ), ( "c:plotArea/(c:dateAx/(c:axId{val=42},c:crossesAt{val=2.4}),c:valAx" "/c:crossAx{val=42})", 1.5, "c:plotArea/(c:dateAx/(c:axId{val=42},c:crossesAt{val=1.5}),c:valAx" "/c:crossAx{val=42})", ), ( "c:plotArea/(c:catAx/(c:axId{val=42},c:crossesAt{val=1.5}),c:valAx/" "c:crossAx{val=42})", None, "c:plotArea/(c:catAx/(c:axId{val=42}),c:valAx/c:crossAx{val=42})", ), ] ) def crosses_at_set_fixture(self, request): plotArea_cxml, new_value, expected_cxml = request.param plotArea = element(plotArea_cxml) valAx = plotArea.xpath('c:valAx[c:crossAx/@val="42"]')[0] value_axis = ValueAxis(valAx) expected_xml = xml(expected_cxml) return value_axis, new_value, plotArea, expected_xml @pytest.fixture( params=[ ("c:plotArea/(c:valAx/c:axId{val=42},c:valAx/c:crossAx{val=42})", "CUSTOM"), ( "c:plotArea/(c:catAx/(c:axId{val=42},c:crosses{val=autoZero}),c:val" "Ax/c:crossAx{val=42})", "AUTOMATIC", ), ( "c:plotArea/(c:valAx/(c:axId{val=42},c:crosses{val=min}),c:valAx/c:" "crossAx{val=42})", "MINIMUM", ), ] ) def crosses_get_fixture(self, request): cxml, member = request.param valAx = element(cxml).xpath('c:valAx[c:crossAx/@val="42"]')[0] value_axis = ValueAxis(valAx) expected_value = getattr(XL_AXIS_CROSSES, member) return value_axis, expected_value @pytest.fixture( params=[ ( "c:plotArea/(c:valAx/(c:axId{val=42},c:crossesAt{val=2.4}),c:valAx/" "c:crossAx{val=42})", "AUTOMATIC", "c:plotArea/(c:valAx/(c:axId{val=42},c:crosses{val=autoZero}),c:val" "Ax/c:crossAx{val=42})", ), ( "c:plotArea/(c:catAx/(c:axId{val=42},c:crosses{val=autoZero}),c:val" "Ax/c:crossAx{val=42})", "MINIMUM", "c:plotArea/(c:catAx/(c:axId{val=42},c:crosses{val=min}),c:valAx/c:" "crossAx{val=42})", ), ( "c:plotArea/(c:valAx/(c:axId{val=42},c:crosses{val=min}),c:valAx/c:" "crossAx{val=42})", "CUSTOM", "c:plotArea/(c:valAx/(c:axId{val=42},c:crossesAt{val=0.0}),c:valAx/" "c:crossAx{val=42})", ), ( "c:plotArea/(c:catAx/(c:axId{val=42},c:crossesAt{val=2.4}),c:valAx/" "c:crossAx{val=42})", "CUSTOM", "c:plotArea/(c:catAx/(c:axId{val=42},c:crossesAt{val=2.4}),c:valAx/" "c:crossAx{val=42})", ), ] ) def crosses_set_fixture(self, request): plotArea_cxml, member, expected_cxml = request.param plotArea = element(plotArea_cxml) valAx = plotArea.xpath('c:valAx[c:crossAx/@val="42"]')[0] value_axis = ValueAxis(valAx) new_value = getattr(XL_AXIS_CROSSES, member) expected_xml = xml(expected_cxml) return value_axis, new_value, plotArea, expected_xml @pytest.fixture(params=[("c:valAx", None), ("c:valAx/c:majorUnit{val=4.2}", 4.2)]) def major_unit_get_fixture(self, request): valAx_cxml, expected_value = request.param value_axis = ValueAxis(element(valAx_cxml)) return value_axis, expected_value @pytest.fixture( params=[ ("c:valAx", 42, "c:valAx/c:majorUnit{val=42.0}"), ("c:valAx", None, "c:valAx"), ("c:valAx/c:majorUnit{val=42.0}", 24.0, "c:valAx/c:majorUnit{val=24.0}"), ("c:valAx/c:majorUnit{val=42.0}", None, "c:valAx"), ] ) def major_unit_set_fixture(self, request): valAx_cxml, new_value, expected_valAx_cxml = request.param value_axis = ValueAxis(element(valAx_cxml)) expected_xml = xml(expected_valAx_cxml) return value_axis, new_value, expected_xml 
@pytest.fixture(params=[("c:valAx", None), ("c:valAx/c:minorUnit{val=2.4}", 2.4)]) def minor_unit_get_fixture(self, request): valAx_cxml, expected_value = request.param value_axis = ValueAxis(element(valAx_cxml)) return value_axis, expected_value @pytest.fixture( params=[ ("c:valAx", 36, "c:valAx/c:minorUnit{val=36.0}"), ("c:valAx", None, "c:valAx"), ("c:valAx/c:minorUnit{val=36.0}", 12.6, "c:valAx/c:minorUnit{val=12.6}"), ("c:valAx/c:minorUnit{val=36.0}", None, "c:valAx"), ] ) def minor_unit_set_fixture(self, request): valAx_cxml, new_value, expected_valAx_cxml = request.param value_axis = ValueAxis(element(valAx_cxml)) expected_xml = xml(expected_valAx_cxml) return value_axis, new_value, expected_xml
py
b41471c8cd15f7577f4880c9f161b9b0694aaf09
# File: ajaxlogclient.py # Author: Carl Allendorph # Date: 29NOV2016 # # Description: # This file contains the implementation of a logging client # to test the ajax logging handler import logging import logging.handlers as loghdlr class HTMLHandler(logging.Handler): """ This handler is used to provide a view on the screen of the logging messages that have been sent over the AJAX handler. This is primarily for debugging purposes. """ def __init__(self, elemId): """ Configure the HTML Handler @param elemId parent element where we will start pushing logging messages. """ logging.Handler.__init__(self) self._elem = document.getElementById(elemId) def emit(self, record): msg = self.format(record) if self._elem: node = document.createElement("LI") content = document.createTextNode(msg) node.appendChild(content) self._elem.appendChild(node) def setupLogger(): root = logging.getLogger() root.setLevel(10) fmt = logging.Formatter("{levelname}[{asctime}]: {message}","%H:%M:%S", style="{") headers = [ ('X-Requested-With', 'XMLHttpRequest'), ('Content-type', 'application/x-www-form-urlencoded'), ] ahdlr = loghdlr.AJAXHandler("http://127.0.0.1:8081/log", "POST") #ahdlr = loghdlr.AJAXHandler("/log") ahdlr.setLevel(10) ahdlr.setFormatter(fmt) htmlHdlr = HTMLHandler("log-output") htmlHdlr.setLevel(10) htmlHdlr.setFormatter(fmt) root.addHandler(ahdlr) root.addHandler(htmlHdlr) logging.info("Started AJAX Logger") setupLogger() def logPeriodic(): logging.info("Message on The Bus goes Round and Round") setInterval(logPeriodic, 1000)
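# Hedged sketch of a minimal endpoint that could receive the records posted by
# the AJAXHandler configured above (it POSTs an urlencoded body to
# http://127.0.0.1:8081/log). The exact field layout depends on the handler
# implementation in use, so this receiver simply decodes and echoes whatever
# arrives; it is an illustration, not the project's actual log server.
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs

class LogReceiver(BaseHTTPRequestHandler):
    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(length).decode("utf-8")
        print(parse_qs(body))              # inspect the posted log record
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"OK")

if __name__ == "__main__":
    HTTPServer(("127.0.0.1", 8081), LogReceiver).serve_forever()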
py
b414721370a75e0389ac97d8abe05e85814890b7
# processing & output
s = 1
i = 3
j = 2
while i <= 39:
    s += i/j
    i += 2
    j *= 2
print(f'{s:.2f}')
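# The loop above accumulates S = 1 + 3/2 + 5/4 + 7/8 + ... + 39/2**19, i.e.
# 1 plus the sum of (2*k + 1) / 2**k for k = 1..19. Since the infinite series
# sum of (2*k + 1) / 2**k equals 2*2 + 1 = 5, S approaches 6, and the printed
# value is 6.00 to two decimals. An equivalent one-line check:
s_check = 1 + sum((2 * k + 1) / 2 ** k for k in range(1, 20))
print(f'{s_check:.2f}')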
py
b4147213dd447ee398aef3d02290636609b3302f
import math from typing import Optional import torch from torch import nn import torch.nn.functional as F from torch_geometric.nn import MessagePassing from torch_cluster import radius_graph import warnings def visualize_basis(basis_type, num_rbf=50, cutoff_lower=0, cutoff_upper=5): """ Function for quickly visualizing a specific basis. This is useful for inspecting the distance coverage of basis functions for non-default lower and upper cutoffs. Args: basis_type (str): Specifies the type of basis functions used. Can be one of ['gauss',expnorm'] num_rbf (int, optional): The number of basis functions. (default: :obj:`50`) cutoff_lower (float, optional): The lower cutoff of the basis. (default: :obj:`0`) cutoff_upper (float, optional): The upper cutoff of the basis. (default: :obj:`5`) """ import matplotlib.pyplot as plt distances = torch.linspace(cutoff_lower - 1, cutoff_upper + 1, 1000) basis_kwargs = { "num_rbf": num_rbf, "cutoff_lower": cutoff_lower, "cutoff_upper": cutoff_upper, } basis_expansion = rbf_class_mapping[basis_type](**basis_kwargs) expanded_distances = basis_expansion(distances) for i in range(expanded_distances.shape[-1]): plt.plot(distances.numpy(), expanded_distances[:, i].detach().numpy()) plt.show() class NeighborEmbedding(MessagePassing): def __init__(self, hidden_channels, num_rbf, cutoff_lower, cutoff_upper, max_z=100): super(NeighborEmbedding, self).__init__(aggr="add") self.embedding = nn.Embedding(max_z, hidden_channels) self.distance_proj = nn.Linear(num_rbf, hidden_channels) self.combine = nn.Linear(hidden_channels * 2, hidden_channels) self.cutoff = CosineCutoff(cutoff_lower, cutoff_upper) self.reset_parameters() def reset_parameters(self): self.embedding.reset_parameters() nn.init.xavier_uniform_(self.distance_proj.weight) nn.init.xavier_uniform_(self.combine.weight) self.distance_proj.bias.data.fill_(0) self.combine.bias.data.fill_(0) def forward(self, z, x, edge_index, edge_weight, edge_attr): # remove self loops mask = edge_index[0] != edge_index[1] if not mask.all(): edge_index = edge_index[:, mask] edge_weight = edge_weight[mask] edge_attr = edge_attr[mask] C = self.cutoff(edge_weight) W = self.distance_proj(edge_attr) * C.view(-1, 1) x_neighbors = self.embedding(z) # propagate_type: (x: Tensor, W: Tensor) x_neighbors = self.propagate(edge_index, x=x_neighbors, W=W, size=None) x_neighbors = self.combine(torch.cat([x, x_neighbors], dim=1)) return x_neighbors def message(self, x_j, W): return x_j * W class GaussianSmearing(nn.Module): def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0, num_rbf=50, trainable=True): super(GaussianSmearing, self).__init__() self.cutoff_lower = cutoff_lower self.cutoff_upper = cutoff_upper self.num_rbf = num_rbf self.trainable = trainable offset, coeff = self._initial_params() if trainable: self.register_parameter("coeff", nn.Parameter(coeff)) self.register_parameter("offset", nn.Parameter(offset)) else: self.register_buffer("coeff", coeff) self.register_buffer("offset", offset) def _initial_params(self): offset = torch.linspace(self.cutoff_lower, self.cutoff_upper, self.num_rbf) coeff = -0.5 / (offset[1] - offset[0]) ** 2 return offset, coeff def reset_parameters(self): offset, coeff = self._initial_params() self.offset.data.copy_(offset) self.coeff.data.copy_(coeff) def forward(self, dist): dist = dist.unsqueeze(-1) - self.offset return torch.exp(self.coeff * torch.pow(dist, 2)) class ExpNormalSmearing(nn.Module): def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0, num_rbf=50, trainable=True): super(ExpNormalSmearing, 
self).__init__() self.cutoff_lower = cutoff_lower self.cutoff_upper = cutoff_upper self.num_rbf = num_rbf self.trainable = trainable self.cutoff_fn = CosineCutoff(0, cutoff_upper) self.alpha = 5.0 / (cutoff_upper - cutoff_lower) means, betas = self._initial_params() if trainable: self.register_parameter("means", nn.Parameter(means)) self.register_parameter("betas", nn.Parameter(betas)) else: self.register_buffer("means", means) self.register_buffer("betas", betas) def _initial_params(self): # initialize means and betas according to the default values in PhysNet # https://pubs.acs.org/doi/10.1021/acs.jctc.9b00181 start_value = torch.exp( torch.scalar_tensor(-self.cutoff_upper + self.cutoff_lower) ) means = torch.linspace(start_value, 1, self.num_rbf) betas = torch.tensor( [(2 / self.num_rbf * (1 - start_value)) ** -2] * self.num_rbf ) return means, betas def reset_parameters(self): means, betas = self._initial_params() self.means.data.copy_(means) self.betas.data.copy_(betas) def forward(self, dist): dist = dist.unsqueeze(-1) return self.cutoff_fn(dist) * torch.exp( -self.betas * (torch.exp(self.alpha * (-dist + self.cutoff_lower)) - self.means) ** 2 ) class ShiftedSoftplus(nn.Module): def __init__(self): super(ShiftedSoftplus, self).__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, x): return F.softplus(x) - self.shift class CosineCutoff(nn.Module): def __init__(self, cutoff_lower=0.0, cutoff_upper=5.0): super(CosineCutoff, self).__init__() self.cutoff_lower = cutoff_lower self.cutoff_upper = cutoff_upper def forward(self, distances): if self.cutoff_lower > 0: cutoffs = 0.5 * ( torch.cos( math.pi * ( 2 * (distances - self.cutoff_lower) / (self.cutoff_upper - self.cutoff_lower) + 1.0 ) ) + 1.0 ) # remove contributions below the cutoff radius cutoffs = cutoffs * (distances < self.cutoff_upper).float() cutoffs = cutoffs * (distances > self.cutoff_lower).float() return cutoffs else: cutoffs = 0.5 * (torch.cos(distances * math.pi / self.cutoff_upper) + 1.0) # remove contributions beyond the cutoff radius cutoffs = cutoffs * (distances < self.cutoff_upper).float() return cutoffs class Distance(nn.Module): def __init__( self, cutoff_lower, cutoff_upper, max_num_neighbors=32, return_vecs=False, loop=False, ): super(Distance, self).__init__() self.cutoff_lower = cutoff_lower self.cutoff_upper = cutoff_upper self.max_num_neighbors = max_num_neighbors self.return_vecs = return_vecs self.loop = loop def forward(self, pos, batch): edge_index = radius_graph( pos, r=self.cutoff_upper, batch=batch, loop=self.loop, max_num_neighbors=self.max_num_neighbors + 1, ) # make sure we didn't miss any neighbors due to max_num_neighbors assert not ( torch.unique(edge_index[0], return_counts=True)[1] > self.max_num_neighbors ).any(), ( "The neighbor search missed some atoms due to max_num_neighbors being too low. " "Please increase this parameter to include the maximum number of atoms within the cutoff." 
) edge_vec = pos[edge_index[0]] - pos[edge_index[1]] mask : Optional[torch.Tensor]=None if self.loop: # mask out self loops when computing distances because # the norm of 0 produces NaN gradients # NOTE: might influence force predictions as self loop gradients are ignored mask = edge_index[0] != edge_index[1] edge_weight = torch.zeros(edge_vec.size(0), device=edge_vec.device) edge_weight[mask] = torch.norm(edge_vec[mask], dim=-1) else: edge_weight = torch.norm(edge_vec, dim=-1) lower_mask = edge_weight >= self.cutoff_lower if self.loop and mask is not None: # keep self loops even though they might be below the lower cutoff lower_mask = lower_mask | ~mask edge_index = edge_index[:, lower_mask] edge_weight = edge_weight[lower_mask] if self.return_vecs: edge_vec = edge_vec[lower_mask] return edge_index, edge_weight, edge_vec # TODO: return only `edge_index` and `edge_weight` once # Union typing works with TorchScript (https://github.com/pytorch/pytorch/pull/53180) return edge_index, edge_weight, None class GatedEquivariantBlock(nn.Module): """Gated Equivariant Block as defined in Schütt et al. (2021): Equivariant message passing for the prediction of tensorial properties and molecular spectra """ def __init__( self, hidden_channels, out_channels, intermediate_channels=None, activation="silu", scalar_activation=False, ): super(GatedEquivariantBlock, self).__init__() self.out_channels = out_channels if intermediate_channels is None: intermediate_channels = hidden_channels self.vec1_proj = nn.Linear(hidden_channels, hidden_channels, bias=False) self.vec2_proj = nn.Linear(hidden_channels, out_channels, bias=False) act_class = act_class_mapping[activation] self.update_net = nn.Sequential( nn.Linear(hidden_channels * 2, intermediate_channels), act_class(), nn.Linear(intermediate_channels, out_channels * 2), ) self.act = act_class() if scalar_activation else None def reset_parameters(self): nn.init.xavier_uniform_(self.vec1_proj.weight) nn.init.xavier_uniform_(self.vec2_proj.weight) nn.init.xavier_uniform_(self.update_net[0].weight) self.update_net[0].bias.data.fill_(0) nn.init.xavier_uniform_(self.update_net[2].weight) self.update_net[2].bias.data.fill_(0) def forward(self, x, v): vec1_buffer = self.vec1_proj(v) # detach zero-entries to avoid NaN gradients during force loss backpropagation vec1 = torch.zeros( vec1_buffer.size(0), vec1_buffer.size(2), device=vec1_buffer.device ) mask = (vec1_buffer != 0).view(vec1_buffer.size(0), -1).any(dim=1) if not mask.all(): warnings.warn( ( f"Skipping gradients for {(~mask).sum()} atoms due to vector features being zero. " "This is likely due to atoms being outside the cutoff radius of any other atom. " "These atoms will not interact with any other atom unless you change the cutoff." ) ) vec1[mask] = torch.norm(vec1_buffer[mask], dim=-2) vec2 = self.vec2_proj(v) x = torch.cat([x, vec1], dim=-1) x, v = torch.split(self.update_net(x), self.out_channels, dim=-1) v = v.unsqueeze(1) * vec2 if self.act is not None: x = self.act(x) return x, v rbf_class_mapping = {"gauss": GaussianSmearing, "expnorm": ExpNormalSmearing} act_class_mapping = { "ssp": ShiftedSoftplus, "silu": nn.SiLU, "tanh": nn.Tanh, "sigmoid": nn.Sigmoid, }
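The basis classes above are easiest to sanity-check numerically. A minimal sketch, assuming only that PyTorch is installed; it recomputes the Gaussian smearing formula by hand instead of importing the module (whose import path is not shown in this file):

import torch

# Reproduce GaussianSmearing's expansion for a few distances (pure torch; no
# import of the module above, since its package name is not given here).
cutoff_lower, cutoff_upper, num_rbf = 0.0, 5.0, 8
offset = torch.linspace(cutoff_lower, cutoff_upper, num_rbf)
coeff = -0.5 / (offset[1] - offset[0]) ** 2

dist = torch.tensor([0.5, 1.7, 4.9])                      # example pair distances
expanded = torch.exp(coeff * (dist.unsqueeze(-1) - offset) ** 2)
print(expanded.shape)                                     # torch.Size([3, 8])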
py
b414722bd58b9dc6106026a35b353cd530a6c95e
from os import path, system
from netCDF4 import Dataset
import numpy as np
import pytest
import sys
try:
    from mpi4py import MPI
except ImportError:
    MPI = None


@pytest.mark.skipif(sys.platform.startswith("darwin"), reason="skipping macOS test as problem with file in pytest")
def test_mpi_run(tmpdir):
    if MPI:
        repeatdt = 200*86400
        stommel_file = path.join(path.dirname(__file__), '..', 'parcels', 'examples', 'example_stommel.py')
        outputMPI = tmpdir.join('StommelMPI.nc')
        outputNoMPI = tmpdir.join('StommelNoMPI.nc')

        system('mpirun -np 2 python %s -p 4 -o %s -r %d' % (stommel_file, outputMPI, repeatdt))
        system('python %s -p 4 -o %s -r %d' % (stommel_file, outputNoMPI, repeatdt))

        ncfile1 = Dataset(outputMPI, 'r', 'NETCDF4')
        ncfile2 = Dataset(outputNoMPI, 'r', 'NETCDF4')

        for v in ncfile2.variables.keys():
            assert np.allclose(ncfile1.variables[v][:], ncfile2.variables[v][:])

        for a in ncfile2.ncattrs():
            if a != 'parcels_version':
                assert getattr(ncfile1, a) == getattr(ncfile2, a)

        ncfile1.close()
        ncfile2.close()
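The core of the test is the variable-by-variable comparison of the two NetCDF outputs. A small, hedged helper that performs the same check in isolation (the file paths are placeholders, not part of the original test):

from netCDF4 import Dataset
import numpy as np

def outputs_match(path_a, path_b, ignore_attrs=('parcels_version',)):
    """Return True if two particle-file outputs agree on data and metadata."""
    with Dataset(path_a) as a, Dataset(path_b) as b:
        data_equal = all(np.allclose(a.variables[v][:], b.variables[v][:])
                         for v in b.variables)
        attrs_equal = all(getattr(a, k) == getattr(b, k)
                          for k in b.ncattrs() if k not in ignore_attrs)
    return data_equal and attrs_equal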
py
b414727eda6f9d70cef7eeda3d589d4366e3a32e
# Read an arbitrary angle and compute its sine, cosine and tangent
from math import sin, cos, tan, radians

ang = float(input('Enter an angle: '))
angrad = radians(ang)
sen = sin(angrad)
coss = cos(angrad)
tang = tan(angrad)
print('The angle {}º has:\nSine: {:.2f}\nCosine: {:.2f}\nTangent: {:.2f}'.format(ang, sen, coss, tang))
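The same computation packaged as a reusable helper, as a sketch (the function name is ours, not part of the exercise):

import math

def trig_summary(angle_deg: float) -> dict:
    """Return sine, cosine and tangent of an angle given in degrees."""
    rad = math.radians(angle_deg)
    return {'sin': math.sin(rad), 'cos': math.cos(rad), 'tan': math.tan(rad)}

print(trig_summary(30))  # {'sin': 0.4999..., 'cos': 0.8660..., 'tan': 0.5773...}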
py
b41472953b787412a4f3d5c9ed32aeb9b3f63c45
# <snippet> import pandas as pd from ruamel import yaml import great_expectations as ge from great_expectations import DataContext from great_expectations.core import ExpectationSuite from great_expectations.core.batch import RuntimeBatchRequest from great_expectations.validator.validator import Validator # </snippet> # <snippet> context: DataContext = ge.get_context() # </snippet> # create and load Expectation Suite # <snippet> context.create_expectation_suite( expectation_suite_name="insert_your_expectation_suite_name_here" ) # </snippet> # <snippet> suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name="insert_your_expectation_suite_name_here" ) # </snippet> # <snippet> datasource_yaml = f""" name: my_pandas_datasource class_name: Datasource module_name: great_expectations.datasource execution_engine: module_name: great_expectations.execution_engine class_name: PandasExecutionEngine data_connectors: my_runtime_data_connector: class_name: RuntimeDataConnector batch_identifiers: - some_key_maybe_pipeline_stage - some_other_key_maybe_airflow_run_id """ # </snippet> # <snippet> context.add_datasource(**yaml.load(datasource_yaml)) # </snippet> # RuntimeBatchRequest with batch_data as Pandas Dataframe # <snippet> path_to_file: str = "some_path.csv" # </snippet> # Please note this override is only to provide good UX for docs and tests. path_to_file: str = "./data/yellow_tripdata_sample_2019-01.csv" # <snippet> df: pd.DataFrame = pd.read_csv(path_to_file) # </snippet> # <snippet> runtime_batch_request = RuntimeBatchRequest( datasource_name="my_pandas_datasource", data_connector_name="my_runtime_data_connector", data_asset_name="insert_your_data_asset_name_here", runtime_parameters={"batch_data": df}, batch_identifiers={ "some_key_maybe_pipeline_stage": "ingestion step 1", "some_other_key_maybe_airflow_run_id": "run 18", }, ) # </snippet> # RuntimeBatchRequest with path # <snippet> runtime_batch_request = RuntimeBatchRequest( datasource_name="my_pandas_datasource", data_connector_name="my_runtime_data_connector", data_asset_name="insert_your_data_asset_name_here", runtime_parameters={"path": path_to_file}, batch_identifiers={ "some_key_maybe_pipeline_stage": "ingestion step 1", "some_other_key_maybe_airflow_run_id": "run 18", }, batch_spec_passthrough={ "reader_method": "read_csv", "reader_options": {"sep": ",", "header": 0}, }, ) # </snippet> # Constructing Validator by passing in RuntimeBatchRequest # <snippet> my_validator: Validator = context.get_validator( batch_request=runtime_batch_request, expectation_suite=suite, # OR # expectation_suite_name=suite_name ) # </snippet> # my_validator.head() # Constructing Validator by passing in arguments # <snippet> my_validator: Validator = context.get_validator( datasource_name="my_pandas_datasource", data_connector_name="my_runtime_data_connector", data_asset_name="insert_your_data_asset_name_here", runtime_parameters={"path": path_to_file}, batch_identifiers={ "some_key_maybe_pipeline_stage": "ingestion step 1", "some_other_key_maybe_airflow_run_id": "run 18", }, batch_spec_passthrough={ "reader_method": "read_csv", "reader_options": {"sep": ",", "header": 0}, }, expectation_suite=suite, # OR # expectation_suite_name=suite_name ) # </snippet> # <snippet> my_validator.head() # </snippet>
py
b4147298b256b99f6c66496b104303ca21ea7471
# coding: utf-8 """ Xero Payroll AU API This is the Xero Payroll API for orgs in Australia region. # noqa: E501 Contact: [email protected] Generated by: https://openapi-generator.tech """ import re # noqa: F401 from xero_python.models import BaseModel class PayRun(BaseModel): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { "payroll_calendar_id": "str", "pay_run_id": "str", "pay_run_period_start_date": "date[ms-format]", "pay_run_period_end_date": "date[ms-format]", "pay_run_status": "PayRunStatus", "payment_date": "date[ms-format]", "payslip_message": "str", "updated_date_utc": "datetime[ms-format]", "payslips": "list[PayslipSummary]", "wages": "float", "deductions": "float", "tax": "float", "super": "float", "reimbursement": "float", "net_pay": "float", "validation_errors": "list[ValidationError]", } attribute_map = { "payroll_calendar_id": "PayrollCalendarID", "pay_run_id": "PayRunID", "pay_run_period_start_date": "PayRunPeriodStartDate", "pay_run_period_end_date": "PayRunPeriodEndDate", "pay_run_status": "PayRunStatus", "payment_date": "PaymentDate", "payslip_message": "PayslipMessage", "updated_date_utc": "UpdatedDateUTC", "payslips": "Payslips", "wages": "Wages", "deductions": "Deductions", "tax": "Tax", "super": "Super", "reimbursement": "Reimbursement", "net_pay": "NetPay", "validation_errors": "ValidationErrors", } def __init__( self, payroll_calendar_id=None, pay_run_id=None, pay_run_period_start_date=None, pay_run_period_end_date=None, pay_run_status=None, payment_date=None, payslip_message=None, updated_date_utc=None, payslips=None, wages=None, deductions=None, tax=None, super=None, reimbursement=None, net_pay=None, validation_errors=None, ): # noqa: E501 """PayRun - a model defined in OpenAPI""" # noqa: E501 self._payroll_calendar_id = None self._pay_run_id = None self._pay_run_period_start_date = None self._pay_run_period_end_date = None self._pay_run_status = None self._payment_date = None self._payslip_message = None self._updated_date_utc = None self._payslips = None self._wages = None self._deductions = None self._tax = None self._super = None self._reimbursement = None self._net_pay = None self._validation_errors = None self.discriminator = None self.payroll_calendar_id = payroll_calendar_id if pay_run_id is not None: self.pay_run_id = pay_run_id if pay_run_period_start_date is not None: self.pay_run_period_start_date = pay_run_period_start_date if pay_run_period_end_date is not None: self.pay_run_period_end_date = pay_run_period_end_date if pay_run_status is not None: self.pay_run_status = pay_run_status if payment_date is not None: self.payment_date = payment_date if payslip_message is not None: self.payslip_message = payslip_message if updated_date_utc is not None: self.updated_date_utc = updated_date_utc if payslips is not None: self.payslips = payslips if wages is not None: self.wages = wages if deductions is not None: self.deductions = deductions if tax is not None: self.tax = tax if super is not None: self.super = super if reimbursement is not None: self.reimbursement = reimbursement if net_pay is not None: self.net_pay = net_pay if validation_errors is not None: self.validation_errors = validation_errors @property def payroll_calendar_id(self): """Gets the payroll_calendar_id 
of this PayRun. # noqa: E501 Xero identifier for pay run # noqa: E501 :return: The payroll_calendar_id of this PayRun. # noqa: E501 :rtype: str """ return self._payroll_calendar_id @payroll_calendar_id.setter def payroll_calendar_id(self, payroll_calendar_id): """Sets the payroll_calendar_id of this PayRun. Xero identifier for pay run # noqa: E501 :param payroll_calendar_id: The payroll_calendar_id of this PayRun. # noqa: E501 :type: str """ if payroll_calendar_id is None: raise ValueError( "Invalid value for `payroll_calendar_id`, must not be `None`" ) # noqa: E501 self._payroll_calendar_id = payroll_calendar_id @property def pay_run_id(self): """Gets the pay_run_id of this PayRun. # noqa: E501 Xero identifier for pay run # noqa: E501 :return: The pay_run_id of this PayRun. # noqa: E501 :rtype: str """ return self._pay_run_id @pay_run_id.setter def pay_run_id(self, pay_run_id): """Sets the pay_run_id of this PayRun. Xero identifier for pay run # noqa: E501 :param pay_run_id: The pay_run_id of this PayRun. # noqa: E501 :type: str """ self._pay_run_id = pay_run_id @property def pay_run_period_start_date(self): """Gets the pay_run_period_start_date of this PayRun. # noqa: E501 Period Start Date for the PayRun (YYYY-MM-DD) # noqa: E501 :return: The pay_run_period_start_date of this PayRun. # noqa: E501 :rtype: date """ return self._pay_run_period_start_date @pay_run_period_start_date.setter def pay_run_period_start_date(self, pay_run_period_start_date): """Sets the pay_run_period_start_date of this PayRun. Period Start Date for the PayRun (YYYY-MM-DD) # noqa: E501 :param pay_run_period_start_date: The pay_run_period_start_date of this PayRun. # noqa: E501 :type: date """ self._pay_run_period_start_date = pay_run_period_start_date @property def pay_run_period_end_date(self): """Gets the pay_run_period_end_date of this PayRun. # noqa: E501 Period End Date for the PayRun (YYYY-MM-DD) # noqa: E501 :return: The pay_run_period_end_date of this PayRun. # noqa: E501 :rtype: date """ return self._pay_run_period_end_date @pay_run_period_end_date.setter def pay_run_period_end_date(self, pay_run_period_end_date): """Sets the pay_run_period_end_date of this PayRun. Period End Date for the PayRun (YYYY-MM-DD) # noqa: E501 :param pay_run_period_end_date: The pay_run_period_end_date of this PayRun. # noqa: E501 :type: date """ self._pay_run_period_end_date = pay_run_period_end_date @property def pay_run_status(self): """Gets the pay_run_status of this PayRun. # noqa: E501 :return: The pay_run_status of this PayRun. # noqa: E501 :rtype: PayRunStatus """ return self._pay_run_status @pay_run_status.setter def pay_run_status(self, pay_run_status): """Sets the pay_run_status of this PayRun. :param pay_run_status: The pay_run_status of this PayRun. # noqa: E501 :type: PayRunStatus """ self._pay_run_status = pay_run_status @property def payment_date(self): """Gets the payment_date of this PayRun. # noqa: E501 Payment Date for the PayRun (YYYY-MM-DD) # noqa: E501 :return: The payment_date of this PayRun. # noqa: E501 :rtype: date """ return self._payment_date @payment_date.setter def payment_date(self, payment_date): """Sets the payment_date of this PayRun. Payment Date for the PayRun (YYYY-MM-DD) # noqa: E501 :param payment_date: The payment_date of this PayRun. # noqa: E501 :type: date """ self._payment_date = payment_date @property def payslip_message(self): """Gets the payslip_message of this PayRun. # noqa: E501 Payslip message for the PayRun # noqa: E501 :return: The payslip_message of this PayRun. 
# noqa: E501 :rtype: str """ return self._payslip_message @payslip_message.setter def payslip_message(self, payslip_message): """Sets the payslip_message of this PayRun. Payslip message for the PayRun # noqa: E501 :param payslip_message: The payslip_message of this PayRun. # noqa: E501 :type: str """ self._payslip_message = payslip_message @property def updated_date_utc(self): """Gets the updated_date_utc of this PayRun. # noqa: E501 Last modified timestamp # noqa: E501 :return: The updated_date_utc of this PayRun. # noqa: E501 :rtype: datetime """ return self._updated_date_utc @updated_date_utc.setter def updated_date_utc(self, updated_date_utc): """Sets the updated_date_utc of this PayRun. Last modified timestamp # noqa: E501 :param updated_date_utc: The updated_date_utc of this PayRun. # noqa: E501 :type: datetime """ self._updated_date_utc = updated_date_utc @property def payslips(self): """Gets the payslips of this PayRun. # noqa: E501 The payslips in the payrun # noqa: E501 :return: The payslips of this PayRun. # noqa: E501 :rtype: list[PayslipSummary] """ return self._payslips @payslips.setter def payslips(self, payslips): """Sets the payslips of this PayRun. The payslips in the payrun # noqa: E501 :param payslips: The payslips of this PayRun. # noqa: E501 :type: list[PayslipSummary] """ self._payslips = payslips @property def wages(self): """Gets the wages of this PayRun. # noqa: E501 The total Wages for the Payrun # noqa: E501 :return: The wages of this PayRun. # noqa: E501 :rtype: float """ return self._wages @wages.setter def wages(self, wages): """Sets the wages of this PayRun. The total Wages for the Payrun # noqa: E501 :param wages: The wages of this PayRun. # noqa: E501 :type: float """ self._wages = wages @property def deductions(self): """Gets the deductions of this PayRun. # noqa: E501 The total Deductions for the Payrun # noqa: E501 :return: The deductions of this PayRun. # noqa: E501 :rtype: float """ return self._deductions @deductions.setter def deductions(self, deductions): """Sets the deductions of this PayRun. The total Deductions for the Payrun # noqa: E501 :param deductions: The deductions of this PayRun. # noqa: E501 :type: float """ self._deductions = deductions @property def tax(self): """Gets the tax of this PayRun. # noqa: E501 The total Tax for the Payrun # noqa: E501 :return: The tax of this PayRun. # noqa: E501 :rtype: float """ return self._tax @tax.setter def tax(self, tax): """Sets the tax of this PayRun. The total Tax for the Payrun # noqa: E501 :param tax: The tax of this PayRun. # noqa: E501 :type: float """ self._tax = tax @property def super(self): """Gets the super of this PayRun. # noqa: E501 The total Super for the Payrun # noqa: E501 :return: The super of this PayRun. # noqa: E501 :rtype: float """ return self._super @super.setter def super(self, super): """Sets the super of this PayRun. The total Super for the Payrun # noqa: E501 :param super: The super of this PayRun. # noqa: E501 :type: float """ self._super = super @property def reimbursement(self): """Gets the reimbursement of this PayRun. # noqa: E501 The total Reimbursements for the Payrun # noqa: E501 :return: The reimbursement of this PayRun. # noqa: E501 :rtype: float """ return self._reimbursement @reimbursement.setter def reimbursement(self, reimbursement): """Sets the reimbursement of this PayRun. The total Reimbursements for the Payrun # noqa: E501 :param reimbursement: The reimbursement of this PayRun. 
# noqa: E501 :type: float """ self._reimbursement = reimbursement @property def net_pay(self): """Gets the net_pay of this PayRun. # noqa: E501 The total NetPay for the Payrun # noqa: E501 :return: The net_pay of this PayRun. # noqa: E501 :rtype: float """ return self._net_pay @net_pay.setter def net_pay(self, net_pay): """Sets the net_pay of this PayRun. The total NetPay for the Payrun # noqa: E501 :param net_pay: The net_pay of this PayRun. # noqa: E501 :type: float """ self._net_pay = net_pay @property def validation_errors(self): """Gets the validation_errors of this PayRun. # noqa: E501 Displays array of validation error messages from the API # noqa: E501 :return: The validation_errors of this PayRun. # noqa: E501 :rtype: list[ValidationError] """ return self._validation_errors @validation_errors.setter def validation_errors(self, validation_errors): """Sets the validation_errors of this PayRun. Displays array of validation error messages from the API # noqa: E501 :param validation_errors: The validation_errors of this PayRun. # noqa: E501 :type: list[ValidationError] """ self._validation_errors = validation_errors
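A minimal, hedged usage sketch for the generated model; the import path and the identifier value are assumptions, not taken from the file above.

from datetime import date
# from xero_python.payrollau.models import PayRun   # assumed import path

pay_run = PayRun(
    payroll_calendar_id="00000000-0000-0000-0000-000000000000",  # placeholder GUID
    pay_run_period_start_date=date(2021, 7, 1),
    pay_run_period_end_date=date(2021, 7, 14),
    payment_date=date(2021, 7, 15),
)
print(pay_run.payroll_calendar_id)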
py
b41472adbd96659daa2185f92fedb0c952f16f49
# global import math import tensorflow as tf from numbers import Number from typing import Union, Tuple, Optional, List # Array API Standard # # -------------------# def concat(xs: List[tf.Tensor], axis: int = 0) -> Union[tf.Tensor, tf.Variable]: is_tuple = type(xs) is tuple is_axis_none = axis is None if is_tuple: xs = list(xs) highest_dtype = xs[0].dtype for i in xs: highest_dtype = tf.experimental.numpy.promote_types(highest_dtype, i.dtype) for i in range(len(xs)): if is_axis_none: xs[i] = tf.reshape(xs[i], -1) xs[i] = tf.cast(xs[i], highest_dtype) if is_axis_none: axis = 0 if is_tuple: xs = tuple(xs) ret = tf.concat(xs, axis) return ret def expand_dims( x: Union[tf.Tensor, tf.Variable], axis: int = 0, ) -> Union[tf.Tensor, tf.Variable]: try: ret = tf.expand_dims(x, axis) return ret except tf.errors.InvalidArgumentError as error: raise IndexError(error) def flip( x: Union[tf.Tensor, tf.Variable], axis: Optional[Union[int, Tuple[int], List[int]]] = None, ) -> Union[tf.Tensor, tf.Variable]: num_dims = len(x.shape) if not num_dims: ret = x else: if axis is None: new_axis = list(range(num_dims)) else: new_axis = axis if type(new_axis) is int: new_axis = [new_axis] else: new_axis = new_axis new_axis = [item + num_dims if item < 0 else item for item in new_axis] ret = tf.reverse(x, new_axis) return ret def permute_dims( x: Union[tf.Tensor, tf.Variable], axes: Tuple[int, ...], ) -> Union[tf.Tensor, tf.Variable]: ret = tf.transpose(x, perm=axes) return ret def reshape( x: Union[tf.Tensor, tf.Variable], shape: Tuple[int, ...], ) -> Union[tf.Tensor, tf.Variable]: ret = tf.reshape(x, shape) return ret def roll( x: Union[tf.Tensor, tf.Variable], shift: Union[int, Tuple[int, ...]], axis: Optional[Union[int, Tuple[int, ...]]] = None, ) -> Union[tf.Tensor, tf.Variable]: if axis is None: originalShape = x.shape axis = 0 x = tf.reshape(x, [-1]) roll = tf.roll(x, shift, axis) ret = tf.reshape(roll, originalShape) else: if isinstance(shift, int) and (type(axis) in [list, tuple]): shift = [shift for _ in range(len(axis))] ret = tf.roll(x, shift, axis) return ret def squeeze( x: Union[tf.Tensor, tf.Variable], axis: Union[int, Tuple[int], List[int]], ) -> Union[tf.Tensor, tf.Variable]: if isinstance(axis, int): if x.shape[axis] > 1: raise ValueError( "Expected dimension of size 1, but found dimension size {}".format( x.shape[axis] ) ) ret = tf.squeeze(x, axis) else: if isinstance(axis, tuple): axis = list(axis) normalise_axis = [ (len(x.shape) - abs(element)) if element < 0 else element for element in axis ] normalise_axis.sort() axis_updated_after_squeeze = [ dim - key for (key, dim) in enumerate(normalise_axis) ] for i in axis_updated_after_squeeze: if x.shape[i] > 1: raise ValueError( "Expected dimension of size 1, but found dimension size {}".format( x.shape[i] ) ) else: x = tf.squeeze(x, i) ret = x return ret def stack( x: Union[Tuple[tf.Tensor], List[tf.Tensor]], axis: Optional[int] = 0, ) -> Union[tf.Tensor, tf.Variable]: ret = tf.experimental.numpy.stack(x, axis) return ret # Extra # # ------# def split(x, num_or_size_splits=None, axis=0, with_remainder=False): if x.shape == (): if num_or_size_splits is not None and num_or_size_splits != 1: raise Exception( "input array had no shape, but num_sections specified was {}".format( num_or_size_splits ) ) return [x] if num_or_size_splits is None: dim_size = tf.shape(x)[axis] num_or_size_splits = dim_size elif isinstance(num_or_size_splits, int) and with_remainder: num_chunks = x.shape[axis] / num_or_size_splits num_chunks_int = math.floor(num_chunks) 
remainder = num_chunks - num_chunks_int if remainder != 0: num_or_size_splits = [num_or_size_splits] * num_chunks_int + [ int(remainder * num_or_size_splits) ] return tf.split(x, num_or_size_splits, axis) def repeat( x: Union[tf.Tensor, tf.Variable], repeats: Union[int, List[int]], axis: int = None, ) -> Union[tf.Tensor, tf.Variable]: ret = tf.repeat(x, repeats, axis) return ret def tile(x, reps): if x.shape == (): x = tf.reshape(x, (-1,)) if isinstance(reps, Number): reps = [reps] if isinstance(reps, tf.Tensor) and reps.shape == (): reps = tf.reshape(reps, (-1,)) ret = tf.tile(x, reps) return ret def constant_pad(x, pad_width, value=0): if x.shape == (): x = tf.reshape(x, (-1,)) ret = tf.pad(x, pad_width, constant_values=value) return ret def zero_pad(x, pad_width): if x.shape == (): x = tf.reshape(x, (-1,)) ret = tf.pad(x, pad_width) return ret def swapaxes(x, axis0, axis1): x_shape = x.shape num_dims = len(x_shape) axis0 %= num_dims axis1 %= num_dims config = list(range(num_dims)) config.pop(axis0) config.insert(axis0, axis1) config.pop(axis1) config.insert(axis1, axis0) ret = tf.transpose(x, config) return ret def clip( x: Union[tf.Tensor, tf.Variable], x_min: Union[Number, tf.Tensor, tf.Variable], x_max: Union[Number, tf.Tensor, tf.Variable], ) -> Union[tf.Tensor, tf.Variable]: if hasattr(x_min, "dtype") and hasattr(x_max, "dtype"): promoted_type = tf.experimental.numpy.promote_types(x.dtype, x_min.dtype) promoted_type = tf.experimental.numpy.promote_types(promoted_type, x_max.dtype) x = tf.cast(x, promoted_type) x_min = tf.cast(x_min, promoted_type) x_max = tf.cast(x_max, promoted_type) if tf.size(x) == 0: ret = x elif x.dtype == tf.bool: ret = tf.clip_by_value(tf.cast(x, tf.float16), x_min, x_max) ret = tf.cast(ret, x.dtype) else: ret = tf.clip_by_value(x, x_min, x_max) return ret
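A quick sanity sketch of the axis=None branch of roll above, written directly against TensorFlow so it runs without importing this backend module:

import tensorflow as tf

x = tf.reshape(tf.range(6), (2, 3))
# Roll over the flattened tensor, then restore the original shape,
# mirroring what roll(x, shift, axis=None) does above.
flat_rolled = tf.reshape(tf.roll(tf.reshape(x, [-1]), shift=2, axis=0), x.shape)
print(flat_rolled.numpy())  # [[4 5 0], [1 2 3]]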
py
b41472d0ce3fe9b445029f0f155a6a25b95762bf
from pathlib import Path from typing import Optional, List {%- if cookiecutter.use_redis == 'y' %} from functools import partial {%- endif %} import aiohttp_jinja2 {%- if cookiecutter.use_postgres == 'y' %} import aiopg.sa {%- endif %} from aiohttp import web {%- if cookiecutter.use_redis == 'y' %} import aioredis {%- endif %} import jinja2 from {{ cookiecutter.project_name }}.routes import init_routes from {{ cookiecutter.project_name }}.utils.common import init_config path = Path(__file__).parent def init_jinja2(app: web.Application) -> None: ''' Initialize jinja2 template for application. ''' aiohttp_jinja2.setup( app, loader=jinja2.FileSystemLoader(str(path / 'templates')) ) {%- if cookiecutter.use_postgres == 'y' %} async def database(app: web.Application) -> None: ''' A function that, when the server is started, connects to postgresql, and after stopping it breaks the connection (after yield) ''' config = app['config']['postgres'] engine = await aiopg.sa.create_engine(**config) app['db'] = engine yield app['db'].close() await app['db'].wait_closed() {%- endif %} {%- if cookiecutter.use_redis == 'y' %} async def redis(app: web.Application) -> None: ''' A function that, when the server is started, connects to redis, and after stopping it breaks the connection (after yield) ''' config = app['config']['redis'] create_redis = partial( aioredis.create_redis, f'redis://{config["host"]}:{config["port"]}' ) sub = await create_redis() pub = await create_redis() app['redis_sub'] = sub app['redis_pub'] = pub app['create_redis'] = create_redis yield app['redis_sub'].close() app['redis_pub'].close() await app['redis_sub'].wait_closed() await app['redis_pub'].wait_closed() {%- endif %} def init_app(config: Optional[List[str]] = None) -> web.Application: app = web.Application() init_jinja2(app) init_config(app, config=config) init_routes(app) {%- if cookiecutter.use_postgres == 'y' and cookiecutter.use_redis == 'y' %} app.cleanup_ctx.extend([ redis, database, ]) {%- elif cookiecutter.use_postgres == 'y' %} app.cleanup_ctx.extend([ database, ]) {%- elif cookiecutter.use_redis == 'y' %} app.cleanup_ctx.extend([ redis, ]) {%- endif %} return app
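A hedged entry point for the factory above (the cookiecutter template may already ship its own runner; the host and port values are placeholders, and init_app is the function defined in this file):

# Typically placed in a __main__ module next to this file.
from aiohttp import web

if __name__ == '__main__':
    web.run_app(init_app(), host='127.0.0.1', port=8080)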
py
b414730ba89eba95bd31e7e4e9bc8bb65525348a
#!/usr/bin/env python # # Copyright (C) 2014 Narf Industries <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # debug for machine.py DEBUG = False CONFIG = { 'DEFAULT_DEBUG_LEVEL': 0, 'UNDELIVERABLE_MAIL_ADDR': 0, 'LOST_MAIL_ADDR': 1, 'RECEIVED_MAIL_ADDR': 2, 'SORTED_MAIL_ADDR': 3, 'MIN_CUSTOMER_ADDR': 10, 'MAX_SUBJ_LEN': 128, 'MAX_BODY_LEN': 256, } ERRORS = { }
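How the table above is consumed is not shown here (machine.py is a separate file), so the accessor below is purely illustrative:

def cfg(key, default=None):
    """Look up a CONFIG entry with an optional fallback (hypothetical helper)."""
    return CONFIG.get(key, default)

assert cfg('MAX_SUBJ_LEN') == 128
assert cfg('DEFAULT_DEBUG_LEVEL', 0) == 0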
py
b41473782c94bf06189eb37a6fc86b5b7c09769c
""" The MIT License (MIT) Copyright (c) 2021 xXSergeyXx Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---------------------------------------------------------------------- Авторские права (c) 2021 xXSergeyXx Данная лицензия разрешает лицам, получившим копию данного программного обеспечения и сопутствующей документации (в дальнейшем именуемыми «Программное обеспечение»), безвозмездно использовать Программное обеспечение без ограничений, включая неограниченное право на использование, копирование, изменение, слияние, публикацию, распространение, сублицензирование и/или продажу копий Программного обеспечения, а также лицам, которым предоставляется данное Программное обеспечение, при соблюдении следующих условий: Указанное выше уведомление об авторском праве и данные условия должны быть включены во все копии или значимые части данного Программного обеспечения. ДАННОЕ ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ «КАК ЕСТЬ», БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНО ВЫРАЖЕННЫХ ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ ГАРАНТИИ ТОВАРНОЙ ПРИГОДНОСТИ, СООТВЕТСТВИЯ ПО ЕГО КОНКРЕТНОМУ НАЗНАЧЕНИЮ И ОТСУТСТВИЯ НАРУШЕНИЙ, НО НЕ ОГРАНИЧИВАЯСЬ ИМИ. НИ В КАКОМ СЛУЧАЕ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ НЕ НЕСУТ ОТВЕТСТВЕННОСТИ ПО КАКИМ-ЛИБО ИСКАМ, ЗА УЩЕРБ ИЛИ ПО ИНЫМ ТРЕБОВАНИЯМ, В ТОМ ЧИСЛЕ, ПРИ ДЕЙСТВИИ КОНТРАКТА, ДЕЛИКТЕ ИЛИ ИНОЙ СИТУАЦИИ, ВОЗНИКШИМ ИЗ-ЗА ИСПОЛЬЗОВАНИЯ ПРОГРАММНОГО ОБЕСПЕЧЕНИЯ ИЛИ ИНЫХ ДЕЙСТВИЙ С ПРОГРАММНЫМ ОБЕСПЕЧЕНИЕМ. """ from __future__ import annotations from typing import Callable, Dict, Iterable, List, Optional, Union, TYPE_CHECKING import time import asyncio from .mixins import Hashable from .abc import Messageable from .enums import ChannelType, try_enum from .errors import ClientException from .utils import MISSING, parse_time, _get_as_snowflake __all__ = ( 'Thread', 'ThreadMember', ) if TYPE_CHECKING: from .types.threads import ( Thread as ThreadPayload, ThreadMember as ThreadMemberPayload, ThreadMetadata, ThreadArchiveDuration, ) from .types.snowflake import SnowflakeList from .guild import Guild from .channel import TextChannel, CategoryChannel from .member import Member from .message import Message, PartialMessage from .abc import Snowflake, SnowflakeTime from .role import Role from .permissions import Permissions from .state import ConnectionState class Thread(Messageable, Hashable): """Represents a Discord thread. .. container:: operations .. describe:: x == y Checks if two threads are equal. .. describe:: x != y Checks if two threads are not equal. .. describe:: hash(x) Returns the thread's hash. .. describe:: str(x) Returns the thread's name. .. 
versionadded:: 2.0 Attributes ----------- name: :class:`str` The thread name. guild: :class:`Guild` The guild the thread belongs to. id: :class:`int` The thread ID. parent_id: :class:`int` The parent :class:`TextChannel` ID this thread belongs to. owner_id: :class:`int` The user's ID that created this thread. last_message_id: Optional[:class:`int`] The last message ID of the message sent to this thread. It may *not* point to an existing or valid message. slowmode_delay: :class:`int` The number of seconds a member must wait between sending messages in this thread. A value of `0` denotes that it is disabled. Bots and users with :attr:`~Permissions.manage_channels` or :attr:`~Permissions.manage_messages` bypass slowmode. message_count: :class:`int` An approximate number of messages in this thread. This caps at 50. member_count: :class:`int` An approximate number of members in this thread. This caps at 50. me: Optional[:class:`ThreadMember`] A thread member representing yourself, if you've joined the thread. This could not be available. archived: :class:`bool` Whether the thread is archived. locked: :class:`bool` Whether the thread is locked. invitable: :class:`bool` Whether non-moderators can add other non-moderators to this thread. This is always ``True`` for public threads. archiver_id: Optional[:class:`int`] The user's ID that archived this thread. auto_archive_duration: :class:`int` The duration in minutes until the thread is automatically archived due to inactivity. Usually a value of 60, 1440, 4320 and 10080. archive_timestamp: :class:`datetime.datetime` An aware timestamp of when the thread's archived status was last updated in UTC. """ __slots__ = ( 'name', 'id', 'guild', '_type', '_state', '_members', 'owner_id', 'parent_id', 'last_message_id', 'message_count', 'member_count', 'slowmode_delay', 'me', 'locked', 'archived', 'invitable', 'archiver_id', 'auto_archive_duration', 'archive_timestamp', ) def __init__(self, *, guild: Guild, state: ConnectionState, data: ThreadPayload): self._state: ConnectionState = state self.guild = guild self._members: Dict[int, ThreadMember] = {} self._from_data(data) async def _get_channel(self): return self def __repr__(self) -> str: return ( f'<Thread id={self.id!r} name={self.name!r} parent={self.parent}' f' owner_id={self.owner_id!r} locked={self.locked} archived={self.archived}>' ) def __str__(self) -> str: return self.name def _from_data(self, data: ThreadPayload): self.id = int(data['id']) self.parent_id = int(data['parent_id']) self.owner_id = int(data['owner_id']) self.name = data['name'] self._type = try_enum(ChannelType, data['type']) self.last_message_id = _get_as_snowflake(data, 'last_message_id') self.slowmode_delay = data.get('rate_limit_per_user', 0) self.message_count = data['message_count'] self.member_count = data['member_count'] self._unroll_metadata(data['thread_metadata']) try: member = data['member'] except KeyError: self.me = None else: self.me = ThreadMember(self, member) def _unroll_metadata(self, data: ThreadMetadata): self.archived = data['archived'] self.archiver_id = _get_as_snowflake(data, 'archiver_id') self.auto_archive_duration = data['auto_archive_duration'] self.archive_timestamp = parse_time(data['archive_timestamp']) self.locked = data.get('locked', False) self.invitable = data.get('invitable', True) def _update(self, data): try: self.name = data['name'] except KeyError: pass self.slowmode_delay = data.get('rate_limit_per_user', 0) try: self._unroll_metadata(data['thread_metadata']) except KeyError: pass @property 
def type(self) -> ChannelType: """:class:`ChannelType`: The channel's Discord type.""" return self._type @property def parent(self) -> Optional[TextChannel]: """Optional[:class:`TextChannel`]: The parent channel this thread belongs to.""" return self.guild.get_channel(self.parent_id) # type: ignore @property def owner(self) -> Optional[Member]: """Optional[:class:`Member`]: The member this thread belongs to.""" return self.guild.get_member(self.owner_id) @property def mention(self) -> str: """:class:`str`: The string that allows you to mention the thread.""" return f'<#{self.id}>' @property def members(self) -> List[ThreadMember]: """List[:class:`ThreadMember`]: A list of thread members in this thread. This requires :attr:`Intents.members` to be properly filled. Most of the time however, this data is not provided by the gateway and a call to :meth:`fetch_members` is needed. """ return list(self._members.values()) @property def last_message(self) -> Optional[Message]: """Fetches the last message from this channel in cache. The message might not be valid or point to an existing message. .. admonition:: Reliable Fetching :class: helpful For a slightly more reliable method of fetching the last message, consider using either :meth:`history` or :meth:`fetch_message` with the :attr:`last_message_id` attribute. Returns --------- Optional[:class:`Message`] The last message in this channel or ``None`` if not found. """ return self._state._get_message(self.last_message_id) if self.last_message_id else None @property def category(self) -> Optional[CategoryChannel]: """The category channel the parent channel belongs to, if applicable. Raises ------- ClientException The parent channel was not cached and returned ``None``. Returns ------- Optional[:class:`CategoryChannel`] The parent channel's category. """ parent = self.parent if parent is None: raise ClientException('Parent channel not found') return parent.category @property def category_id(self) -> Optional[int]: """The category channel ID the parent channel belongs to, if applicable. Raises ------- ClientException The parent channel was not cached and returned ``None``. Returns ------- Optional[:class:`int`] The parent channel's category ID. """ parent = self.parent if parent is None: raise ClientException('Parent channel not found') return parent.category_id def is_private(self) -> bool: """:class:`bool`: Whether the thread is a private thread. A private thread is only viewable by those that have been explicitly invited or have :attr:`~.Permissions.manage_threads`. """ return self._type is ChannelType.private_thread def is_news(self) -> bool: """:class:`bool`: Whether the thread is a news thread. A news thread is a thread that has a parent that is a news channel, i.e. :meth:`.TextChannel.is_news` is ``True``. """ return self._type is ChannelType.news_thread def is_nsfw(self) -> bool: """:class:`bool`: Whether the thread is NSFW or not. An NSFW thread is a thread that has a parent that is an NSFW channel, i.e. :meth:`.TextChannel.is_nsfw` is ``True``. """ parent = self.parent return parent is not None and parent.is_nsfw() def permissions_for(self, obj: Union[Member, Role], /) -> Permissions: """Handles permission resolution for the :class:`~nextcord.Member` or :class:`~nextcord.Role`. Since threads do not have their own permissions, they inherit them from the parent channel. This is a convenience method for calling :meth:`~nextcord.TextChannel.permissions_for` on the parent channel. 
Parameters ---------- obj: Union[:class:`~nextcord.Member`, :class:`~nextcord.Role`] The object to resolve permissions for. This could be either a member or a role. If it's a role then member overwrites are not computed. Raises ------- ClientException The parent channel was not cached and returned ``None`` Returns ------- :class:`~nextcord.Permissions` The resolved permissions for the member or role. """ parent = self.parent if parent is None: raise ClientException('Parent channel not found') return parent.permissions_for(obj) async def delete_messages(self, messages: Iterable[Snowflake]) -> None: """|coro| Deletes a list of messages. This is similar to :meth:`Message.delete` except it bulk deletes multiple messages. As a special case, if the number of messages is 0, then nothing is done. If the number of messages is 1 then single message delete is done. If it's more than two, then bulk delete is used. You cannot bulk delete more than 100 messages or messages that are older than 14 days old. You must have the :attr:`~Permissions.manage_messages` permission to use this. Usable only by bot accounts. Parameters ----------- messages: Iterable[:class:`abc.Snowflake`] An iterable of messages denoting which ones to bulk delete. Raises ------ ClientException The number of messages to delete was more than 100. Forbidden You do not have proper permissions to delete the messages or you're not using a bot account. NotFound If single delete, then the message was already deleted. HTTPException Deleting the messages failed. """ if not isinstance(messages, (list, tuple)): messages = list(messages) if len(messages) == 0: return # do nothing if len(messages) == 1: message_id = messages[0].id await self._state.http.delete_message(self.id, message_id) return if len(messages) > 100: raise ClientException('Can only bulk delete messages up to 100 messages') message_ids: SnowflakeList = [m.id for m in messages] await self._state.http.delete_messages(self.id, message_ids) async def purge( self, *, limit: Optional[int] = 100, check: Callable[[Message], bool] = MISSING, before: Optional[SnowflakeTime] = None, after: Optional[SnowflakeTime] = None, around: Optional[SnowflakeTime] = None, oldest_first: Optional[bool] = False, bulk: bool = True, ) -> List[Message]: """|coro| Purges a list of messages that meet the criteria given by the predicate ``check``. If a ``check`` is not provided then all messages are deleted without discrimination. You must have the :attr:`~Permissions.manage_messages` permission to delete messages even if they are your own (unless you are a user account). The :attr:`~Permissions.read_message_history` permission is also needed to retrieve message history. Examples --------- Deleting bot's messages :: def is_me(m): return m.author == client.user deleted = await thread.purge(limit=100, check=is_me) await thread.send(f'Deleted {len(deleted)} message(s)') Parameters ----------- limit: Optional[:class:`int`] The number of messages to search through. This is not the number of messages that will be deleted, though it can be. check: Callable[[:class:`Message`], :class:`bool`] The function used to check if a message should be deleted. It must take a :class:`Message` as its sole parameter. before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]] Same as ``before`` in :meth:`history`. after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]] Same as ``after`` in :meth:`history`. 
around: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]] Same as ``around`` in :meth:`history`. oldest_first: Optional[:class:`bool`] Same as ``oldest_first`` in :meth:`history`. bulk: :class:`bool` If ``True``, use bulk delete. Setting this to ``False`` is useful for mass-deleting a bot's own messages without :attr:`Permissions.manage_messages`. When ``True``, will fall back to single delete if messages are older than two weeks. Raises ------- Forbidden You do not have proper permissions to do the actions required. HTTPException Purging the messages failed. Returns -------- List[:class:`.Message`] The list of messages that were deleted. """ if check is MISSING: check = lambda m: True iterator = self.history(limit=limit, before=before, after=after, oldest_first=oldest_first, around=around) ret: List[Message] = [] count = 0 minimum_time = int((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22 async def _single_delete_strategy(messages: Iterable[Message]): for m in messages: await m.delete() strategy = self.delete_messages if bulk else _single_delete_strategy async for message in iterator: if count == 100: to_delete = ret[-100:] await strategy(to_delete) count = 0 await asyncio.sleep(1) if not check(message): continue if message.id < minimum_time: # older than 14 days old if count == 1: await ret[-1].delete() elif count >= 2: to_delete = ret[-count:] await strategy(to_delete) count = 0 strategy = _single_delete_strategy count += 1 ret.append(message) # SOme messages remaining to poll if count >= 2: # more than 2 messages -> bulk delete to_delete = ret[-count:] await strategy(to_delete) elif count == 1: # delete a single message await ret[-1].delete() return ret async def edit( self, *, name: str = MISSING, archived: bool = MISSING, locked: bool = MISSING, invitable: bool = MISSING, slowmode_delay: int = MISSING, auto_archive_duration: ThreadArchiveDuration = MISSING, ) -> Thread: """|coro| Edits the thread. Editing the thread requires :attr:`.Permissions.manage_threads`. The thread creator can also edit ``name``, ``archived`` or ``auto_archive_duration``. Note that if the thread is locked then only those with :attr:`.Permissions.manage_threads` can unarchive a thread. The thread must be unarchived to be edited. Parameters ------------ name: :class:`str` The new name of the thread. archived: :class:`bool` Whether to archive the thread or not. locked: :class:`bool` Whether to lock the thread or not. invitable: :class:`bool` Whether non-moderators can add other non-moderators to this thread. Only available for private threads. auto_archive_duration: :class:`int` The new duration in minutes before a thread is automatically archived for inactivity. Must be one of ``60``, ``1440``, ``4320``, or ``10080``. slowmode_delay: :class:`int` Specifies the slowmode rate limit for user in this thread, in seconds. A value of ``0`` disables slowmode. The maximum value possible is ``21600``. Raises ------- Forbidden You do not have permissions to edit the thread. HTTPException Editing the thread failed. Returns -------- :class:`Thread` The newly edited thread. 
""" payload = {} if name is not MISSING: payload['name'] = str(name) if archived is not MISSING: payload['archived'] = archived if auto_archive_duration is not MISSING: payload['auto_archive_duration'] = auto_archive_duration if locked is not MISSING: payload['locked'] = locked if invitable is not MISSING: payload['invitable'] = invitable if slowmode_delay is not MISSING: payload['rate_limit_per_user'] = slowmode_delay data = await self._state.http.edit_channel(self.id, **payload) # The data payload will always be a Thread payload return Thread(data=data, state=self._state, guild=self.guild) # type: ignore async def join(self): """|coro| Joins this thread. You must have :attr:`~Permissions.send_messages_in_threads` to join a thread. If the thread is private, :attr:`~Permissions.manage_threads` is also needed. Raises ------- Forbidden You do not have permissions to join the thread. HTTPException Joining the thread failed. """ await self._state.http.join_thread(self.id) async def leave(self): """|coro| Leaves this thread. Raises ------- HTTPException Leaving the thread failed. """ await self._state.http.leave_thread(self.id) async def add_user(self, user: Snowflake): """|coro| Adds a user to this thread. You must have :attr:`~Permissions.send_messages` and :attr:`~Permissions.use_threads` to add a user to a public thread. If the thread is private then :attr:`~Permissions.send_messages` and either :attr:`~Permissions.use_private_threads` or :attr:`~Permissions.manage_messages` is required to add a user to the thread. Parameters ----------- user: :class:`abc.Snowflake` The user to add to the thread. Raises ------- Forbidden You do not have permissions to add the user to the thread. HTTPException Adding the user to the thread failed. """ await self._state.http.add_user_to_thread(self.id, user.id) async def remove_user(self, user: Snowflake): """|coro| Removes a user from this thread. You must have :attr:`~Permissions.manage_threads` or be the creator of the thread to remove a user. Parameters ----------- user: :class:`abc.Snowflake` The user to add to the thread. Raises ------- Forbidden You do not have permissions to remove the user from the thread. HTTPException Removing the user from the thread failed. """ await self._state.http.remove_user_from_thread(self.id, user.id) async def fetch_members(self) -> List[ThreadMember]: """|coro| Retrieves all :class:`ThreadMember` that are in this thread. This requires :attr:`Intents.members` to get information about members other than yourself. Raises ------- HTTPException Retrieving the members failed. Returns -------- List[:class:`ThreadMember`] All thread members in the thread. """ members = await self._state.http.get_thread_members(self.id) return [ThreadMember(parent=self, data=data) for data in members] async def delete(self): """|coro| Deletes this thread. You must have :attr:`~Permissions.manage_threads` to delete threads. Raises ------- Forbidden You do not have permissions to delete this thread. HTTPException Deleting the thread failed. """ await self._state.http.delete_channel(self.id) def get_partial_message(self, message_id: int, /) -> PartialMessage: """Creates a :class:`PartialMessage` from the message ID. This is useful if you want to work with a message and only have its ID without doing an unnecessary API call. .. versionadded:: 2.0 Parameters ------------ message_id: :class:`int` The message ID to create a partial message for. Returns --------- :class:`PartialMessage` The partial message. 
""" from .message import PartialMessage return PartialMessage(channel=self, id=message_id) def _add_member(self, member: ThreadMember) -> None: self._members[member.id] = member def _pop_member(self, member_id: int) -> Optional[ThreadMember]: return self._members.pop(member_id, None) class ThreadMember(Hashable): """Represents a Discord thread member. .. container:: operations .. describe:: x == y Checks if two thread members are equal. .. describe:: x != y Checks if two thread members are not equal. .. describe:: hash(x) Returns the thread member's hash. .. describe:: str(x) Returns the thread member's name. .. versionadded:: 2.0 Attributes ----------- id: :class:`int` The thread member's ID. thread_id: :class:`int` The thread's ID. joined_at: :class:`datetime.datetime` The time the member joined the thread in UTC. """ __slots__ = ( 'id', 'thread_id', 'joined_at', 'flags', '_state', 'parent', ) def __init__(self, parent: Thread, data: ThreadMemberPayload): self.parent = parent self._state = parent._state self._from_data(data) def __repr__(self) -> str: return f'<ThreadMember id={self.id} thread_id={self.thread_id} joined_at={self.joined_at!r}>' def _from_data(self, data: ThreadMemberPayload): try: self.id = int(data['user_id']) except KeyError: assert self._state.self_id is not None self.id = self._state.self_id try: self.thread_id = int(data['id']) except KeyError: self.thread_id = self.parent.id self.joined_at = parse_time(data['join_timestamp']) self.flags = data['flags'] @property def thread(self) -> Thread: """:class:`Thread`: The thread this member belongs to.""" return self.parent
py
b4147385759778d106c0fc90371b1441f19d72b3
# Urrios 2016: multicellular memory + Macia 2016 import numpy as np def merge_N(x,y): x1 = np.append(x, np.zeros([x.shape[0], y.shape[1]]), axis=1) y1 = np.append(np.zeros([y.shape[0], x.shape[1]]), y, axis=1) return np.append(x1,y1,axis=0) def not_cell(state, params): L_X, x, y, N_X, N_Y = state delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x = params # presume that the molecules are degraded in the same strain as they are produced N_Y = N_X f = gamma_L_X * (y ** n_y)/(1 + (theta_L_X*y)**n_y ) dL_X_dt = N_X * (f - delta_L * L_X) dx_dt = N_X * (eta_x * (1/(1+ (omega_x*L_X)**m_x))) - N_Y * (delta_x * x) - rho_x * x return dL_X_dt, dx_dt def not_cell_stochastic(state, params, Omega): L_X, x, y, N_X, N_Y = state delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x = params # presume that the molecules are degraded in the same strain as they are produced N_Y = N_X #Omega *= N_X # reaction space volume is proportional to the number of cells gamma_L_X *= Omega eta_x *= Omega theta_L_X /= Omega omega_x /= Omega p = [0]*5 p[0] = N_X * gamma_L_X * (y ** n_y)/(1 + (theta_L_X*y)**n_y ) / Omega #p[0] = gamma_L_X * (y ** n_y)/(1 + (theta_L_X*y)**n_y ) / Omega # N_x already included in reaction space volume (Omega) p[1] = N_X * delta_L * L_X #p[1] = delta_L * L_X # N_x already included in reaction space volume (Omega) p[2] = N_X * (eta_x * (1/(1+ (omega_x*L_X)**m_x))) #p[2] = (eta_x * (1/(1+ (omega_x*L_X)**m_x))) # N_x already included in reaction space volume (Omega) p[3] = N_Y * (delta_x * x) #p[3] = (delta_x * x) # N_y already included in reaction space volume (Omega) p[4] = rho_x * x return p def yes_cell(state, params): x, y, N_X, N_Y = state gamma_x, n_y, theta_x, delta_x, rho_x = params # presume that the molecules are degraded in the same strain as they are produced N_Y = N_X dx_dt = N_X * gamma_x * (y ** n_y)/(1 + (theta_x*y)**n_y ) - N_Y * (delta_x * x) - rho_x * x return dx_dt def yes_cell_stochastic(state, params, Omega): x, y, N_X, N_Y = state gamma_x, n_y, theta_x, delta_x, rho_x = params # presume that the molecules are degraded in the same strain as they are produced N_Y = N_X #Omega *= N_X # reaction space volume is proportional to the number of cells gamma_x *= Omega theta_x /= Omega p = [0]*3 p[0] = N_X * gamma_x * (y ** n_y)/(1 + (theta_x*y)**n_y ) #p[0] = gamma_x * (y ** n_y)/(1 + (theta_x*y)**n_y ) # N_x already included in reaction space volume (Omega) p[1] = N_Y * (delta_x * x) #p[1] = delta_x * x # N_y already included in reaction space volume (Omega) p[2] = rho_x * x return p def population(state, params): N = state r = params dN = r * N * (1 - N) return dN def population_stochastic(state, params, Omega): N = state r = params p = [0]*2 p[0] = r * N p[1] = r * Omega * N**2 return p def toggle_model(state, T, params): L_A, L_B, a, b, N_A, N_B = state state_A = L_A, a, b, N_A, N_B state_B = L_B, b, a, N_B, N_A delta_L, gamma_A, gamma_B, n_a, n_b, theta_A, theta_B, eta_a, eta_b, omega_a, omega_b, m_a, m_b, delta_a, delta_b, rho_a, rho_b, r_A, r_B = params params_A = delta_L, gamma_A, n_b, theta_A, eta_a, omega_a, m_a, delta_a, rho_a params_B = delta_L, gamma_B, n_a, theta_B, eta_b, omega_b, m_b, delta_b, rho_b dL_A_dt, da_dt = not_cell(state_A, params_A) dL_B_dt, db_dt = not_cell(state_B, params_B) dN_A_dt = population(N_A, r_A) dN_B_dt = population(N_B, r_B) return np.array([dL_A_dt, dL_B_dt, da_dt, db_dt, dN_A_dt, dN_B_dt]) def toggle_model_stochastic(state, params, Omega): L_A, L_B, a, b, N_A, N_B = state state_A = L_A, a, b, N_A, N_B 
state_B = L_B, b, a, N_B, N_A delta_L, gamma_A, gamma_B, n_a, n_b, theta_A, theta_B, eta_a, eta_b, omega_a, omega_b, m_a, m_b, delta_a, delta_b, rho_a, rho_b, r_A, r_B = params params_A = delta_L, gamma_A, n_b, theta_A, eta_a, omega_a, m_a, delta_a, rho_a params_B = delta_L, gamma_B, n_a, theta_B, eta_b, omega_b, m_b, delta_b, rho_b p1 = not_cell_stochastic(state_A, params_A, Omega) p2 = not_cell_stochastic(state_B, params_B, Omega) #p3 = population_stochastic(N_A, r_A, Omega) #p4 = population_stochastic(N_B, r_B, Omega) return p1 + p2 def toggle_generate_stoichiometry(): # # x axis ... species # Y = L_A, L_B, a, b, N_A, N_B # y axis ... reactions # idx_L_A, idx_L_B, idx_a, idx_b, idx_N_A, idx_N_B = 0,1,2,3,4,5 N = np.zeros((6, 10)) # reaction 0 r = 0 # 0 --> L_A N[idx_L_A, r] = 1 # reaction 1 r = 1 # L_A --> 0 N[idx_L_A, r] = -1 # reaction 2 r = 2 # 0 --> a N[idx_a, r] = 1 # reaction 3 r = 3 # a --> 0 N[idx_a, r] = -1 # reaction 4 r = 4 # a --> 0 N[idx_a, r] = -1 # reaction 5 r = 5 # 0 --> L_B N[idx_L_B, r] = 1 # reaction 6 r = 6 # L_B --> 0 N[idx_L_B, r] = -1 # reaction 7 r = 7 # 0 --> b N[idx_b, r] = 1 # reaction 8 r = 8 # b --> 0 N[idx_b, r] = -1 # reaction 9 r = 9 # b --> 0 N[idx_b, r] = -1 return N # L_A ... intermediate # a ... out # b ... in # N_A ... number of cells def not_cell_wrapper(state, params): L_A, a, b, N_A = state state_A = L_A, a, b, N_A, N_A params_A = params return not_cell(state_A, params_A) # a ... out # b ... in # N_A ... number of cells def yes_cell_wrapper(state, params): a, b, N_A = state state_A = a, b, N_A, N_A params_A = params return yes_cell(state_A, params_A) def not_model(state, T, params): L_A, a, b, N_A = state delta_L, gamma_L_A, n_b, theta_L_A, eta_a, omega_a, m_a, delta_a, delta_b, rho_a, rho_b, r_A = params state_not = L_A, a, b, N_A params_not = delta_L, gamma_L_A, n_b, theta_L_A, eta_a, omega_a, m_a, delta_a, rho_a dL_A_dt, da_dt = not_cell_wrapper(state_not, params_not) db_dt = 0#- N_A * delta_b * b - rho_b * b dN_A_dt = population(N_A, r_A) return np.array([dL_A_dt, da_dt, db_dt, dN_A_dt]) def yes_model(state, T, params): a, b, N_A = state gamma_a, n_b, theta_a, delta_a, delta_b, rho_a, rho_b, r_A = params state_yes = a, b, N_A params_yes = gamma_a, n_b, theta_a, delta_a, rho_a da_dt = yes_cell_wrapper(state_yes, params_yes) db_dt = 0 #- N_A * delta_b * b - rho_b * b dN_A_dt = population(N_A, r_A) return np.array([da_dt, db_dt, dN_A_dt]) def MUX_4_1_model(state, T, params): delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X = params params_yes = gamma_x, n_y, theta_x, delta_x, rho_x params_not = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x I0, I1, I2, I3, S0, S1 = state[:6] I0_out, I1_out, I2_out, I3_out = state[6:10] L_I0_I0, L_I1_S0, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S0, L_I3_S1, L_I3_I3, L_I0, L_I1, L_I2, L_I3 = state[10:22] N_I0_S0, N_I0_S1, N_I0_I0, N_I1_S0, N_I1_S1, N_I1_I1, N_I2_S0, N_I2_S1, N_I2_I2, N_I3_S0, N_I3_S1, N_I3_I3, N_I0, N_I1, N_I2, N_I3 = state[22:38] out = state[38] """ I0 """ dI0_out = 0 # yes S0: I0_S0 state_yes_I0_S0 = I0_out, S0, N_I0_S0 dI0_out += yes_cell_wrapper(state_yes_I0_S0, params_yes) dN_I0_S0 = population(N_I0_S0, r_X) # yes S1: I0_S1 state_yes_I0_S1 = I0_out, S1, N_I0_S1 dI0_out += yes_cell_wrapper(state_yes_I0_S1, params_yes) dN_I0_S1 = population(N_I0_S1, r_X) # not I0: I0_I0 state_not_I0_I0 = L_I0_I0, I0_out, I0, N_I0_I0 dL_I0_I0, dd = not_cell_wrapper(state_not_I0_I0, params_not) dI0_out += dd dN_I0_I0 = population(N_I0_I0, r_X) """ I1 
""" dI1_out = 0 # not S0: I1_S0 state_not_I1_S0 = L_I1_S0, I1_out, S0, N_I1_S0 dL_I1_S0, dd = not_cell_wrapper(state_not_I1_S0, params_not) dI1_out += dd dN_I1_S0 = population(N_I1_S0, r_X) # yes S1: I1_S1 state_yes_I1_S1 = I1_out, S1, N_I1_S1 dI1_out += yes_cell_wrapper(state_yes_I1_S1, params_yes) dN_I1_S1 = population(N_I1_S1, r_X) # not I1: I1_I1 state_not_I1_I1 = L_I1_I1, I1_out, I1, N_I1_I1 dL_I1_I1, dd = not_cell_wrapper(state_not_I1_I1, params_not) dI1_out += dd dN_I1_I1 = population(N_I1_I1, r_X) """ I2 """ dI2_out = 0 # yes S0: I2_S0 state_yes_I2_S0 = I2_out, S0, N_I2_S0 dI2_out += yes_cell_wrapper(state_yes_I2_S0, params_yes) dN_I2_S0 = population(N_I2_S0, r_X) # not S1: I2_S1 state_not_I2_S1 = L_I2_S1, I2_out, S1, N_I2_S1 dL_I2_S1, dd = not_cell_wrapper(state_not_I2_S1, params_not) dI2_out += dd dN_I2_S1 = population(N_I2_S1, r_X) # not I2: I2_I2 state_not_I2_I2 = L_I2_I2, I2_out, I2, N_I2_I2 dL_I2_I2, dd = not_cell_wrapper(state_not_I2_I2, params_not) dI2_out += dd dN_I2_I2 = population(N_I2_I2, r_X) """ I3 """ dI3_out = 0 # not S0: I3_S0 state_not_I3_S0 = L_I3_S0, I3_out, S0, N_I3_S0 dL_I3_S0, dd = not_cell_wrapper(state_not_I3_S0, params_not) dI3_out += dd dN_I3_S0 = population(N_I3_S0, r_X) # not S1: I3_S1 state_not_I3_S1 = L_I3_S1, I3_out, S1, N_I3_S1 dL_I3_S1, dd = not_cell_wrapper(state_not_I3_S1, params_not) dI3_out += dd dN_I3_S1 = population(N_I3_S1, r_X) # not I3: I3_I3 state_not_I3_I3 = L_I3_I3, I3_out, I3, N_I3_I3 dL_I3_I3, dd = not_cell_wrapper(state_not_I3_I3, params_not) dI3_out += dd dN_I3_I3 = population(N_I3_I3, r_X) """ out """ dout = 0 # not I0: I0 state_not_I0 = L_I0, out, I0_out, N_I0 dL_I0, dd = not_cell_wrapper(state_not_I0, params_not) dout += dd dN_I0 = population(N_I0, r_X) # not I1: I1 state_not_I1 = L_I1, out, I1_out, N_I1 dL_I1, dd = not_cell_wrapper(state_not_I1, params_not) dout += dd dN_I1 = population(N_I1, r_X) # not I2: I2 state_not_I2 = L_I2, out, I2_out, N_I2 dL_I2, dd = not_cell_wrapper(state_not_I2, params_not) dout += dd dN_I2 = population(N_I2, r_X) # not I3: I3 state_not_I3 = L_I3, out, I3_out, N_I3 dL_I3, dd = not_cell_wrapper(state_not_I3, params_not) dout += dd dN_I3 = population(N_I3, r_X) dI0, dI1, dI2, dI3, dS0, dS1 = 0, 0, 0, 0, 0, 0 dstate = np.array([dI0, dI1, dI2, dI3, dS0, dS1, dI0_out, dI1_out, dI2_out, dI3_out, dL_I0_I0, dL_I1_S0, dL_I1_I1, dL_I2_S1, dL_I2_I2, dL_I3_S0, dL_I3_S1, dL_I3_I3, dL_I0, dL_I1, dL_I2, dL_I3, dN_I0_S0, dN_I0_S1, dN_I0_I0, dN_I1_S0, dN_I1_S1, dN_I1_I1, dN_I2_S0, dN_I2_S1, dN_I2_I2, dN_I3_S0, dN_I3_S1, dN_I3_I3, dN_I0, dN_I1, dN_I2, dN_I3, dout]) return dstate def MUX_8_1_model(state, T, params): delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X = params params_yes = gamma_x, n_y, theta_x, delta_x, rho_x params_not = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x I0, I1, I2, I3, I4, I5, I6, I7, S0, S1, S2 = state[:11] I0_out, I1_out, I2_out, I3_out, I4_out, I5_out, I6_out, I7_out = state[11:19] L_I0_I0, L_I1_S0, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S0, L_I3_S1, L_I3_I3, L_I4_S2, L_I4_I4, L_I5_S0, L_I5_S2, L_I5_I5, L_I6_S1, L_I6_S2, L_I6_I6, L_I7_S0,\ L_I7_S1, L_I7_S2, L_I7_I7, L_I0, L_I1, L_I2, L_I3, L_I4, L_I5, L_I6, L_I7 = state[19:47] N_I0_S0, N_I0_S1, N_I0_S2, N_I0_I0,\ N_I1_S0, N_I1_S1, N_I1_S2, N_I1_I1,\ N_I2_S0, N_I2_S1, N_I2_S2, N_I2_I2,\ N_I3_S0, N_I3_S1, N_I3_S2, N_I3_I3,\ N_I4_S0, N_I4_S1, N_I4_S2, N_I4_I4,\ N_I5_S0, N_I5_S1, N_I5_S2, N_I5_I5,\ N_I6_S0, N_I6_S1, N_I6_S2, N_I6_I6,\ N_I7_S0, N_I7_S1, N_I7_S2, N_I7_I7,\ N_I0, 
N_I1, N_I2, N_I3, N_I4, N_I5, N_I6, N_I7 = state[47:87] out = state[87] """ I0 """ dI0_out = 0 # yes S0: I0_S0 state_yes_I0_S0 = I0_out, S0, N_I0_S0 dI0_out += yes_cell_wrapper(state_yes_I0_S0, params_yes) dN_I0_S0 = population(N_I0_S0, r_X) # yes S1: I0_S1 state_yes_I0_S1 = I0_out, S1, N_I0_S1 dI0_out += yes_cell_wrapper(state_yes_I0_S1, params_yes) dN_I0_S1 = population(N_I0_S1, r_X) # yes S2: I0_S2 state_yes_I0_S2 = I0_out, S2, N_I0_S2 dI0_out += yes_cell_wrapper(state_yes_I0_S2, params_yes) dN_I0_S2 = population(N_I0_S2, r_X) # not I0: I0_I0 state_not_I0_I0 = L_I0_I0, I0_out, I0, N_I0_I0 dL_I0_I0, dd = not_cell_wrapper(state_not_I0_I0, params_not) dI0_out += dd dN_I0_I0 = population(N_I0_I0, r_X) """ I1 """ dI1_out = 0 # not S0: I1_S0 state_not_I1_S0 = L_I1_S0, I1_out, S0, N_I1_S0 dL_I1_S0, dd = not_cell_wrapper(state_not_I1_S0, params_not) dI1_out += dd dN_I1_S0 = population(N_I1_S0, r_X) # yes S1: I1_S1 state_yes_I1_S1 = I1_out, S1, N_I1_S1 dI1_out += yes_cell_wrapper(state_yes_I1_S1, params_yes) dN_I1_S1 = population(N_I1_S1, r_X) # yes S1: I1_S2 state_yes_I1_S2 = I1_out, S2, N_I1_S2 dI1_out += yes_cell_wrapper(state_yes_I1_S2, params_yes) dN_I1_S2 = population(N_I1_S2, r_X) # not I1: I1_I1 state_not_I1_I1 = L_I1_I1, I1_out, I1, N_I1_I1 dL_I1_I1, dd = not_cell_wrapper(state_not_I1_I1, params_not) dI1_out += dd dN_I1_I1 = population(N_I1_I1, r_X) """ I2 """ dI2_out = 0 # yes S0: I2_S0 state_yes_I2_S0 = I2_out, S0, N_I2_S0 dI2_out += yes_cell_wrapper(state_yes_I2_S0, params_yes) dN_I2_S0 = population(N_I2_S0, r_X) # not S1: I2_S1 state_not_I2_S1 = L_I2_S1, I2_out, S1, N_I2_S1 dL_I2_S1, dd = not_cell_wrapper(state_not_I2_S1, params_not) dI2_out += dd dN_I2_S1 = population(N_I2_S1, r_X) # yes S2: I2_S2 state_yes_I2_S2 = I2_out, S2, N_I2_S2 dI2_out += yes_cell_wrapper(state_yes_I2_S2, params_yes) dN_I2_S2 = population(N_I2_S2, r_X) # not I2: I2_I2 state_not_I2_I2 = L_I2_I2, I2_out, I2, N_I2_I2 dL_I2_I2, dd = not_cell_wrapper(state_not_I2_I2, params_not) dI2_out += dd dN_I2_I2 = population(N_I2_I2, r_X) """ I3 """ dI3_out = 0 # not S0: I3_S0 state_not_I3_S0 = L_I3_S0, I3_out, S0, N_I3_S0 dL_I3_S0, dd = not_cell_wrapper(state_not_I3_S0, params_not) dI3_out += dd dN_I3_S0 = population(N_I6_S0, r_X) # not S1: I3_S1 state_not_I3_S1 = L_I3_S1, I3_out, S1, N_I3_S1 dL_I3_S1, dd = not_cell_wrapper(state_not_I3_S1, params_not) dI3_out += dd dN_I3_S1 = population(N_I3_S1, r_X) # yes S2: I3_S2 state_yes_I3_S2 = I3_out, S2, N_I3_S2 dI3_out += yes_cell_wrapper(state_yes_I3_S2, params_yes) dN_I3_S2 = population(N_I3_S2, r_X) # not I3: I3_I3 state_not_I3_I3 = L_I3_I3, I3_out, I3, N_I3_I3 dL_I3_I3, dd = not_cell_wrapper(state_not_I3_I3, params_not) dI3_out += dd dN_I3_I3 = population(N_I3_I3, r_X) """ I4 """ dI4_out = 0 # yes S0: I4_S0 state_yes_I4_S0 = I4_out, S0, N_I4_S0 dI4_out += yes_cell_wrapper(state_yes_I4_S0, params_yes) dN_I4_S0 = population(N_I4_S0, r_X) # yes S1: I4_S1 state_yes_I4_S1 = I4_out, S1, N_I4_S1 dI4_out += yes_cell_wrapper(state_yes_I4_S1, params_yes) dN_I4_S1 = population(N_I4_S1, r_X) # not S2: I4_S2 state_not_I4_S2 = L_I4_S2, I4_out, S2, N_I4_S2 dL_I4_S2, dd = not_cell_wrapper(state_not_I4_S2, params_not) dI4_out += dd dN_I4_S2 = population(N_I4_S2, r_X) # not I4: I4_I4 state_not_I4_I4 = L_I4_I4, I4_out, I4, N_I4_I4 dL_I4_I4, dd = not_cell_wrapper(state_not_I4_I4, params_not) dI4_out += dd dN_I4_I4 = population(N_I4_I4, r_X) """ I5 """ dI5_out = 0 # not S0: I5_S0 state_not_I5_S0 = L_I5_S0, I5_out, S0, N_I5_S0 dL_I5_S0, dd = not_cell_wrapper(state_not_I5_S0, params_not) dI5_out += 
dd dN_I5_S0 = population(N_I5_S0, r_X) # yes S1: I5_S1 state_yes_I5_S1 = I5_out, S1, N_I5_S1 dI5_out += yes_cell_wrapper(state_yes_I5_S1, params_yes) dN_I5_S1 = population(N_I5_S1, r_X) # not S2: I5_S2 state_not_I5_S2 = L_I5_S2, I5_out, S2, N_I5_S2 dL_I5_S2, dd = not_cell_wrapper(state_not_I5_S2, params_not) dI5_out += dd dN_I5_S2 = population(N_I5_S2, r_X) # not I5: I5_I5 state_not_I5_I5 = L_I5_I5, I5_out, I5, N_I5_I5 dL_I5_I5, dd = not_cell_wrapper(state_not_I5_I5, params_not) dI5_out += dd dN_I5_I5 = population(N_I5_I5, r_X) """ I6 """ dI6_out = 0 # yes S0: I6_S0 state_yes_I6_S0 = I6_out, S0, N_I6_S0 dI6_out += yes_cell_wrapper(state_yes_I6_S0, params_yes) dN_I6_S0 = population(N_I6_S0, r_X) # not S1: I6_S1 state_not_I6_S1 = L_I6_S1, I6_out, S1, N_I6_S1 dL_I6_S1, dd = not_cell_wrapper(state_not_I6_S1, params_not) dI6_out += dd dN_I6_S1 = population(N_I6_S1, r_X) # not S2: I3_S2 state_not_I6_S2 = L_I6_S2, I6_out, S2, N_I6_S2 dL_I6_S2, dd = not_cell_wrapper(state_not_I6_S2, params_not) dI6_out += dd dN_I6_S2 = population(N_I6_S2, r_X) # not I6: I6_I6 state_not_I6_I6 = L_I6_I6, I6_out, I6, N_I6_I6 dL_I6_I6, dd = not_cell_wrapper(state_not_I6_I6, params_not) dI6_out += dd dN_I6_I6 = population(N_I6_I6, r_X) """ I7 """ dI7_out = 0 # not S0: I7_S0 state_not_I7_S0 = L_I7_S0, I7_out, S0, N_I7_S0 dL_I7_S0, dd = not_cell_wrapper(state_not_I7_S0, params_not) dI7_out += dd dN_I7_S0 = population(N_I7_S0, r_X) # not S1: I7_S1 state_not_I7_S1 = L_I7_S1, I7_out, S1, N_I7_S1 dL_I7_S1, dd = not_cell_wrapper(state_not_I7_S1, params_not) dI7_out += dd dN_I7_S1 = population(N_I7_S1, r_X) # not S2: I7_S2 state_not_I7_S2 = L_I7_S2, I7_out, S2, N_I7_S2 dL_I7_S2, dd = not_cell_wrapper(state_not_I7_S2, params_not) dI7_out += dd dN_I7_S2 = population(N_I7_S2, r_X) # not I7: I7_I7 state_not_I7_I7 = L_I7_I7, I7_out, I7, N_I7_I7 dL_I7_I7, dd = not_cell_wrapper(state_not_I7_I7, params_not) dI7_out += dd dN_I7_I7 = population(N_I7_I7, r_X) """ out """ dout = 0 # not I0: I0 state_not_I0 = L_I0, out, I0_out, N_I0 dL_I0, dd = not_cell_wrapper(state_not_I0, params_not) dout += dd dN_I0 = population(N_I0, r_X) # not I1: I1 state_not_I1 = L_I1, out, I1_out, N_I1 dL_I1, dd = not_cell_wrapper(state_not_I1, params_not) dout += dd dN_I1 = population(N_I1, r_X) # not I2: I2 state_not_I2 = L_I2, out, I2_out, N_I2 dL_I2, dd = not_cell_wrapper(state_not_I2, params_not) dout += dd dN_I2 = population(N_I2, r_X) # not I3: I3 state_not_I3 = L_I3, out, I3_out, N_I3 dL_I3, dd = not_cell_wrapper(state_not_I3, params_not) dout += dd dN_I3 = population(N_I3, r_X) # not I4: I4 state_not_I4 = L_I4, out, I4_out, N_I4 dL_I4, dd = not_cell_wrapper(state_not_I4, params_not) dout += dd dN_I4 = population(N_I4, r_X) # not I5: I5 state_not_I5 = L_I5, out, I5_out, N_I5 dL_I5, dd = not_cell_wrapper(state_not_I5, params_not) dout += dd dN_I5 = population(N_I5, r_X) # not I6: I6 state_not_I6 = L_I6, out, I6_out, N_I6 dL_I6, dd = not_cell_wrapper(state_not_I6, params_not) dout += dd dN_I6 = population(N_I6, r_X) # not I7: I7 state_not_I7 = L_I7, out, I7_out, N_I7 dL_I7, dd = not_cell_wrapper(state_not_I7, params_not) dout += dd dN_I7 = population(N_I7, r_X) dI0, dI1, dI2, dI3, dI4, dI5, dI6, dI7, dS0, dS1, dS2 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 dstate = np.array([dI0, dI1, dI2, dI3, dI4, dI5, dI6, dI7, dS0, dS1, dS2, dI0_out, dI1_out, dI2_out, dI3_out, dI4_out, dI5_out, dI6_out, dI7_out, dL_I0_I0, dL_I1_S0, dL_I1_I1, dL_I2_S1, dL_I2_I2, dL_I3_S0, dL_I3_S1, dL_I3_I3, dL_I4_S2, dL_I4_I4, dL_I5_S0, dL_I5_S2, dL_I5_I5, dL_I6_S1, dL_I6_S2, dL_I6_I6, dL_I7_S0,\ 
dL_I7_S1, dL_I7_S2, dL_I7_I7, dL_I0, dL_I1, dL_I2, dL_I3, dL_I4, dL_I5, dL_I6, dL_I7, dN_I0_S0, dN_I0_S1, dN_I0_S2, dN_I0_I0, dN_I1_S0, dN_I1_S1, dN_I1_S2, dN_I1_I1, dN_I2_S0, dN_I2_S1, dN_I2_S2, dN_I2_I2, dN_I3_S0, dN_I3_S1, dN_I3_S2, dN_I3_I3, dN_I4_S0, dN_I4_S1, dN_I4_S2, dN_I4_I4, dN_I5_S0, dN_I5_S1, dN_I5_S2, dN_I5_I5, dN_I6_S0, dN_I6_S1, dN_I6_S2, dN_I6_I6, dN_I7_S0, dN_I7_S1, dN_I7_S2, dN_I7_I7, dN_I0, dN_I1, dN_I2, dN_I3, dN_I4, dN_I5, dN_I6, dN_I7, dout]) return dstate def MUX_4_1_generate_stoichiometry(): """ I0, I1, I2, I3, S0, S1 = state[:6] I0_out, I1_out, I2_out, I3_out = state[6:10] L_I0_I0, L_I1_S0, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S0, L_I3_S1, L_I3_I3, L_I0, L_I1, L_I2, L_I3 = state[10:22] N_I0_S0, N_I0_S1, N_I0_I0, N_I1_S0, N_I1_S1, N_I1_I1, N_I2_S0, N_I2_S1, N_I2_I2, N_I3_S0, N_I3_S1, N_I3_I3, N_I0, N_I1, N_I2, N_I3 = state[22:38] out = state[38] """ #I0, I1, I2, I3, S0, S1 = range(6) I0_out, I1_out, I2_out, I3_out = range(6,10) L_I0_I0, L_I1_S0, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S0, L_I3_S1, L_I3_I3, L_I0, L_I1, L_I2, L_I3 = range(10,22) #N_I0_S0, N_I0_S1, N_I0_I0, N_I1_S0, N_I1_S1, N_I1_I1, N_I2_S0, N_I2_S1, N_I2_I2, N_I3_S0, N_I3_S1, N_I3_I3, N_I0, N_I1, N_I2, N_I3 = range(22,38) out = 38 # # x axis ... species # y axis ... reactions # N = np.zeros((39, 72)) """ # yes S0: I0_S0 """ r = 0 # reaction 0 # 0 --> I0_out N[I0_out, r] = 1 r += 1 # reaction 1 # I0_out --> 0 N[I0_out, r] = -1 r += 1 # reaction 2 # I0_out --> 0 N[I0_out, r] = -1 """ # yes S1: I0_S1 """ r += 1 # reaction 3 # 0 --> I0_out N[I0_out, r] = 1 r += 1 # reaction 4 # I0_out --> 0 N[I0_out, r] = -1 r += 1 # reaction 5 # I0_out --> 0 N[I0_out, r] = -1 """ # not I0: I0_I0 """ r += 1 # reaction 6 # 0 --> L_I0_I0 N[L_I0_I0, r] = 1 r += 1 # reaction 7 # L_I0_I0 --> 0 N[L_I0_I0, r] = -1 r += 1 # reaction 8 # 0 --> I0_out N[I0_out, r] = 1 r += 1 # reaction 9 # I0_out --> 0 N[I0_out, r] = -1 r += 1 # reaction 10 # I0_out --> 0 N[I0_out, r] = -1 """ # not S0: I1_S0 """ r += 1 # reaction 11 # 0 --> L_I1_S0 N[L_I1_S0, r] = 1 r += 1 # reaction 12 # L_I1_S0 --> 0 N[L_I1_S0, r] = -1 r += 1 # reaction 13 # 0 --> I1_out N[I1_out, r] = 1 r += 1 # reaction 14 # I1_out --> 0 N[I1_out, r] = -1 r += 1 # reaction 15 # I1_out --> 0 N[I1_out, r] = -1 """ # yes S1: I1_S1 """ r += 1 # reaction 16 # 0 --> I1_out N[I1_out, r] = 1 r += 1 # reaction 17 # I1_out --> 0 N[I1_out, r] = -1 r += 1 # reaction 18 # I1_out --> 0 N[I1_out, r] = -1 """ # not I1: I1_I1 """ r += 1 # reaction 19 # 0 --> L_I1_I1 N[L_I1_I1, r] = 1 r += 1 # reaction 20 # L_I1_I1 --> 0 N[L_I1_I1, r] = -1 r += 1 # reaction 21 # 0 --> I1_out N[I1_out, r] = 1 r += 1 # reaction 22 # I1_out --> 0 N[I1_out, r] = -1 r += 1 # reaction 23 # I1_out --> 0 N[I1_out, r] = -1 """ # yes S0: I2_S0 """ r += 1 # reaction 24 # 0 --> I2_out N[I2_out, r] = 1 r += 1 # reaction 25 # I2_out --> 0 N[I2_out, r] = -1 r += 1 # reaction 26 # I2_out --> 0 N[I2_out, r] = -1 """ # not S1: I2_S1 """ r += 1 # reaction 27 # 0 --> L_I2_S1 N[L_I2_S1, r] = 1 r += 1 # reaction 28 # L_I2_S1 --> 0 N[L_I2_S1, r] = -1 r += 1 # reaction 29 # 0 --> I2_out N[I2_out, r] = 1 r += 1 # reaction 30 # I2_out --> 0 N[I2_out, r] = -1 r += 1 # reaction 31 # I2_out --> 0 N[I2_out, r] = -1 """ # not I2: I2_I2 """ r += 1 # reaction 32 # 0 --> L_I2_I2 N[L_I2_I2, r] = 1 r += 1 # reaction 33 # L_I2_I2 --> 0 N[L_I2_I2, r] = -1 r += 1 # reaction 34 # 0 --> I2_out N[I2_out, r] = 1 r += 1 # reaction 35 # I2_out --> 0 N[I2_out, r] = -1 r += 1 # reaction 36 # I2_out --> 0 N[I2_out, r] = -1 """ # not S0: I3_S0 """ r += 1 # reaction 37 # 
0 --> L_I3_S0 N[L_I3_S0, r] = 1 r += 1 # reaction 38 # 0 --> L_I3_S0 N[L_I3_S0, r] = -1 r += 1 # reaction 39 # 0 --> I3_out N[I3_out, r] = 1 r += 1 # reaction 40 # I3_out --> 0 N[I3_out, r] = -1 r += 1 # reaction 41 # I3_out --> 0 N[I3_out, r] = -1 """ # not S1: I3_S1 """ r += 1 # reaction 42 # 0 --> L_I3_S1 N[L_I3_S1, r] = 1 r += 1 # reaction 43 # L_I3_S1 --> 0 N[L_I3_S1, r] = -1 r += 1 # reaction 44 # 0 --> I3_out N[L_I3_S1, r] = 1 r += 1 # reaction 45 # I3_out --> 0 N[I3_out, r] = -1 r += 1 # reaction 46 # I3_out --> 0 N[I3_out, r] = -1 """ # not I3: I3_I3 """ r += 1 # reaction 47 # 0 --> L_I3_I3 N[L_I3_I3, r] = 1 r += 1 # reaction 48 # L_I3_I3 --> 0 N[L_I3_I3, r] = -1 r += 1 # reaction 49 # 0 --> I3_out N[I3_out, r] = 1 r += 1 # reaction 50 # I3_out --> 0 N[I3_out, r] = -1 r += 1 # reaction 51 # I3_out --> 0 N[I3_out, r] = -1 """ # not I0: I0 """ r += 1 # reaction 52 # 0 --> L_I0 N[L_I0, r] = 1 r += 1 # reaction 53 # L_I0 --> 0 N[L_I0, r] = -1 r += 1 # reaction 54 # 0 --> out N[out, r] = 1 r += 1 # reaction 55 # out --> 0 N[out, r] = -1 r += 1 # reaction 56 # out --> 0 N[out, r] = -1 """ # not I1: I1 """ r += 1 # reaction 57 # 0 --> L_I1 N[L_I1, r] = 1 r += 1 # reaction 58 # L_I1 --> 0 N[L_I1, r] = -1 r += 1 # reaction 59 # 0 --> out N[out, r] = 1 r += 1 # reaction 60 # out --> 0 N[out, r] = -1 r += 1 # reaction 61 # out --> 0 N[out, r] = -1 """ # not I2: I2 """ r += 1 # reaction 62 # 0 --> L_I2 N[L_I2, r] = 1 r += 1 # reaction 63 # L_I2 --> 0 N[L_I2, r] = -1 r += 1 # reaction 64 # 0 --> out N[out, r] = 1 r += 1 # reaction 65 # out --> 0 N[out, r] = -1 r += 1 # reaction 66 # out --> 0 N[out, r] = -1 """ # not I3: I3 """ r += 1 # reaction 67 # 0 --> L_I3 N[L_I3, r] = 1 r += 1 # reaction 68 # L_I3 --> 0 N[L_I3, r] = -1 r += 1 # reaction 69 # 0 --> out N[out, r] = 1 r += 1 # reaction 70 # out --> 0 N[out, r] = -1 r += 1 # reaction 71 # out --> 0 N[out, r] = -1 return N def MUX_8_1_generate_stoichiometry(): """ I0, I1, I2, I3, I4, I5, I6, I7, S0, S1, S2 = state[:11] I0_out, I1_out, I2_out, I3_out, I4_out, I5_out, I6_out, I7_out = state[11:19] L_I0_I0, L_I1_S2, L_I1_I1, L_I2_S1,\ L_I2_I2, L_I3_S1, L_I3_S2, L_I3_I3,\ L_I4_S0, L_I4_I4, L_I5_S0, L_I5_S2,\ L_I5_I5, L_I6_S0, L_I6_S1, L_I6_I6,\ L_I7_S0, L_I7_S1, L_I7_S2, L_I7_I7,\ L_I0, L_I1, L_I2, L_I3, L_I4, L_I5, L_I6, L_I7 = state[19:47] N_I0_S0, N_I0_S1, N_I0_S2, N_I0_I0,\ N_I1_S0, N_I1_S1, N_I1_S2, N_I1_I1,\ N_I2_S0, N_I2_S1, N_I2_S2, N_I2_I2,\ N_I3_S0, N_I3_S1, N_I3_S2, N_I3_I3,\ N_I4_S0, N_I4_S1, N_I4_S2, N_I4_I4,\ N_I5_S0, N_I5_S1, N_I5_S2, N_I5_I5,\ N_I6_S0, N_I6_S1, N_I6_S2, N_I6_I6,\ N_I7_S0, N_I7_S1, N_I7_S2, N_I7_I7,\ N_I0, N_I1, N_I2, N_I3, N_I4, N_I5, N_I6, N_I7 = state[47:87] out = state[87] """ # I0, I1, I2, I3, I4, I5, I6, I7, S0, S1, S2 = range(11) I0_out, I1_out, I2_out, I3_out, I4_out, I5_out, I6_out, I7_out = range(11, 19) L_I0_I0, L_I1_S0, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S0, L_I3_S1, L_I3_I3, L_I4_S2,\ L_I4_I4, L_I5_S0, L_I5_S2, L_I5_I5, L_I6_S1, L_I6_S2, L_I6_I6, L_I7_S0, \ L_I7_S1, L_I7_S2, L_I7_I7, L_I0, L_I1, L_I2, L_I3, L_I4, L_I5, L_I6, L_I7 = range(19, 47) #N_I0_S0, N_I0_S1, N_I0_S2, N_I0_I0, N_I1_S0, N_I1_S1, N_I1_S2, N_I1_I1, N_I2_S0, N_I2_S1, N_I2_S2, N_I2_I2, N_I3_S0, N_I3_S1, N_I3_S2, N_I3_I3, N_I4_S0, N_I4_S1, N_I4_S2, N_I4_I4, N_I5_S0, N_I5_S1, N_I5_S2, N_I5_I5, N_I6_S0, N_I6_S1, N_I6_S2, N_I6_I6, N_I7_S0, N_I7_S1, N_I7_S2, N_I7_I7, N_I0, N_I1, N_I2, N_I3, N_I4, N_I5, N_I6, N_I7 = range(47,86) out = 87 # # x axis ... species # y axis ... 
reactions # N = np.zeros((88, 176)) """ # yes S0: I0_S0 """ r = 0 # reaction 0 # 0 --> I0_out N[I0_out, r] = 1 r += 1 # reaction 1 # I0_out --> 0 N[I0_out, r] = -1 r += 1 # reaction 2 # I0_out --> 0 N[I0_out, r] = -1 """ # yes S1: I0_S1 """ r += 1 # reaction 3 # 0 --> I0_out N[I0_out, r] = 1 r += 1 # reaction 4 # I0_out --> 0 N[I0_out, r] = -1 r += 1 # reaction 5 # I0_out --> 0 N[I0_out, r] = -1 """ # yes S2: I0_S2 """ # reaction 6 r += 1 # 0 --> I0_out N[I0_out, r] = 1 # reaction 7 r += 1 # I0_out --> 0 N[I0_out, r] = -1 # reaction 8 r += 1 # I0_out --> 0 N[I0_out, r] = -1 """ # not I0: I0_I0 """ r += 1 # reaction 9 # 0 --> L_I0_I0 N[L_I0_I0, r] = 1 r += 1 # reaction 10 # L_I0_I0 --> 0 N[L_I0_I0, r] = -1 r += 1 # reaction 11 # 0 --> I0_out N[I0_out, r] = 1 r += 1 # reaction 12 # I0_out --> 0 N[I0_out, r] = -1 r += 1 # reaction 13 # I0_out --> 0 N[I0_out, r] = -1 """ # not S0: I1_S0 """ r += 1 # reaction 14 # 0 --> L_I1_S0 N[L_I1_S0, r] = 1 r += 1 # reaction 15 # L_I1_S0 --> 0 N[L_I1_S0, r] = -1 r += 1 # reaction 16 # 0 --> I1_out N[I1_out, r] = 1 r += 1 # reaction 17 # I1_out --> 0 N[I1_out, r] = -1 r += 1 # reaction 18 # I1_out --> 0 N[I1_out, r] = -1 """ # yes S1: I1_S1 """ r += 1 # reaction 19 # 0 --> I1_out N[I1_out, r] = 1 r += 1 # reaction 20 # I1_out --> 0 N[I1_out, r] = -1 r += 1 # reaction 21 # I1_out --> 0 N[I1_out, r] = -1 """ # yes S2: I1_S2 """ r += 1 # reaction 22 # 0 --> I1_out N[I1_out, r] = 1 r += 1 # reaction 23 # I1_out --> 0 N[I1_out, r] = -1 r += 1 # reaction 24 # I1_out --> 0 N[I1_out, r] = -1 """ # not I1: I1_I1 """ r += 1 # reaction 25 # 0 --> L_I1_I1 N[L_I1_I1, r] = 1 r += 1 # reaction 26 # L_I1_I1 --> 0 N[L_I1_I1, r] = -1 r += 1 # reaction 27 # 0 --> I1_out N[I1_out, r] = 1 r += 1 # reaction 28 # I1_out --> 0 N[I1_out, r] = -1 r += 1 # reaction 23 # I1_out --> 0 N[I1_out, r] = -1 """ # yes S0: I2_S0 """ r += 1 # reaction 24 # 0 --> I2_out N[I2_out, r] = 1 r += 1 # reaction 25 # I2_out --> 0 N[I2_out, r] = -1 r += 1 # reaction 26 # I2_out --> 0 N[I2_out, r] = -1 """ # not S1: I2_S1 """ r += 1 # reaction 27 # 0 --> L_I2_S1 N[L_I2_S1, r] = 1 r += 1 # reaction 28 # L_I2_S1 --> 0 N[L_I2_S1, r] = -1 r += 1 # reaction 29 # 0 --> I2_out N[I2_out, r] = 1 r += 1 # reaction 30 # I2_out --> 0 N[I2_out, r] = -1 r += 1 # reaction 31 # I2_out --> 0 N[I2_out, r] = -1 """ # yes S2: I2_S2 """ r += 1 # reaction 32 # 0 --> I2_out N[I2_out, r] = 1 r += 1 # reaction 33 # I2_out --> 0 N[I2_out, r] = -1 r += 1 # reaction 34 # I2_out --> 0 N[I2_out, r] = -1 """ # not I2: I2_I2 """ r += 1 # reaction 35 # 0 --> L_I2_I2 N[L_I2_I2, r] = 1 r += 1 # reaction 36 # L_I2_I2 --> 0 N[L_I2_I2, r] = -1 r += 1 # reaction 37 # 0 --> I2_out N[I2_out, r] = 1 r += 1 # reaction 38 # I2_out --> 0 N[I2_out, r] = -1 r += 1 # reaction 39 # I2_out --> 0 N[I2_out, r] = -1 """ # not S0: I3_S0 """ r += 1 # reaction 40 # 0 --> L_I3_S0 N[L_I3_S0, r] = 1 r += 1 # reaction 41 # L_I3_S0 --> 0 N[L_I3_S0, r] = -1 r += 1 # reaction 42 # 0 --> I3_out N[I3_out, r] = 1 r += 1 # reaction 43 # I3_out --> 0 N[I3_out, r] = -1 r += 1 # reaction 44 # I3_out --> 0 N[I3_out, r] = -1 """ # not S1: I3_S1 """ r += 1 # reaction 45 # 0 --> L_I3_S1 N[L_I3_S1, r] = 1 r += 1 # reaction 46 # L_I3_S1 --> 0 N[L_I3_S1, r] = -1 r += 1 # reaction 47 # 0 --> I3_out N[I3_out, r] = 1 r += 1 # reaction 48 # I3_out --> 0 N[I3_out, r] = -1 r += 1 # reaction 49 # I3_out --> 0 N[I3_out, r] = -1 """ # yes S2: I3_S2 """ r += 1 # reaction 50 # 0 --> I3_out N[I3_out, r] = 1 r += 1 # reaction 51 # I3_out --> 0 N[I3_out, r] = -1 r += 1 # reaction 52 # I3_out 
--> 0 N[I3_out, r] = -1 """ # not I3: I3_I3 """ r += 1 # reaction 53 # 0 --> L_I3_I3 N[L_I3_I3, r] = 1 r += 1 # reaction 54 # L_I3_I3 --> 0 N[L_I3_I3, r] = -1 r += 1 # reaction 55 # 0 --> I3_out N[I3_out, r] = 1 r += 1 # reaction 56 # I3_out --> 0 N[I3_out, r] = -1 r += 1 # reaction 57 # I3_out --> 0 N[I3_out, r] = -1 """ # yes S0: I4_S0 """ r += 1 # reaction 58 # 0 --> I4_out N[I4_out, r] = 1 r += 1 # reaction 59 # I4_out --> 0 N[I4_out, r] = -1 r += 1 # reaction 60 # I4_out --> 0 N[I4_out, r] = -1 """ # yes S1: I4_S1 """ r += 1 # reaction 61 # 0 --> I4_out N[I4_out, r] = 1 r += 1 # reaction 62 # I4_out --> 0 N[I4_out, r] = -1 r += 1 # reaction 63 # I4_out --> 0 N[I4_out, r] = -1 """ # not S2: I4_S2 """ r += 1 # reaction 64 # 0 --> L_I4_S2 N[L_I4_S2, r] = 1 r += 1 # reaction 65 # L_I4_S2 --> 0 N[L_I4_S2, r] = -1 r += 1 # reaction 66 # 0 --> I4_out N[I4_out, r] = 1 r += 1 # reaction 67 # I4_out --> 0 N[I4_out, r] = -1 r += 1 # reaction 68 # I4_out --> 0 N[I4_out, r] = -1 """ # not I4: I4_I4 """ r += 1 # reaction 69 # 0 --> L_I4_I4 N[L_I4_I4, r] = 1 r += 1 # reaction 70 # L_I4_I4 --> 0 N[L_I4_I4, r] = -1 r += 1 # reaction 71 # 0 --> I4_out N[I4_out, r] = 1 r += 1 # reaction 72 # I4_out --> 0 N[I4_out, r] = -1 r += 1 # reaction 73 # I4_out --> 0 N[I4_out, r] = -1 """ # not S0: I5_S0 """ r += 1 # reaction 74 # 0 --> L_I5_S0 N[L_I5_S0, r] = 1 r += 1 # reaction 75 # L_I5_S0 --> 0 N[L_I5_S0, r] = -1 r += 1 # reaction 76 # 0 --> I5_out N[I5_out, r] = 1 r += 1 # reaction 77 # I5_out --> 0 N[I5_out, r] = -1 r += 1 # reaction 78 # I5_out --> 0 N[I5_out, r] = -1 """ # yes S1: I5_S1 """ r += 1 # reaction 79 # 0 --> I5_out N[I5_out, r] = 1 r += 1 # reaction 80 # I5_out --> 0 N[I5_out, r] = -1 r += 1 # reaction 81 # I5_out --> 0 N[I5_out, r] = -1 """ # not S2: I5_S2 """ r += 1 # reaction 82 # 0 --> L_I5_S2 N[L_I5_S2, r] = 1 r += 1 # reaction 83 # L_I5_S2 --> 0 N[L_I5_S2, r] = -1 r += 1 # reaction 84 # 0 --> I5_out N[I5_out, r] = 1 r += 1 # reaction 85 # I5_out --> 0 N[I5_out, r] = -1 r += 1 # reaction 86 # I5_out --> 0 N[I5_out, r] = -1 """ # not I5: I5_I5 """ r += 1 # reaction 87 # 0 --> L_I5_I5 N[L_I5_I5, r] = 1 r += 1 # reaction 88 # L_I5_I5 --> 0 N[L_I5_I5, r] = -1 r += 1 # reaction 89 # 0 --> I5_out N[I5_out, r] = 1 r += 1 # reaction 90 # I5_out --> 0 N[I5_out, r] = -1 r += 1 # reaction 91 # I5_out --> 0 N[I5_out, r] = -1 """ # yes S0: I6_S0 """ r += 1 # reaction 92 # 0 --> I6_out N[I6_out, r] = 1 r += 1 # reaction 93 # I6_out --> 0 N[I6_out, r] = -1 r += 1 # reaction 94 # I6_out --> 0 N[I6_out, r] = -1 """ # not S1: I6_S1 """ r += 1 # reaction 95 # 0 --> L_I6_S1 N[L_I6_S1, r] = 1 r += 1 # reaction 96 # L_I6_S1 --> 0 N[L_I6_S1, r] = -1 r += 1 # reaction 97 # 0 --> I6_out N[I6_out, r] = 1 r += 1 # reaction 98 # I6_out --> 0 N[I6_out, r] = -1 r += 1 # reaction 99 # I6_out --> 0 N[I6_out, r] = -1 """ # not S2: I6_S2 """ r += 1 # reaction 100 # 0 --> L_I6_S2 N[L_I6_S2, r] = 1 r += 1 # reaction 101 # L_I6_S2 --> 0 N[L_I6_S2, r] = -1 r += 1 # reaction 102 # 0 --> I6_out N[I6_out, r] = 1 r += 1 # reaction 103 # I6_out --> 0 N[I6_out, r] = -1 r += 1 # reaction 104 # I6_out --> 0 N[I6_out, r] = -1 """ # not I6: I6_I6 """ r += 1 # reaction 105 # 0 --> L_I6_I6 N[L_I6_I6, r] = 1 r += 1 # reaction 106 # L_I6_I6 --> 0 N[L_I6_I6, r] = -1 r += 1 # reaction 107 # 0 --> I6_out N[I6_out, r] = 1 r += 1 # reaction 108 # I6_out --> 0 N[I6_out, r] = -1 r += 1 # reaction 109 # I6_out --> 0 N[I6_out, r] = -1 """ # not S0: I7_S0 """ r += 1 # reaction 110 # 0 --> L_I7_S0 N[L_I7_S0, r] = 1 r += 1 # reaction 111 # L_I7_S0 --> 
0 N[L_I7_S0, r] = -1 r += 1 # reaction 112 # 0 --> I7_out N[I7_out, r] = 1 r += 1 # reaction 113 # I7_out --> 0 N[I7_out, r] = -1 r += 1 # reaction 114 # I7_out --> 0 N[I7_out, r] = -1 """ # not S1: I7_S1 """ r += 1 # reaction 115 # 0 --> L_I7_S1 N[L_I7_S1, r] = 1 r += 1 # reaction 116 # L_I7_S1 --> 0 N[L_I7_S1, r] = -1 r += 1 # reaction 117 # 0 --> I7_out N[I7_out, r] = 1 r += 1 # reaction 118 # I7_out --> 0 N[I7_out, r] = -1 r += 1 # reaction 119 # I7_out --> 0 N[I7_out, r] = -1 """ # not S2: I7_S2 """ r += 1 # reaction 120 # 0 --> L_I7_S2 N[L_I7_S2, r] = 1 r += 1 # reaction 121 # L_I7_S2 --> 0 N[L_I7_S2, r] = -1 r += 1 # reaction 122 # 0 --> I7_out N[I7_out, r] = 1 r += 1 # reaction 123 # I7_out --> 0 N[I7_out, r] = -1 r += 1 # reaction 124 # I7_out --> 0 N[I7_out, r] = -1 """ # not I7: I7_I7 """ r += 1 # reaction 125 # 0 --> L_I7_I7 N[L_I7_I7, r] = 1 r += 1 # reaction 126 # L_I7_I7 --> 0 N[L_I7_I7, r] = -1 r += 1 # reaction 127 # 0 --> I7_out N[I7_out, r] = 1 r += 1 # reaction 128 # I7_out --> 0 N[I7_out, r] = -1 r += 1 # reaction 129 # I7_out --> 0 N[I7_out, r] = -1 """ # not I0: I0 """ r += 1 # reaction 130 # 0 --> L_I0 N[L_I0, r] = 1 r += 1 # reaction 131 # L_I0 --> 0 N[L_I0, r] = -1 r += 1 # reaction 132 # 0 --> out N[out, r] = 1 r += 1 # reaction 133 # out --> 0 N[out, r] = -1 r += 1 # reaction 134 # out --> 0 N[out, r] = -1 """ # not I1: I1 """ r += 1 # reaction 135 # 0 --> L_I1 N[L_I1, r] = 1 r += 1 # reaction 136 # L_I1 --> 0 N[L_I1, r] = -1 r += 1 # reaction 137 # 0 --> out N[out, r] = 1 r += 1 # reaction 138 # out --> 0 N[out, r] = -1 r += 1 # reaction 139 # out --> 0 N[out, r] = -1 """ # not I2: I2 """ r += 1 # reaction 140 # 0 --> L_I2 N[L_I2, r] = 1 r += 1 # reaction 141 # L_I2 --> 0 N[L_I2, r] = -1 r += 1 # reaction 142 # 0 --> out N[out, r] = 1 r += 1 # reaction 143 # out --> 0 N[out, r] = -1 r += 1 # reaction 144 # out --> 0 N[out, r] = -1 """ # not I3: I3 """ r += 1 # reaction 145 # 0 --> L_I3 N[L_I3, r] = 1 r += 1 # reaction 146 # L_I3 --> 0 N[L_I3, r] = -1 r += 1 # reaction 147 # 0 --> out N[out, r] = 1 r += 1 # reaction 148 # out --> 0 N[out, r] = -1 r += 1 # reaction 149 # out --> 0 N[out, r] = -1 """ # not I4: I4 """ r += 1 # reaction 150 # 0 --> L_I4 N[L_I4, r] = 1 r += 1 # reaction 151 # L_I4 --> 0 N[L_I4, r] = -1 r += 1 # reaction 152 # 0 --> out N[out, r] = 1 r += 1 # reaction 153 # out --> 0 N[out, r] = -1 r += 1 # reaction 154 # out --> 0 N[out, r] = -1 """ # not I5: I5 """ r += 1 # reaction 155 # 0 --> L_I5 N[L_I5, r] = 1 r += 1 # reaction 156 # L_I5 --> 0 N[L_I5, r] = -1 r += 1 # reaction 157 # 0 --> out N[out, r] = 1 r += 1 # reaction 158 # out --> 0 N[out, r] = -1 r += 1 # reaction 159 # out --> 0 N[out, r] = -1 """ # not I6: I6 """ r += 1 # reaction 160 # 0 --> L_I6 N[L_I6, r] = 1 r += 1 # reaction 161 # L_I6 --> 0 N[L_I6, r] = -1 r += 1 # reaction 162 # 0 --> out N[out, r] = 1 r += 1 # reaction 163 # out --> 0 N[out, r] = -1 r += 1 # reaction 164 # out --> 0 N[out, r] = -1 """ # not I7: I7 """ r += 1 # reaction 165 # 0 --> L_I7 N[L_I7, r] = 1 r += 1 # reaction 166 # L_I7 --> 0 N[L_I7, r] = -1 r += 1 # reaction 167 # 0 --> out N[out, r] = 1 r += 1 # reaction 168 # out --> 0 N[out, r] = -1 r += 1 # reaction 169 # out --> 0 N[out, r] = -1 return N def MUX_4_1_model_stochastic(state, params, Omega): delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X = params params_yes = gamma_x, n_y, theta_x, delta_x, rho_x params_not = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x I0, I1, I2, I3, S0, 
S1 = state[:6] I0_out, I1_out, I2_out, I3_out = state[6:10] L_I0_I0, L_I1_S0, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S0, L_I3_S1, L_I3_I3, L_I0, L_I1, L_I2, L_I3 = state[10:22] N_I0_S0, N_I0_S1, N_I0_I0, N_I1_S0, N_I1_S1, N_I1_I1, N_I2_S0, N_I2_S1, N_I2_I2, N_I3_S0, N_I3_S1, N_I3_I3, N_I0, N_I1, N_I2, N_I3 = state[22:38] out = state[38] """ I0 """ # yes S0: I0_S0 state_yes_I0_S0 = I0_out, S0, N_I0_S0, N_I0_S0 p_I0_S0 = yes_cell_stochastic(state_yes_I0_S0, params_yes, Omega) # yes S1: I0_S1 state_yes_I0_S1 = I0_out, S1, N_I0_S1, N_I0_S1 p_I0_S1 = yes_cell_stochastic(state_yes_I0_S1, params_yes, Omega) # not I0: I0_I0 state_not_I0_I0 = L_I0_I0, I0_out, I0, N_I0_I0, N_I0_I0 p_I0_I0 = not_cell_stochastic(state_not_I0_I0, params_not, Omega) """ I1 """ # not S0: I1_S0 state_not_I1_S0 = L_I1_S0, I1_out, S0, N_I1_S0, N_I1_S0 p_I1_S0 = not_cell_stochastic(state_not_I1_S0, params_not, Omega) # yes S1: I1_S1 state_yes_I1_S1 = I1_out, S1, N_I1_S1, N_I1_S1 p_I1_S1 = yes_cell_stochastic(state_yes_I1_S1, params_yes, Omega) # not I1: I1_I1 state_not_I1_I1 = L_I1_I1, I1_out, I1, N_I1_I1, N_I1_I1 p_I1_I1 = not_cell_stochastic(state_not_I1_I1, params_not, Omega) """ I2 """ # yes S0: I2_S0 state_yes_I2_S0 = I2_out, S0, N_I2_S0, N_I2_S0 p_I2_S0 = yes_cell_stochastic(state_yes_I2_S0, params_yes, Omega) # not S1: I2_S1 state_not_I2_S1 = L_I2_S1, I2_out, S1, N_I2_S1, N_I2_S1 p_I2_S1= not_cell_stochastic(state_not_I2_S1, params_not, Omega) # not I2: I2_I2 state_not_I2_I2 = L_I2_I2, I2_out, I2, N_I2_I2, N_I2_I2 p_I2_I2 = not_cell_stochastic(state_not_I2_I2, params_not, Omega) """ I3 """ # not S0: I3_S0 state_not_I3_S0 = L_I3_S0, I3_out, S0, N_I3_S0, N_I3_S0 p_I3_S0 = not_cell_stochastic(state_not_I3_S0, params_not, Omega) # not S1: I3_S1 state_not_I3_S1 = L_I3_S1, I3_out, S1, N_I3_S1, N_I3_S1 p_I3_S1 = not_cell_stochastic(state_not_I3_S1, params_not, Omega) # not I3: I3_I3 state_not_I3_I3 = L_I3_I3, I3_out, I3, N_I3_I3, N_I3_I3 p_I3_I3 = not_cell_stochastic(state_not_I3_I3, params_not, Omega) """ out """ # not I0: I0 state_not_I0 = L_I0, out, I0_out, N_I0, N_I0 p_I0 = not_cell_stochastic(state_not_I0, params_not, Omega) # not I1: I1 state_not_I1 = L_I1, out, I1_out, N_I1, N_I1 p_I1 = not_cell_stochastic(state_not_I1, params_not, Omega) # not I2: I2 state_not_I2 = L_I2, out, I2_out, N_I2, N_I2 p_I2 = not_cell_stochastic(state_not_I2, params_not, Omega) # not I3: I3 state_not_I3 = L_I3, out, I3_out, N_I3, N_I3 p_I3 = not_cell_stochastic(state_not_I3, params_not, Omega) return (p_I0_S0 + p_I0_S1 + p_I0_I0 + p_I1_S0 + p_I1_S1 + p_I1_I1 + p_I2_S0 + p_I2_S1 + p_I2_I2 + p_I3_S0 + p_I3_S1 + p_I3_I3 + p_I0 + p_I1 + p_I2 + p_I3) def MUX_8_1_model_stochastic(state, params, Omega): delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X = params params_yes = gamma_x, n_y, theta_x, delta_x, rho_x params_not = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x I0, I1, I2, I3, I4, I5, I6, I7, S0, S1, S2 = state[:11] I0_out, I1_out, I2_out, I3_out, I4_out, I5_out, I6_out, I7_out = state[11:19] L_I0_I0, L_I1_S0, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S0, L_I3_S1, L_I3_I3, L_I4_S2, L_I4_I4, L_I5_S0, L_I5_S2, L_I5_I5, L_I6_S1, L_I6_S2, L_I6_I6, L_I7_S0,\ L_I7_S1, L_I7_S2, L_I7_I7, L_I0, L_I1, L_I2, L_I3, L_I4, L_I5, L_I6, L_I7 = state[19:47] N_I0_S0, N_I0_S1, N_I0_S2, N_I0_I0,\ N_I1_S0, N_I1_S1, N_I1_S2, N_I1_I1,\ N_I2_S0, N_I2_S1, N_I2_S2, N_I2_I2,\ N_I3_S0, N_I3_S1, N_I3_S2, N_I3_I3,\ N_I4_S0, N_I4_S1, N_I4_S2, N_I4_I4,\ N_I5_S0, N_I5_S1, N_I5_S2, N_I5_I5,\ N_I6_S0, N_I6_S1, N_I6_S2, 
N_I6_I6,\ N_I7_S0, N_I7_S1, N_I7_S2, N_I7_I7,\ N_I0, N_I1, N_I2, N_I3, N_I4, N_I5, N_I6, N_I7 = state[47:87] out = state[87] """ I0 """ # yes S0: I0_S0 state_yes_I0_S0 = I0_out, S0, N_I0_S0, N_I0_S0 p_I0_S0 = yes_cell_stochastic(state_yes_I0_S0, params_yes, Omega) # yes S1: I0_S1 state_yes_I0_S1 = I0_out, S1, N_I0_S1, N_I0_S1 p_I0_S1 = yes_cell_stochastic(state_yes_I0_S1, params_yes, Omega) # yes S2: I0_S2 state_yes_I0_S2 = I0_out, S2, N_I0_S2, N_I0_S2 p_I0_S2 = yes_cell_stochastic(state_yes_I0_S2, params_yes, Omega) # not I0: I0_I0 state_not_I0_I0 = L_I0_I0, I0_out, I0, N_I0_I0, N_I0_I0 p_I0_I0 = not_cell_stochastic(state_not_I0_I0, params_not, Omega) """ I1 """ # not S0: I1_S0 state_not_I1_S0 = L_I1_S0, I1_out, S0, N_I1_S0, N_I1_S0 p_I1_S0 = not_cell_stochastic(state_not_I1_S0, params_not, Omega) # yes S1: I1_S1 state_yes_I1_S1 = I1_out, S1, N_I1_S1, N_I1_S1 p_I1_S1 = yes_cell_stochastic(state_yes_I1_S1, params_yes, Omega) # yes S2: I1_S2 state_yes_I1_S2 = I1_out, S2, N_I1_S2, N_I1_S2 p_I1_S2 = yes_cell_stochastic(state_yes_I1_S2, params_yes, Omega) # not I1: I1_I1 state_not_I1_I1 = L_I1_I1, I1_out, I1, N_I1_I1, N_I1_I1 p_I1_I1 = not_cell_stochastic(state_not_I1_I1, params_not, Omega) """ I2 """ # yes S0: I2_S0 state_yes_I2_S0 = I2_out, S0, N_I2_S0, N_I2_S0 p_I2_S0 = yes_cell_stochastic(state_yes_I2_S0, params_yes, Omega) # not S1: I2_S1 state_not_I2_S1 = L_I2_S1, I2_out, S1, N_I2_S1, N_I2_S1 p_I2_S1 = not_cell_stochastic(state_not_I2_S1, params_not, Omega) # yes S2: I2_S2 state_yes_I2_S2 = I2_out, S2, N_I2_S2, N_I2_S2 p_I2_S2 = yes_cell_stochastic(state_yes_I2_S2, params_yes, Omega) # not I2: I2_I2 state_not_I2_I2 = L_I2_I2, I2_out, I2, N_I2_I2, N_I2_I2 p_I2_I2 = not_cell_stochastic(state_not_I2_I2, params_not, Omega) """ I3 """ # not S0: I3_S0 state_not_I3_S0 = L_I3_S0, I3_out, S0, N_I3_S0, N_I3_S0 p_I3_S0 = not_cell_stochastic(state_not_I3_S0, params_not, Omega) # not S1: I3_S1 state_not_I3_S1 = L_I3_S1, I3_out, S1, N_I3_S1, N_I3_S1 p_I3_S1 = not_cell_stochastic(state_not_I3_S1, params_not, Omega) # yes S2: I3_S2 state_yes_I3_S2 = I3_out, S2, N_I3_S2, N_I3_S2 p_I3_S2 = yes_cell_stochastic(state_yes_I3_S2, params_yes, Omega) # not I3: I3_I3 state_not_I3_I3 = L_I3_I3, I3_out, I3, N_I3_I3, N_I3_I3 p_I3_I3 = not_cell_stochastic(state_not_I3_I3, params_not, Omega) """ I4 """ # yes S0: I4_S0 state_yes_I4_S0 = I4_out, S0, N_I4_S0, N_I4_S0 p_I4_S0 = yes_cell_stochastic(state_yes_I4_S0, params_yes, Omega) # yes S1: I4_S1 state_yes_I4_S1 = I4_out, S1, N_I4_S1, N_I4_S1 p_I4_S1 = yes_cell_stochastic(state_yes_I4_S1, params_yes, Omega) # not S2: I4_S2 state_not_I4_S2 = L_I4_S2, I4_out, S2, N_I4_S2, N_I4_S2 p_I4_S2 = not_cell_stochastic(state_not_I4_S2, params_not, Omega) # not I4: I4_I4 state_not_I4_I4 = L_I4_I4, I4_out, I4, N_I4_I4, N_I4_I4 p_I4_I4 = not_cell_stochastic(state_not_I4_I4, params_not, Omega) """ I5 """ # not S0: I5_S0 state_not_I5_S0 = L_I5_S0, I5_out, S0, N_I5_S0, N_I5_S0 p_I5_S0 = not_cell_stochastic(state_not_I5_S0, params_not, Omega) # yes S1: I5_S1 state_yes_I5_S1 = I5_out, S1, N_I5_S1, N_I5_S1 p_I5_S1 = yes_cell_stochastic(state_yes_I5_S1, params_yes, Omega) # not S2: I5_S2 state_not_I5_S2 = L_I5_S2, I5_out, S2, N_I5_S2, N_I5_S2 p_I5_S2 = not_cell_stochastic(state_not_I5_S2, params_not, Omega) # not I5: I5_I5 state_not_I5_I5 = L_I5_I5, I5_out, I5, N_I5_I5, N_I5_I5 p_I5_I5 = not_cell_stochastic(state_not_I5_I5, params_not, Omega) """ I6 """ # yes S0: I6_S0 state_yes_I6_S0 = I6_out, S0, N_I6_S0, N_I6_S0 p_I6_S0 = yes_cell_stochastic(state_yes_I6_S0, params_yes, Omega) # not S1: 
I6_S1 state_not_I6_S1 = L_I6_S1, I6_out, S1, N_I6_S1, N_I6_S1 p_I6_S1 = not_cell_stochastic(state_not_I6_S1, params_not, Omega) # not S2: I6_S2 state_not_I6_S2 = L_I6_S2, I6_out, S2, N_I6_S2, N_I6_S2 p_I6_S2 = not_cell_stochastic(state_not_I6_S2, params_not, Omega) # not I6: I6_I6 state_not_I6_I6 = L_I6_I6, I6_out, I6, N_I6_I6, N_I6_I6 p_I6_I6 = not_cell_stochastic(state_not_I6_I6, params_not, Omega) """ I7 """ # not S0: I7_S0 state_not_I7_S0 = L_I7_S0, I7_out, S0, N_I7_S0, N_I7_S0 p_I7_S0 = not_cell_stochastic(state_not_I7_S0, params_not, Omega) # not S1: I7_S1 state_not_I7_S1 = L_I7_S1, I7_out, S1, N_I7_S1, N_I7_S1 p_I7_S1 = not_cell_stochastic(state_not_I7_S1, params_not, Omega) # not S2: I7_S2 state_not_I7_S2 = L_I7_S2, I7_out, S2, N_I7_S2, N_I7_S2 p_I7_S2 = not_cell_stochastic(state_not_I7_S2, params_not, Omega) # not I7: I7_I7 state_not_I7_I7 = L_I7_I7, I7_out, I7, N_I7_I7, N_I7_I7 p_I7_I7 = not_cell_stochastic(state_not_I7_I7, params_not, Omega) """ out """ # not I0: I0 state_not_I0 = L_I0, out, I0_out, N_I0, N_I0 p_I0 = not_cell_stochastic(state_not_I0, params_not, Omega) # not I1: I1 state_not_I1 = L_I1, out, I1_out, N_I1, N_I1 p_I1 = not_cell_stochastic(state_not_I1, params_not, Omega) # not I2: I2 state_not_I2 = L_I2, out, I2_out, N_I2, N_I2 p_I2 = not_cell_stochastic(state_not_I2, params_not, Omega) # not I3: I3 state_not_I3 = L_I3, out, I3_out, N_I3, N_I3 p_I3 = not_cell_stochastic(state_not_I3, params_not, Omega) # not I4: I4 state_not_I4 = L_I4, out, I4_out, N_I4, N_I4 p_I4 = not_cell_stochastic(state_not_I4, params_not, Omega) # not I5: I5 state_not_I5 = L_I5, out, I5_out, N_I5, N_I5 p_I5 = not_cell_stochastic(state_not_I5, params_not, Omega) # not I6: I6 state_not_I6 = L_I6, out, I6_out, N_I6, N_I6 p_I6 = not_cell_stochastic(state_not_I6, params_not, Omega) # not I7: I7 state_not_I7 = L_I7, out, I7_out, N_I7, N_I7 p_I7 = not_cell_stochastic(state_not_I7, params_not, Omega) return (p_I0_S0 + p_I0_S1 + p_I0_S2 + p_I0_I0 + p_I1_S0 + p_I1_S1 + p_I1_S2 + p_I1_I1 + p_I2_S0 + p_I2_S1 + p_I2_S2 + p_I2_I2 + p_I3_S0 + p_I3_S1 + p_I3_S2 + p_I3_I3 + p_I4_S0 + p_I4_S1 + p_I4_S2 + p_I4_I4 + p_I5_S0 + p_I5_S1 + p_I5_S2 + p_I5_I5 + p_I6_S0 + p_I6_S1 + p_I6_S2 + p_I6_I6 + p_I7_S0 + p_I7_S1 + p_I7_S2 + p_I7_I7 + p_I0 + p_I1 + p_I2 + p_I3 + p_I4 + p_I5 + p_I6 + p_I7) def CLB_generate_stoichiometry(): N_toggle_IO = toggle_generate_stoichiometry() N_toggle_I1 = toggle_generate_stoichiometry() N_toggle_I2 = toggle_generate_stoichiometry() N_toggle_I3 = toggle_generate_stoichiometry() N_mux = MUX_4_1_generate_stoichiometry() # skip first four rows (I0, I1, I2, I3) N_mux = N_mux[4:,:] return merge_N(merge_N(merge_N(merge_N(N_toggle_IO, N_toggle_I1), N_toggle_I2), N_toggle_I3), N_mux) def CLB_8_generate_stoichiometry(): N_toggle_IO = toggle_generate_stoichiometry() N_toggle_I1 = toggle_generate_stoichiometry() N_toggle_I2 = toggle_generate_stoichiometry() N_toggle_I3 = toggle_generate_stoichiometry() N_toggle_I4 = toggle_generate_stoichiometry() N_toggle_I5 = toggle_generate_stoichiometry() N_toggle_I6 = toggle_generate_stoichiometry() N_toggle_I7 = toggle_generate_stoichiometry() N_mux = MUX_8_1_generate_stoichiometry() # skip first eight rows (I0, I1, I2, I3, I4, I5, I6, I7) N_mux = N_mux[8:,:] return merge_N(merge_N(merge_N(merge_N(merge_N(merge_N(merge_N(merge_N(N_toggle_IO, N_toggle_I1), N_toggle_I2), N_toggle_I3), N_toggle_I4), N_toggle_I5), N_toggle_I6), N_toggle_I7), N_mux) def CLB_model(state, T, params): delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, 
rho_y, gamma_x, theta_x, r_X, r_Y, rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = params """ latches """ ######### # params # set params for symmetric toggle switch topology gamma_L_Y, theta_L_Y = gamma_L_X, theta_L_X n_x, m_y = n_y, m_x eta_y, omega_y = eta_x, omega_x params_toggle = [delta_L, gamma_L_X, gamma_L_Y, n_x, n_y, theta_L_X, theta_L_Y, eta_x, eta_y, omega_x, omega_y, m_x, m_y, delta_x, delta_y, rho_x, rho_y, r_X, r_Y] # degradation rates for induction of switches are specific for each toggle switch params_toggle_I0 = params_toggle.copy() params_toggle_I0[-4:-2] = rho_I0_a, rho_I0_b params_toggle_I1 = params_toggle.copy() params_toggle_I1[-4:-2] = rho_I1_a, rho_I1_b params_toggle_I2 = params_toggle.copy() params_toggle_I2[-4:-2] = rho_I2_a, rho_I2_b params_toggle_I3 = params_toggle.copy() params_toggle_I3[-4:-2] = rho_I3_a, rho_I3_b ######### # states # latch I0 I0_L_A, I0_L_B, I0_a, I0_b, I0_N_a, I0_N_b = state[:6] state_toggle_IO = I0_L_A, I0_L_B, I0_a, I0_b, I0_N_a, I0_N_b # latch I1 I1_L_A, I1_L_B, I1_a, I1_b, I1_N_a, I1_N_b = state[6:12] state_toggle_I1 = I1_L_A, I1_L_B, I1_a, I1_b, I1_N_a, I1_N_b # latch I2 I2_L_A, I2_L_B, I2_a, I2_b, I2_N_a, I2_N_b = state[12:18] state_toggle_I2 = I2_L_A, I2_L_B, I2_a, I2_b, I2_N_a, I2_N_b # latch I3 I3_L_A, I3_L_B, I3_a, I3_b, I3_N_a, I3_N_b = state[18:24] state_toggle_I3 = I3_L_A, I3_L_B, I3_a, I3_b, I3_N_a, I3_N_b ######### # models dstate_toggle_IO = toggle_model(state_toggle_IO, T, params_toggle_I0) dstate_toggle_I1 = toggle_model(state_toggle_I1, T, params_toggle_I1) dstate_toggle_I2 = toggle_model(state_toggle_I2, T, params_toggle_I2) dstate_toggle_I3 = toggle_model(state_toggle_I3, T, params_toggle_I3) dstate_toggles = np.append( np.append(np.append(dstate_toggle_IO, dstate_toggle_I1, axis=0), dstate_toggle_I2, axis=0), dstate_toggle_I3, axis=0) """ mux """ ######### # params params_mux = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X ######### # state I0, I1, I2, I3 = I0_a, I1_a, I2_a, I3_a state_mux = np.append([I0, I1, I2, I3], state[24:], axis=0) ######## # model dstate_mux = MUX_4_1_model(state_mux, T, params_mux) dstate_mux = dstate_mux[4:] # ignore dI0, dI1, dI2, dI3 """ return """ dstate = np.append(dstate_toggles, dstate_mux, axis=0) return dstate def CLB_model_8(state, T, params): delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, rho_y, gamma_x, theta_x, r_X, r_Y, rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b, rho_I4_a, rho_I4_b, rho_I5_a, rho_I5_b, rho_I6_a, rho_I6_b, rho_I7_a, rho_I7_b = params """ latches """ ######### # params # set params for symmetric toggle switch topology gamma_L_Y, theta_L_Y = gamma_L_X, theta_L_X n_x, m_y = n_y, m_x eta_y, omega_y = eta_x, omega_x params_toggle = [delta_L, gamma_L_X, gamma_L_Y, n_x, n_y, theta_L_X, theta_L_Y, eta_x, eta_y, omega_x, omega_y, m_x, m_y, delta_x, delta_y, rho_x, rho_y, r_X, r_Y] # degradation rates for induction of switches are specific for each toggle switch params_toggle_I0 = params_toggle.copy() params_toggle_I0[-4:-2] = rho_I0_a, rho_I0_b params_toggle_I1 = params_toggle.copy() params_toggle_I1[-4:-2] = rho_I1_a, rho_I1_b params_toggle_I2 = params_toggle.copy() params_toggle_I2[-4:-2] = rho_I2_a, rho_I2_b params_toggle_I3 = params_toggle.copy() params_toggle_I3[-4:-2] = rho_I3_a, rho_I3_b params_toggle_I4 = params_toggle.copy() params_toggle_I4[-4:-2] = rho_I4_a, rho_I4_b params_toggle_I5 = params_toggle.copy() 
params_toggle_I5[-4:-2] = rho_I5_a, rho_I5_b params_toggle_I6 = params_toggle.copy() params_toggle_I6[-4:-2] = rho_I6_a, rho_I6_b params_toggle_I7 = params_toggle.copy() params_toggle_I7[-4:-2] = rho_I7_a, rho_I7_b ######### # states # latch I0 I0_L_A, I0_L_B, I0_a, I0_b, I0_N_a, I0_N_b = state[:6] state_toggle_IO = I0_L_A, I0_L_B, I0_a, I0_b, I0_N_a, I0_N_b # latch I1 I1_L_A, I1_L_B, I1_a, I1_b, I1_N_a, I1_N_b = state[6:12] state_toggle_I1 = I1_L_A, I1_L_B, I1_a, I1_b, I1_N_a, I1_N_b # latch I2 I2_L_A, I2_L_B, I2_a, I2_b, I2_N_a, I2_N_b = state[12:18] state_toggle_I2 = I2_L_A, I2_L_B, I2_a, I2_b, I2_N_a, I2_N_b # latch I3 I3_L_A, I3_L_B, I3_a, I3_b, I3_N_a, I3_N_b = state[18:24] state_toggle_I3 = I3_L_A, I3_L_B, I3_a, I3_b, I3_N_a, I3_N_b # latch I4 I4_L_A, I4_L_B, I4_a, I4_b, I4_N_a, I4_N_b = state[24:30] state_toggle_I4 = I4_L_A, I4_L_B, I4_a, I4_b, I4_N_a, I4_N_b # latch I5 I5_L_A, I5_L_B, I5_a, I5_b, I5_N_a, I5_N_b = state[30:36] state_toggle_I5 = I5_L_A, I5_L_B, I5_a, I5_b, I5_N_a, I5_N_b # latch I6 I6_L_A, I6_L_B, I6_a, I6_b, I6_N_a, I6_N_b = state[36:42] state_toggle_I6 = I6_L_A, I6_L_B, I6_a, I6_b, I6_N_a, I6_N_b # latch I7 I7_L_A, I7_L_B, I7_a, I7_b, I7_N_a, I7_N_b = state[42:48] state_toggle_I7 = I7_L_A, I7_L_B, I7_a, I7_b, I7_N_a, I7_N_b # models dstate_toggle_IO = toggle_model(state_toggle_IO, T, params_toggle_I0) dstate_toggle_I1 = toggle_model(state_toggle_I1, T, params_toggle_I1) dstate_toggle_I2 = toggle_model(state_toggle_I2, T, params_toggle_I2) dstate_toggle_I3 = toggle_model(state_toggle_I3, T, params_toggle_I3) dstate_toggle_I4 = toggle_model(state_toggle_I4, T, params_toggle_I4) dstate_toggle_I5 = toggle_model(state_toggle_I5, T, params_toggle_I5) dstate_toggle_I6 = toggle_model(state_toggle_I6, T, params_toggle_I6) dstate_toggle_I7 = toggle_model(state_toggle_I7, T, params_toggle_I7) dstate_toggles = np.append(np.append(np.append(np.append(np.append(np.append(np.append(dstate_toggle_IO, dstate_toggle_I1, axis=0), dstate_toggle_I2, axis = 0), dstate_toggle_I3, axis = 0), dstate_toggle_I4, axis = 0), dstate_toggle_I5, axis = 0), dstate_toggle_I6, axis = 0), dstate_toggle_I7, axis = 0) """ mux """ ######### # params params_mux = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X # state I0, I1, I2, I3, I4, I5, I6, I7 = I0_a, I1_a, I2_a, I3_a, I4_a, I5_a, I6_a, I7_a state_mux = np.append([I0, I1, I2, I3, I4, I5, I6, I7], state[48:], axis=0) # model dstate_mux = MUX_8_1_model(state_mux, T, params_mux) dstate_mux = dstate_mux[8:] # ignore dI0, dI1, dI2, dI3, dI4, dI5, dI6, dI7 """ return """ dstate = np.append(dstate_toggles, dstate_mux, axis = 0) return dstate def CLB_model_stochastic(state, params, Omega): delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, rho_y, gamma_x, theta_x, r_X, r_Y, rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = params """ latches """ ######### # params # set params for symmetric toggle switch topology gamma_L_Y, theta_L_Y = gamma_L_X, theta_L_X n_x, m_y = n_y, m_x eta_y, omega_y = eta_x, omega_x params_toggle = [delta_L, gamma_L_X, gamma_L_Y, n_x, n_y, theta_L_X, theta_L_Y, eta_x, eta_y, omega_x, omega_y, m_x, m_y, delta_x, delta_y, rho_x, rho_y, r_X, r_Y] # degradation rates for induction of switches are specific for each toggle switch params_toggle_I0 = params_toggle.copy() params_toggle_I0[-4:-2] = rho_I0_a, rho_I0_b params_toggle_I1 = params_toggle.copy() params_toggle_I1[-4:-2] = rho_I1_a, rho_I1_b params_toggle_I2 = 
params_toggle.copy() params_toggle_I2[-4:-2] = rho_I2_a, rho_I2_b params_toggle_I3 = params_toggle.copy() params_toggle_I3[-4:-2] = rho_I3_a, rho_I3_b ######### # states # latch I0 I0_L_A, I0_L_B, I0_a, I0_b, I0_N_a, I0_N_b = state[:6] state_toggle_IO = I0_L_A, I0_L_B, I0_a, I0_b, I0_N_a, I0_N_b # latch I1 I1_L_A, I1_L_B, I1_a, I1_b, I1_N_a, I1_N_b = state[6:12] state_toggle_I1 = I1_L_A, I1_L_B, I1_a, I1_b, I1_N_a, I1_N_b # latch I2 I2_L_A, I2_L_B, I2_a, I2_b, I2_N_a, I2_N_b = state[12:18] state_toggle_I2 = I2_L_A, I2_L_B, I2_a, I2_b, I2_N_a, I2_N_b # latch I3 I3_L_A, I3_L_B, I3_a, I3_b, I3_N_a, I3_N_b = state[18:24] state_toggle_I3 = I3_L_A, I3_L_B, I3_a, I3_b, I3_N_a, I3_N_b ######### # models p_toggle_IO = toggle_model_stochastic(state_toggle_IO, params_toggle_I0, Omega) p_toggle_I1 = toggle_model_stochastic(state_toggle_I1, params_toggle_I1, Omega) p_toggle_I2 = toggle_model_stochastic(state_toggle_I2, params_toggle_I2, Omega) p_toggle_I3 = toggle_model_stochastic(state_toggle_I3, params_toggle_I3, Omega) """ mux """ ######### # params params_mux = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X ######### # state I0, I1, I2, I3 = I0_a, I1_a, I2_a, I3_a state_mux = np.append([I0, I1, I2, I3], state[24:], axis=0) ######## # model p_mux = MUX_4_1_model_stochastic(state_mux, params_mux, Omega) """ return """ return p_toggle_IO + p_toggle_I1 + p_toggle_I2 + p_toggle_I3 + p_mux def CLB_model_8_stochastic(state, params, Omega): delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, rho_y, gamma_x, theta_x, r_X, r_Y, rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b, rho_I4_a, rho_I4_b, rho_I5_a, rho_I5_b, rho_I6_a, rho_I6_b, rho_I7_a, rho_I7_b = params """ latches """ ######### # params # set params for symmetric toggle switch topology gamma_L_Y, theta_L_Y = gamma_L_X, theta_L_X n_x, m_y = n_y, m_x eta_y, omega_y = eta_x, omega_x params_toggle = [delta_L, gamma_L_X, gamma_L_Y, n_x, n_y, theta_L_X, theta_L_Y, eta_x, eta_y, omega_x, omega_y, m_x, m_y, delta_x, delta_y, rho_x, rho_y, r_X, r_Y] # degradation rates for induction of switches are specific for each toggle switch params_toggle_I0 = params_toggle.copy() params_toggle_I0[-4:-2] = rho_I0_a, rho_I0_b params_toggle_I1 = params_toggle.copy() params_toggle_I1[-4:-2] = rho_I1_a, rho_I1_b params_toggle_I2 = params_toggle.copy() params_toggle_I2[-4:-2] = rho_I2_a, rho_I2_b params_toggle_I3 = params_toggle.copy() params_toggle_I3[-4:-2] = rho_I3_a, rho_I3_b params_toggle_I4 = params_toggle.copy() params_toggle_I4[-4:-2] = rho_I4_a, rho_I4_b params_toggle_I5 = params_toggle.copy() params_toggle_I5[-4:-2] = rho_I5_a, rho_I5_b params_toggle_I6 = params_toggle.copy() params_toggle_I6[-4:-2] = rho_I6_a, rho_I6_b params_toggle_I7 = params_toggle.copy() params_toggle_I7[-4:-2] = rho_I7_a, rho_I7_b ######### # states # latch I0 I0_L_A, I0_L_B, I0_a, I0_b, I0_N_a, I0_N_b = state[:6] state_toggle_IO = I0_L_A, I0_L_B, I0_a, I0_b, I0_N_a, I0_N_b # latch I1 I1_L_A, I1_L_B, I1_a, I1_b, I1_N_a, I1_N_b = state[6:12] state_toggle_I1 = I1_L_A, I1_L_B, I1_a, I1_b, I1_N_a, I1_N_b # latch I2 I2_L_A, I2_L_B, I2_a, I2_b, I2_N_a, I2_N_b = state[12:18] state_toggle_I2 = I2_L_A, I2_L_B, I2_a, I2_b, I2_N_a, I2_N_b # latch I3 I3_L_A, I3_L_B, I3_a, I3_b, I3_N_a, I3_N_b = state[18:24] state_toggle_I3 = I3_L_A, I3_L_B, I3_a, I3_b, I3_N_a, I3_N_b # latch I4 I4_L_A, I4_L_B, I4_a, I4_b, I4_N_a, I4_N_b = state[24:30] state_toggle_I4 = I4_L_A, I4_L_B, I4_a, I4_b, I4_N_a, I4_N_b # 
latch I5 I5_L_A, I5_L_B, I5_a, I5_b, I5_N_a, I5_N_b = state[30:36] state_toggle_I5 = I5_L_A, I5_L_B, I5_a, I5_b, I5_N_a, I5_N_b # latch I6 I6_L_A, I6_L_B, I6_a, I6_b, I6_N_a, I6_N_b = state[36:42] state_toggle_I6 = I6_L_A, I6_L_B, I6_a, I6_b, I6_N_a, I6_N_b # latch I7 I7_L_A, I7_L_B, I7_a, I7_b, I7_N_a, I7_N_b = state[42:48] state_toggle_I7 = I7_L_A, I7_L_B, I7_a, I7_b, I7_N_a, I7_N_b ######### # models p_toggle_IO = toggle_model_stochastic(state_toggle_IO, params_toggle_I0, Omega) p_toggle_I1 = toggle_model_stochastic(state_toggle_I1, params_toggle_I1, Omega) p_toggle_I2 = toggle_model_stochastic(state_toggle_I2, params_toggle_I2, Omega) p_toggle_I3 = toggle_model_stochastic(state_toggle_I3, params_toggle_I3, Omega) p_toggle_I4 = toggle_model_stochastic(state_toggle_I4, params_toggle_I4, Omega) p_toggle_I5 = toggle_model_stochastic(state_toggle_I5, params_toggle_I5, Omega) p_toggle_I6 = toggle_model_stochastic(state_toggle_I6, params_toggle_I6, Omega) p_toggle_I7 = toggle_model_stochastic(state_toggle_I7, params_toggle_I7, Omega) """ mux """ ######### # params params_mux = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X ######### # state I0, I1, I2, I3, I4, I5, I6, I7 = I0_a, I1_a, I2_a, I3_a, I4_a, I5_a, I6_a, I7_a state_mux = np.append([I0, I1, I2, I3, I4, I5, I6, I7], state[48:], axis=0) ######## # model #p_mux = MUX_8_1_model_stochastic(state_mux, params_mux, Omega) p_mux = MUX_8_1_model_stochastic(state_mux, params_mux, Omega) """ return """ return p_toggle_IO + p_toggle_I1 + p_toggle_I2 + p_toggle_I3 + p_toggle_I4 + p_toggle_I5 + p_toggle_I6 + p_toggle_I7 + p_mux """ wrappers for scipy.integrate.ode """ def toggle_model_ODE(T, state, params): return toggle_model(state, T, params) def not_model_ODE(T, state, params): return not_model(state, T, params) def yes_model_ODE(T, state, params): return yes_model(state, T, params) def MUX_4_1_model_ODE(T, state, params): return MUX_4_1_model(state, T, params) def MUX_8_1_model_ODE(T, state, params): return MUX_8_1_model(state, T, params) def CLB_model_ODE(T, state, params): return CLB_model(state, T, params) def CLB_8_model_ODE(T, state, params): return CLB_model_8(state, T, params)
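# --- Hedged usage sketch (not part of the original file) ---------------------
# The *_ODE wrappers above exist so these models can be driven by
# scipy.integrate.ode, which expects the signature f(t, y, ...). The sketch
# below integrates the toggle switch through toggle_model_ODE; every parameter
# value, the initial condition and the time grid are illustrative placeholders,
# not values taken from the original work.
import numpy as np
from scipy.integrate import ode

# 19-tuple expected by toggle_model: delta_L, gamma_A, gamma_B, n_a, n_b,
# theta_A, theta_B, eta_a, eta_b, omega_a, omega_b, m_a, m_b, delta_a, delta_b,
# rho_a, rho_b, r_A, r_B (placeholder values)
params_toggle_demo = (0.01, 1.0, 1.0, 2.0, 2.0, 0.1, 0.1, 1.0, 1.0,
                      0.1, 0.1, 2.0, 2.0, 0.01, 0.01, 0.0, 0.0, 0.5, 0.5)

# initial condition: L_A, L_B, a, b, N_A, N_B (placeholders)
y0 = np.array([0.0, 0.0, 1.0, 0.0, 0.5, 0.5])

solver = ode(toggle_model_ODE).set_integrator('lsoda')
solver.set_initial_value(y0, 0.0).set_f_params(params_toggle_demo)

ts, ys = [0.0], [y0]
while solver.successful() and solver.t < 100.0:
    ys.append(solver.integrate(solver.t + 0.1))
    ts.append(solver.t)
# ys now holds the simulated trajectory of the toggle switch state.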
py
b414777e4c9ca59289ebaf910d919996a08d04b6
# -*- coding: utf-8 -*-
#
# eso-blog
# Copyright (c) 2007-2017, European Southern Observatory (ESO)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#
#    * Redistributions in binary form must reproduce the above copyright
#      notice, this list of conditions and the following disclaimer in the
#      documentation and/or other materials provided with the distribution.
#
#    * Neither the name of the European Southern Observatory nor the names
#      of its contributors may be used to endorse or promote products derived
#      from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ESO ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL ESO BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE

from __future__ import unicode_literals

from django.template import engines

from djangoplicity.archives.views import GenericDetailView


class PostDetailView(GenericDetailView):
    def render(self, request, model, obj, state, admin_rights, **kwargs):
        '''
        Override render to pre-render the post body as it can contain
        template tags
        '''
        tpl = engines['django'].from_string(obj.body)
        obj.body = tpl.render()

        return super(PostDetailView, self).render(request, model, obj, state,
                                                  admin_rights, **kwargs)
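# --- Hypothetical illustration (not part of the original file) ---------------
# The override above pre-renders obj.body because a blog post body may itself
# contain Django template tags. A minimal sketch of what that step does:
from django.template import engines

body = "Published on {% now 'Y-m-d' %} by the blog team."
rendered = engines['django'].from_string(body).render()
# 'rendered' now contains the evaluated date instead of the raw {% now %} tag,
# and it is this pre-rendered string that the detail view goes on to display.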
py
b41478c1c171461d6513648a68745e9b706ba031
def bitcoinToEuros(bitcoin_amount, bitcoin_value_euros):
    euros_value = bitcoin_amount * bitcoin_value_euros
    return euros_value


bitcoin_to_euros = 25000
valor_bitcoin = bitcoinToEuros(1, bitcoin_to_euros)
print(valor_bitcoin)
if valor_bitcoin <= 30000:
    print("the value is below 30000", valor_bitcoin)
py
b41478faf2a5583639a42e717e4a39a2cb468f99
import rich_click as click

# Show the positional arguments
click.rich_click.SHOW_ARGUMENTS = True
# Uncomment this line to group the arguments together with the options
# click.rich_click.GROUP_ARGUMENTS_OPTIONS = True


@click.command()
@click.argument("input", type=click.Path(), required=True)
@click.option(
    "--type",
    default="files",
    show_default=True,
    help="Type of file to sync",
)
@click.option("--all", is_flag=True, help="Sync all the things?")
@click.option("--debug/--no-debug", "-d/-n", default=False, help="Enable debug mode")
def cli(input, type, all, debug):
    """
    My amazing tool does all the things.

    This is a minimal example based on documentation from the 'click' package.

    You can try using --help at the top level and also for specific group
    subcommands.
    """
    print(f"Debug mode is {'on' if debug else 'off'}")


if __name__ == "__main__":
    cli()
py
b41479e4291026aa8183a4c89f8afe04ee4b3d32
import io import os import zipfile import tarfile import gzip import shutil from functools import partial import torch.utils.data from .utils import RandomShuffler from .example import Example from ..utils import download_from_url, unicode_csv_reader class Dataset(torch.utils.data.Dataset): """Defines a dataset composed of Examples along with its Fields. Attributes: sort_key (callable): A key to use for sorting dataset examples for batching together examples with similar lengths to minimize padding. examples (list(Example)): The examples in this dataset. fields (dict[str, Field]): Contains the name of each column or field, together with the corresponding Field object. Two fields with the same Field object will have a shared vocabulary. """ sort_key = None def __init__(self, examples, fields, filter_pred=None): """Create a dataset from a list of Examples and Fields. Arguments: examples: List of Examples. fields (List(tuple(str, Field))): The Fields to use in this tuple. The string is a field name, and the Field is the associated field. filter_pred (callable or None): Use only examples for which filter_pred(example) is True, or use all examples if None. Default is None. """ if filter_pred is not None: make_list = isinstance(examples, list) examples = filter(filter_pred, examples) if make_list: examples = list(examples) self.examples = examples self.fields = dict(fields) # Unpack field tuples for n, f in list(self.fields.items()): if isinstance(n, tuple): self.fields.update(zip(n, f)) del self.fields[n] @classmethod def splits(cls, path=None, root='.data', train=None, validation=None, test=None, **kwargs): """Create Dataset objects for multiple splits of a dataset. Arguments: path (str): Common prefix of the splits' file paths, or None to use the result of cls.download(root). root (str): Root dataset storage directory. Default is '.data'. train (str): Suffix to add to path for the train set, or None for no train set. Default is None. validation (str): Suffix to add to path for the validation set, or None for no validation set. Default is None. test (str): Suffix to add to path for the test set, or None for no test set. Default is None. Remaining keyword arguments: Passed to the constructor of the Dataset (sub)class being used. Returns: Tuple[Dataset]: Datasets for train, validation, and test splits in that order, if provided. """ if path is None: path = cls.download(root) train_data = None if train is None else cls( os.path.join(path, train), **kwargs) val_data = None if validation is None else cls( os.path.join(path, validation), **kwargs) test_data = None if test is None else cls( os.path.join(path, test), **kwargs) return tuple(d for d in (train_data, val_data, test_data) if d is not None) def split(self, split_ratio=0.7, stratified=False, strata_field='label', random_state=None): """Create train-test(-valid?) splits from the instance's examples. Arguments: split_ratio (float or List of floats): a number [0, 1] denoting the amount of data to be used for the training split (rest is used for validation), or a list of numbers denoting the relative sizes of train, test and valid splits respectively. If the relative size for valid is missing, only the train-test split is returned. Default is 0.7 (for the train set). stratified (bool): whether the sampling should be stratified. Default is False. strata_field (str): name of the examples Field stratified over. Default is 'label' for the conventional label field. random_state (tuple): the random seed used for shuffling. 
A return value of `random.getstate()`. Returns: Tuple[Dataset]: Datasets for train, validation, and test splits in that order, if the splits are provided. """ train_ratio, test_ratio, val_ratio = check_split_ratio(split_ratio) # For the permutations rnd = RandomShuffler(random_state) if not stratified: train_data, test_data, val_data = rationed_split(self.examples, train_ratio, test_ratio, val_ratio, rnd) else: if strata_field not in self.fields: raise ValueError("Invalid field name for strata_field {}" .format(strata_field)) strata = stratify(self.examples, strata_field) train_data, test_data, val_data = [], [], [] for group in strata: # Stratify each group and add together the indices. group_train, group_test, group_val = rationed_split(group, train_ratio, test_ratio, val_ratio, rnd) train_data += group_train test_data += group_test val_data += group_val splits = tuple(Dataset(d, self.fields) for d in (train_data, val_data, test_data) if d) # In case the parent sort key isn't none if self.sort_key: for subset in splits: subset.sort_key = self.sort_key return splits def __getitem__(self, i): return self.examples[i] def __len__(self): try: return len(self.examples) except TypeError: return 2**32 def __iter__(self): for x in self.examples: yield x def __getattr__(self, attr): if attr in self.fields: for x in self.examples: yield getattr(x, attr) @classmethod def download(cls, root, check=None): """Download and unzip an online archive (.zip, .gz, or .tgz). Arguments: root (str): Folder to download data to. check (str or None): Folder whose existence indicates that the dataset has already been downloaded, or None to check the existence of root/{cls.name}. Returns: str: Path to extracted dataset. """ path = os.path.join(root, cls.name) check = path if check is None else check if not os.path.isdir(check): for url in cls.urls: if isinstance(url, tuple): url, filename = url else: filename = os.path.basename(url) zpath = os.path.join(path, filename) if not os.path.isfile(zpath): if not os.path.exists(os.path.dirname(zpath)): os.makedirs(os.path.dirname(zpath)) print('downloading {}'.format(filename)) download_from_url(url, zpath) zroot, ext = os.path.splitext(zpath) _, ext_inner = os.path.splitext(zroot) if ext == '.zip': with zipfile.ZipFile(zpath, 'r') as zfile: print('extracting') zfile.extractall(path) # tarfile cannot handle bare .gz files elif ext == '.tgz' or ext == '.gz' and ext_inner == '.tar': with tarfile.open(zpath, 'r:gz') as tar: dirs = [member for member in tar.getmembers()] tar.extractall(path=path, members=dirs) elif ext == '.gz': with gzip.open(zpath, 'rb') as gz: with open(zroot, 'wb') as uncompressed: shutil.copyfileobj(gz, uncompressed) return os.path.join(path, cls.dirname) class TabularDataset(Dataset): """Defines a Dataset of columns stored in CSV, TSV, or JSON format.""" def __init__(self, path, format, fields, skip_header=False, **kwargs): """Create a TabularDataset given a path, file format, and field list. Arguments: path (str): Path to the data file. format (str): The format of the data file. One of "CSV", "TSV", or "JSON" (case-insensitive). fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]: If using a list, the format must be CSV or TSV, and the values of the list should be tuples of (name, field). The fields should be in the same order as the columns in the CSV or TSV file, while tuples of (name, None) represent columns that will be ignored. 
If using a dict, the keys should be a subset of the JSON keys or CSV/TSV columns, and the values should be tuples of (name, field). Keys not present in the input dictionary are ignored. This allows the user to rename columns from their JSON/CSV/TSV key names and also enables selecting a subset of columns to load. skip_header (bool): Whether to skip the first line of the input file. """ format = format.lower() make_example = { 'json': Example.fromJSON, 'dict': Example.fromdict, 'tsv': Example.fromCSV, 'csv': Example.fromCSV}[format] with io.open(os.path.expanduser(path), encoding="utf8") as f: if format == 'csv': reader = unicode_csv_reader(f) elif format == 'tsv': reader = unicode_csv_reader(f, delimiter='\t') else: reader = f if format in ['csv', 'tsv'] and isinstance(fields, dict): if skip_header: raise ValueError('When using a dict to specify fields with a {} file,' 'skip_header must be False and' 'the file must have a header.'.format(format)) header = next(reader) field_to_index = {f: header.index(f) for f in fields.keys()} make_example = partial(make_example, field_to_index=field_to_index) if skip_header: next(reader) examples = [make_example(line, fields) for line in reader] if isinstance(fields, dict): fields, field_dict = [], fields for field in field_dict.values(): if isinstance(field, list): fields.extend(field) else: fields.append(field) super(TabularDataset, self).__init__(examples, fields, **kwargs) def check_split_ratio(split_ratio): """Check that the split ratio argument is not malformed""" valid_ratio = 0. if isinstance(split_ratio, float): # Only the train set relative ratio is provided # Assert in bounds, validation size is zero assert split_ratio > 0. and split_ratio < 1., ( "Split ratio {} not between 0 and 1".format(split_ratio)) test_ratio = 1. - split_ratio return (split_ratio, test_ratio, valid_ratio) elif isinstance(split_ratio, list): # A list of relative ratios is provided length = len(split_ratio) assert length == 2 or length == 3, ( "Length of split ratio list should be 2 or 3, got {}".format(split_ratio)) # Normalize if necessary ratio_sum = sum(split_ratio) if not ratio_sum == 1.: split_ratio = [float(ratio) / ratio_sum for ratio in split_ratio] if length == 2: return tuple(split_ratio + [valid_ratio]) return tuple(split_ratio) else: raise ValueError('Split ratio must be float or a list, got {}' .format(type(split_ratio))) def stratify(examples, strata_field): # The field has to be hashable otherwise this doesn't work # There's two iterations over the whole dataset here, which can be # reduced to just one if a dedicated method for stratified splitting is used unique_strata = set(getattr(example, strata_field) for example in examples) strata_maps = {s: [] for s in unique_strata} for example in examples: strata_maps[getattr(example, strata_field)].append(example) return list(strata_maps.values()) def rationed_split(examples, train_ratio, test_ratio, val_ratio, rnd): # Create a random permutation of examples, then split them # by ratio x length slices for each of the train/test/dev? splits N = len(examples) randperm = rnd(range(N)) train_len = int(round(train_ratio * N)) # Due to possible rounding problems if not val_ratio: test_len = N - train_len else: test_len = int(round(test_ratio * N)) indices = (randperm[:train_len], # Train randperm[train_len:train_len + test_len], # Test randperm[train_len + test_len:]) # Validation # There's a possibly empty list for the validation set data = tuple([examples[i] for i in index] for index in indices) return data
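# ----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of torchtext itself): build a
# TabularDataset from a two-column CSV ("text", "label") and return an 80/20
# train/test split. The field objects are supplied by the caller (typically
# torchtext Field instances); nothing about their API is assumed here, and the
# CSV path and column names are placeholders.
def _example_csv_split(csv_path, text_field, label_field, seed_state=None):
    dataset = TabularDataset(
        path=csv_path, format="csv", skip_header=True,
        fields=[("text", text_field), ("label", label_field)])
    return dataset.split(split_ratio=0.8, random_state=seed_state)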
py
b4147afd914519373ed3e04a81576a08c21a0305
#This script will look through the file_repository_new_runs folder for UAVSAR MLC zip files; create a new project structure for each; #and copy the python scripts and move the .mlc.zip files to the appropriate subfolders. Then it will run each script in sequence. #--update: Just download the DEM once, for example with my .ipynb tool. Then put it in docker_new_runs. This script will copy the files over to 0_ann and skip the DEM download ### ----!!! important...need to update the field "file name" in the dem.wgs84.xml each time. Or run fiximagexml import os, shutil, subprocess, time import numpy as np #---user specific input---# simrun = 0 #set to 1 if using simulated NISAR data as input #---end user specify input---# #step0 zerodir = '0_make_ann_dem' zerofn = '0_alos_dem.py' zerofn_dem0 = 'dem.hdr' zerofn_dem1 = 'dem.wgs84' zerofn_dem2 = 'dem.wgs84.vrt' zerofn_dem3 = 'dem.wgs84.xml' #zerofn = '0_make_ann_dem_move_v2.py' #step1 coregdir = '1_getoffs' coregfn = '1_isce_stack.py' #step2 resampdir = '2_coreg' resampfn = '2_resamp_pol.py' #resampfn = '2_resamp_pol_v1.py' #step 3 postprocdir = '3_postprocess_stacks' postprocfn = '3_uavsar_pp_mlc_v2.py' cwd = os.getcwd() os.chdir(cwd) shutil.copy(os.path.join(cwd, '.netrc'), '/work/.netrc') maindir, thisdir = os.path.split(cwd) mlczip = [f for f in os.listdir() if f.endswith('.zip')] #group the mlc.zip by project...those that start with the same two parameters prjlst = [] for numd, vald in enumerate(mlczip): prj = vald.split('_')[0]#+'_'+vald.split('_')[1] prjlst.append(prj) projects = list(set(prjlst)) print('There are %s projects (%s)' % (len(projects), projects)) print(prj) for num, val in enumerate(projects): prjfiles = [f for f in os.listdir() if f.startswith(val)] print('Project %s consists of %s flightlines' % (projects[num], len(prjfiles))) if simrun == 1: prjdir = os.path.join(maindir, projects[num]+'_sim') else: prjdir = os.path.join(maindir, projects[num]) if not os.path.exists(prjdir): os.mkdir(prjdir) os.mkdir(os.path.join(prjdir,zerodir)) os.mkdir(os.path.join(prjdir,coregdir)) os.mkdir(os.path.join(prjdir,resampdir)) os.mkdir(os.path.join(prjdir,postprocdir)) shutil.copy(os.path.join(cwd, zerofn), os.path.join(prjdir, zerodir, zerofn)) shutil.copy(os.path.join(cwd, zerofn_dem0), os.path.join(prjdir, zerodir, zerofn_dem0)) shutil.copy(os.path.join(cwd, zerofn_dem1), os.path.join(prjdir, zerodir, zerofn_dem1)) shutil.copy(os.path.join(cwd, zerofn_dem2), os.path.join(prjdir, zerodir, zerofn_dem2)) shutil.copy(os.path.join(cwd, zerofn_dem3), os.path.join(prjdir, zerodir, zerofn_dem3)) shutil.copy(os.path.join(cwd, coregfn), os.path.join(prjdir, coregdir, coregfn)) shutil.copy(os.path.join(cwd, resampfn), os.path.join(prjdir, resampdir, resampfn)) shutil.copy(os.path.join(cwd, postprocfn), os.path.join(prjdir, postprocdir, postprocfn)) for numd, vald in enumerate(prjfiles): shutil.move(os.path.join(cwd, vald), os.path.join(prjdir, zerodir, vald)) t0 = time.time() print('Running %s for project %s' % (zerofn, val)) os.chdir(os.path.join(prjdir, zerodir)) aa = os.getcwd() subprocess.call([os.path.join(aa,zerofn)]) t1 = time.time() print('Running %s for project %s' % (coregfn, val)) os.chdir(os.path.join(prjdir, coregdir)) aa = os.getcwd() subprocess.call([os.path.join(aa, coregfn)]) t2 = time.time() print('Running %s for project %s' % (resampfn, val)) os.chdir(os.path.join(prjdir, resampdir)) aa = os.getcwd() subprocess.call([os.path.join(aa,resampfn)]) t3 = time.time() print('Running %s for project %s' % (postprocfn, val)) 
os.chdir(os.path.join(prjdir, postprocdir)) aa = os.getcwd() subprocess.call([os.path.join(aa, postprocfn)]) t4 = time.time() print('0_make_ann_dem took %s seconds' %(np.round(t1-t0,3))) print('1_getoffs took %s seconds' %(np.round(t2-t1,3))) print('2_coreg took %s seconds' %(np.round(t3-t2,3))) print('3_postprocess took %s seconds' %(np.round(t4-t3,3))) print('Processing took %s seconds' %(np.round(t4-t0,3))) os.chdir(cwd)
py
b4147b47405305cb536aab332cae36860af04154
def GetPath(args):
    return args[1]
py
b4147ceff8c6fa88d2af1bcaa8f1fdb084a537e7
#!/usr/bin/env python """ The cat module allows you to execute an insights datasource and write its output to stdout. A string representation of the datasource is written to stderr before the output. >>> insights-cat hostname CommandOutputProvider("/usr/bin/hostname -f") alonzo Pass -q if you want only the datasource information. >>> insights-cat -q ethtool CommandOutputProvider("/sbin/ethtool docker0") CommandOutputProvider("/sbin/ethtool enp0s31f6") CommandOutputProvider("/sbin/ethtool lo") CommandOutputProvider("/sbin/ethtool tun0") CommandOutputProvider("/sbin/ethtool virbr0") CommandOutputProvider("/sbin/ethtool virbr0-nic") CommandOutputProvider("/sbin/ethtool wlp3s0") """ from __future__ import print_function import argparse import logging import os import sys import yaml from contextlib import contextmanager import colorama as C from insights import apply_configs, create_context, dr, extract, HostContext from insights.core.spec_factory import ContentProvider C.init() def parse_args(): p = argparse.ArgumentParser("Insights spec runner.") p.add_argument("-c", "--config", help="Configure components.") p.add_argument("-p", "--plugins", default="", help="Comma-separated list without spaces of package(s) or module(s) containing plugins.") p.add_argument("-q", "--quiet", action="store_true", help="Only show commands or paths.") p.add_argument("--no-header", action="store_true", help="Don't print command or path headers.") p.add_argument("-D", "--debug", action="store_true", help="Show debug level information.") p.add_argument("spec", nargs=1, help="Spec to dump.") p.add_argument("archive", nargs="?", help="Archive or directory to analyze.") return p.parse_args() def configure_logging(debug): if debug: logging.basicConfig(level=logging.DEBUG, stream=sys.stderr) def parse_plugins(raw): for path in raw.split(","): path = path.strip() if path.endswith(".py"): path, _ = os.path.splitext(path) path = path.rstrip("/").replace("/", ".") yield path def load_default_plugins(): for f in ["default", "insights_archive", "sos_archive", "jdr_archive"]: dr.load_components("insights.specs.%s" % f, continue_on_error=False) def load_plugins(raw): if raw: for p in parse_plugins(raw): dr.load_components(p, continue_on_error=False) def configure(config): if config: with open(config) as f: apply_configs(yaml.safe_load(f)) def get_spec(fqdn): if "." 
not in fqdn: fqdn = "insights.specs.Specs.%s" % fqdn return dr.get_component(fqdn) @contextmanager def create_broker(root=None): if not root: broker = dr.Broker() broker[HostContext] = HostContext() yield broker else: def from_dir(d): broker = dr.Broker() ctx = create_context(d, None) broker[ctx.__class__] = ctx return broker if os.path.isdir(root): yield from_dir(root) else: with extract(root) as ex: yield from_dir(ex.tmp_dir) def dump_spec(value, quiet=False, no_header=False): if not value: return value = value if isinstance(value, list) else [value] for v in value: if not no_header: vname = str(v) if isinstance(v, ContentProvider) else "Raw Data" print(C.Fore.BLUE + vname + C.Style.RESET_ALL, file=sys.stderr) if not quiet: if isinstance(v, ContentProvider): for d in v.stream(): print(d) else: print(v) def dump_error(spec, broker): if spec in broker.exceptions: for ex in broker.exceptions[spec]: print(broker.tracebacks[ex], file=sys.stderr) if spec in broker.missing_requirements: missing = broker.missing_requirements[spec] required = missing[0] at_least_one = missing[1] buf = sys.stderr print("Missing Dependencies:", file=buf) if required: print(" Requires:", file=buf) for d in required: print(" %s" % dr.get_name(d), file=buf) if at_least_one: for alo in at_least_one: print(" At Least One Of:", file=buf) for d in alo: print(" %s" % dr.get_name(d), file=buf) def run(spec, archive=None, quiet=False, no_header=False): with create_broker(archive) as broker: value = dr.run(spec, broker=broker).get(spec) if value: dump_spec(value, quiet=quiet, no_header=no_header) else: dump_error(spec, broker) return sys.exit(1) def main(): args = parse_args() configure_logging(args.debug) load_default_plugins() load_plugins(args.plugins) configure(args.config) spec = get_spec(args.spec[0]) if not spec: print("Spec not found: %s" % args.spec[0], file=sys.stderr) sys.exit(1) run(spec, archive=args.archive, quiet=args.quiet, no_header=args.no_header) if __name__ == "__main__": main()
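# Programmatic sketch (added; not part of the original module): the same flow
# that main() drives can be reused from Python, e.g. to dump the `hostname`
# spec against an already-extracted sosreport directory (the path below is a
# placeholder):
#
#   load_default_plugins()
#   spec = get_spec("hostname")
#   run(spec, archive="/tmp/extracted_sosreport", quiet=False, no_header=False)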
py
b4147dcd3851a52f5a1a9319a6986519f66ac00b
#!/usr/bin/env python3 # Copyright 2018 Johns Hopkins University (author: Ashish Arora) # Apache 2.0 """ This module will be used for creating text localization mask on page image. Given the word segmentation (bounding box around a word) for every word, it will extract line segmentation. To extract line segmentation, it will take word bounding boxes of a line as input, will create a minimum area bounding box that will contain all corner points of word bounding boxes. The obtained bounding box (will not necessarily be vertically or horizontally aligned). """ import xml.dom.minidom as minidom from waldo.data_manipulation import * from waldo.core_config import CoreConfig from waldo.mar_utils import compute_hull from scipy.spatial import ConvexHull from waldo.data_transformation import scale_down_image_with_objects, \ make_square_image_with_padding def get_mask_from_page_image(madcat_file_path, image_file_name, max_size): """ Given a page image, extracts the page image mask from it. Input ----- image_file_name (string): complete path and name of the page image. madcat_file_path (string): complete path and name of the madcat xml file corresponding to the page image. """ objects = _get_bounding_box(madcat_file_path) img = Image.open(image_file_name).convert("RGB") im_arr = np.array(img) config = CoreConfig() config.num_colors = 3 image_with_objects = { 'img': im_arr, 'objects': objects } im_height = im_arr.shape[0] im_width = im_arr.shape[1] validated_objects = [] for original_object in image_with_objects['objects']: ordered_polygon_points = original_object['polygon'] object = {} resized_pp = [] for point in ordered_polygon_points: new_point = _validate_and_update_point(point, im_width, im_height) resized_pp.append(new_point) object['polygon'] = resized_pp validated_objects.append(object) validated_image_with_objects = { 'img': im_arr, 'objects': validated_objects } scaled_image_with_objects = scale_down_image_with_objects(validated_image_with_objects, config, max_size) img_padded = make_square_image_with_padding(scaled_image_with_objects['img'], 3, 255) padded_image_with_objects = { 'img': img_padded, 'objects': scaled_image_with_objects['objects'] } y = convert_to_mask(padded_image_with_objects, config) return y def _get_bounding_box(madcat_file_path): """ Given word boxes of each line, return bounding box for each line in sorted order Input ----- image_file_name (string): complete path and name of the page image. madcat_file_path (string): complete path and name of the madcat xml file corresponding to the page image. 
""" objects = [] doc = minidom.parse(madcat_file_path) zone = doc.getElementsByTagName('zone') for node in zone: object = {} token_image = node.getElementsByTagName('token-image') mbb_input = [] for token_node in token_image: word_point = token_node.getElementsByTagName('point') for word_node in word_point: word_coordinate = (int(word_node.getAttribute('x')), int(word_node.getAttribute('y'))) mbb_input.append(word_coordinate) points = get_minimum_bounding_box(mbb_input) points = tuple(points) points_ordered = [points[index] for index in ConvexHull(points).vertices] object['polygon'] = points_ordered objects.append(object) return objects def _validate_and_update_point(pt0, im_width, im_height, pt1=(0, 0)): new_point = pt0 if pt0[0] < 0: new_point = _get_pointx_inside_origin(pt0, pt1) if pt0[0] > im_width: new_point = _get_pointx_inside_width(pt0, pt1, im_width) if pt0[1] < 0: new_point = _get_pointy_inside_origin(pt0, pt1) if pt0[1] > im_height: new_point = _get_pointy_inside_height(pt0, pt1, im_height) return new_point def _get_pointx_inside_origin(pt0, pt1): """ Given a point pt0, return an updated point that is inside orgin. It finds line equation and uses it to get updated point x value inside origin Returns ------- (float, float): updated point """ return (0, pt0[1]) # TODO def _get_pointx_inside_width(pt0, pt1, im_width): """ Given a point pt0, return an updated point that is inside image width. It finds line equation and uses it to get updated point x value inside image width Returns ------- (float, float): updated point """ return (im_width, pt0[1]) # TODO def _get_pointy_inside_origin(pt0, pt1): """ Given a point pt0, return an updated point that is inside orgin. It finds line equation and uses it to get updated point y value inside origin Returns ------- (float, float): updated point """ return (pt0[0], 0) # TODO def _get_pointy_inside_height(pt0, pt1, im_height): """ Given a point pt0, return an updated point that is inside image height. It finds line equation and uses it to get updated point y value inside image height Returns ------- (float, float): updated point """ return (pt0[0], im_height) # TODO
py
b4147ee0c6cef89483a4173c114ea146db8f8662
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _


class UsersConfig(AppConfig):
    name = "kimza_tech.users"
    verbose_name = _("Users")

    def ready(self):
        try:
            import kimza_tech.users.signals  # noqa F401
        except ImportError:
            pass
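# Illustrative sketch (hypothetical; the real signals module is not shown
# here): ready() imports kimza_tech.users.signals so that receivers declared
# there are registered at startup. Such a module typically looks roughly like:
#
#   from django.contrib.auth import get_user_model
#   from django.db.models.signals import post_save
#   from django.dispatch import receiver
#
#   @receiver(post_save, sender=get_user_model())
#   def handle_user_saved(sender, instance, created, **kwargs):
#       if created:
#           ...  # e.g. create a profile for the new user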
py
b4148041af7a55ccc621e64a9ca3db2d8dd1f8bd
def recurse(x):
    if x == 0:
        return
    recurse(x-1)


while True:
    recurse(20)
py
b41480836d23a7f61ba6db9091f84164dcc976ce
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors:            Patrick Lehmann
#                     Martin Zabel
#
# Python Main Module: Entry point to the testbench tools in PoC repository.
#
# Description:
# ------------------------------------
# This is a python main module (executable) which:
# - runs automated testbenches,
# - ...
#
# License:
# ==============================================================================
# Copyright 2007-2016 Technische Universitaet Dresden - Germany
#                     Chair of VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
from argparse import RawDescriptionHelpFormatter
from configparser import Error as ConfigParser_Error, DuplicateOptionError
from datetime import datetime
from os import environ
from pathlib import Path
from platform import system as platform_system
from shutil import copy as shutil_copy
from sys import argv as sys_argv
from textwrap import dedent
from Compiler import CompilerException, CompileSteps
from Base.Exceptions import ExceptionBase, CommonException, PlatformNotSupportedException, EnvironmentException, NotConfiguredException
from Base.Logging import ILogable, Logger, Severity
from Base.Project import VHDLVersion
from Compiler.LSECompiler import Compiler as LSECompiler
from Compiler.QuartusCompiler import Compiler as MapCompiler
from Compiler.ISECompiler import Compiler as ISECompiler
from Compiler.XCICompiler import Compiler as XCICompiler
from Compiler.XCOCompiler import Compiler as XCOCompiler
from Compiler.XSTCompiler import Compiler as XSTCompiler
from Compiler.VivadoCompiler import Compiler as VivadoCompiler
from DataBase import Query
from DataBase.Config import Board
from DataBase.Entity import NamespaceRoot, FQN, EntityTypes, WildCard, TestbenchKind, NetlistKind
from DataBase.Solution import Repository
from Simulator import Simulator as BaseSimulator, SimulatorException, SimulationSteps
from Simulator.ActiveHDLSimulator import Simulator as ActiveHDLSimulator
from Simulator.CocotbSimulator import Simulator as CocotbSimulator
from Simulator.GHDLSimulator import Simulator as GHDLSimulator
from Simulator.ISESimulator import Simulator as ISESimulator
from Simulator.QuestaSimulator import Simulator as QuestaSimulator
from Simulator.VivadoSimulator import Simulator as VivadoSimulator
from ToolChains import ToolChainException, Configurator, ConfigurationException
from ToolChains.GHDL import Configuration as GHDLConfiguration
from lib.pyAttribute.ArgParseAttributes import ArgParseMixin
from lib.pyAttribute.ArgParseAttributes import CommandAttribute, CommandGroupAttribute, ArgumentAttribute, SwitchArgumentAttribute, DefaultAttribute
from
lib.pyAttribute.ArgParseAttributes import CommonArgumentAttribute, CommonSwitchArgumentAttribute from lib.ExtendedConfigParser import ExtendedConfigParser from lib.Functions import Init, Exit from lib.Parser import ParserException from lib.pyAttribute import Attribute __author__ = "Patrick Lehmann, Martin Zabel" __copyright__ = "Copyright 2007-2016 Technische Universitaet Dresden - Germany, Chair of VLSI-Design, Diagnostics and Architecture" __maintainer__ = "Patrick Lehmann" __email__ = "[email protected]" __version__ = "1.1.0" __status__ = "Production" __license__ = "Apache License 2.0" __api__ = [ 'PoCEntityAttribute', 'BoardDeviceAttributeGroup', 'VHDLVersionAttribute', 'SimulationStepsAttribute', 'CompileStepsAttribute', 'PileOfCores', 'main' ] __all__ = __api__ class PoCEntityAttribute(Attribute): def __call__(self, func): self._AppendAttribute(func, ArgumentAttribute(metavar="PoC Entity", dest="FQN", type=str, nargs='+', help="A space separated list of PoC entities.")) return func class BoardDeviceAttributeGroup(Attribute): def __call__(self, func): self._AppendAttribute(func, ArgumentAttribute("--device", metavar="DeviceName", dest="DeviceName", help="The target platform's device name.")) self._AppendAttribute(func, ArgumentAttribute("--board", metavar="BoardName", dest="BoardName", help="The target platform's board name.")) return func class VHDLVersionAttribute(Attribute): def __call__(self, func): self._AppendAttribute(func, ArgumentAttribute("--std", metavar="VHDLVersion", dest="VHDLVersion", help="Simulate with VHDL-??")) return func class SimulationStepsAttribute(Attribute): def __call__(self, func): self._AppendAttribute(func, SwitchArgumentAttribute("-g", "--gui", dest="GUIMode", help="Run all steps (prepare, analysis, elaboration, optimization, simulation) and finally display the waveform in a GUI window.")) self._AppendAttribute(func, SwitchArgumentAttribute("-a", "--analyze", dest="Analyze", help="Run only the prepare and analysis step.")) self._AppendAttribute(func, SwitchArgumentAttribute("-e", "--elaborate", dest="Elaborate", help="Run only the prepare and elaboration step.")) # self._AppendAttribute(func, SwitchArgumentAttribute("-c", "--compile", dest="Compile", help="Run only the prepare and compile step.")) # self._AppendAttribute(func, SwitchArgumentAttribute("-o", "--optimize", dest="Optimize", help="Run only the prepare and optimization step.")) self._AppendAttribute(func, SwitchArgumentAttribute("-C", "--recompile", dest="Recompile", help="Run all compile steps (prepare, analysis, elaboration, optimization).")) self._AppendAttribute(func, SwitchArgumentAttribute("-s", "--simulate", dest="Simulate", help="Run only the prepare and simulation step.")) self._AppendAttribute(func, SwitchArgumentAttribute("-w", "--showwave", dest="ShowWave", help="Run only the prepare step and display the waveform in a GUI window.")) self._AppendAttribute(func, SwitchArgumentAttribute("-W", "--review", dest="Review", help="Run only display the waveform in a GUI window.")) self._AppendAttribute(func, SwitchArgumentAttribute("-S", "--resimulate", dest="Resimulate", help="Run all simulation steps (prepare, simulation) and finally display the waveform in a GUI window.")) self._AppendAttribute(func, SwitchArgumentAttribute("-r", "--showreport", dest="ShowReport", help="Show a simulation report.")) # self._AppendAttribute(func, SwitchArgumentAttribute( "--cleanup-after", dest="CleanUpAfter", help="Don't delete intermediate files. 
Skip post-delete rules.")) return func class CompileStepsAttribute(Attribute): def __call__(self, func): self._AppendAttribute(func, SwitchArgumentAttribute("-s", "--synthesize", dest="Synthesize", help="Run only the prepare and synthesize step.")) # merge # place # route # bitfile self._AppendAttribute(func, SwitchArgumentAttribute("-r", "--showreport", dest="ShowReport", help="Show a simulation report.")) self._AppendAttribute(func, SwitchArgumentAttribute( "--no-cleanup", dest="NoCleanUp", help="Don't delete intermediate files. Skip post-delete rules.")) return func class PileOfCores(ILogable, ArgParseMixin): HeadLine = "The PoC-Library - Service Tool" # configure hard coded variables here __CONFIGFILE_DIRECTORY = "py" __CONFIGFILE_PRIVATE = "config.private.ini" __CONFIGFILE_DEFAULTS = "config.defaults.ini" __CONFIGFILE_BOARDS = "config.boards.ini" __CONFIGFILE_STRUCTURE = "config.structure.ini" __CONFIGFILE_IPCORES = "config.entity.ini" # load platform information (Windows, Linux, Darwin, ...) __PLATFORM = platform_system() # records class __Directories__: """Data structure for all main directories. WORKAROUND: All members are initialized with empty :py:class:`pathlib.Path` instances, until Python 3.6 with type hints gets the default Python version. """ Working = Path() Root = Path() ConfigFiles = Path() Solution = Path() Project = Path() Source = Path() Testbench = Path() Netlist = Path() Temp = Path() PreCompiled = Path() class __ConfigFiles__: """Data structure for all configuration files. WORKAROUND: All members are initialized with empty :py:class:`pathlib.Path` instances, until Python 3.6 with type hints gets the default Python version. """ Private = Path() Defaults = Path() Boards = Path() Structure = Path() IPCores = Path() Solution = Path() Project = Path() def __init__(self, debug, verbose, quiet, dryRun, sphinx=False): # Call the initializer of ILogable # -------------------------------------------------------------------------- if quiet: severity = Severity.Quiet elif debug: severity = Severity.Debug elif verbose: severity = Severity.Verbose else: severity = Severity.Normal logger = Logger(severity, printToStdOut=True) ILogable.__init__(self, logger=logger) # Call the constructor of the ArgParseMixin # -------------------------------------------------------------------------- description = dedent("""\ This is the PoC-Library Service Tool. 
""") epilog = "Pile-of-Cores" class HelpFormatter(RawDescriptionHelpFormatter): def __init__(self, *args, **kwargs): kwargs['max_help_position'] = 25 super().__init__(*args, **kwargs) ArgParseMixin.__init__(self, description=description, epilog=epilog, formatter_class=HelpFormatter, add_help=False) if sphinx: return # Do some basic checks self.__CheckEnvironment() # declare members # -------------------------------------------------------------------------- self.__dryRun = dryRun self.__pocConfig = None self.__root = None self.__repo = None self.__directories = {} self.__SimulationDefaultVHDLVersion = BaseSimulator.VHDL_VERSION self.__SimulationDefaultBoard = None self._directories = self.__Directories__() self._directories.Working = Path.cwd() self._directories.Root = Path(environ.get('PoCRootDirectory')) self._directories.ConfigFiles = self.Directories.Root / self.__CONFIGFILE_DIRECTORY self._configFiles = self.__ConfigFiles__() self._configFiles.Private = self.Directories.ConfigFiles / self.__CONFIGFILE_PRIVATE self._configFiles.Defaults = self.Directories.ConfigFiles / self.__CONFIGFILE_DEFAULTS self._configFiles.Boards = self.Directories.ConfigFiles / self.__CONFIGFILE_BOARDS self._configFiles.Structure = self.Directories.ConfigFiles / self.__CONFIGFILE_STRUCTURE self._configFiles.IPCores = self.Directories.ConfigFiles / self.__CONFIGFILE_IPCORES self.__pocConfig = ExtendedConfigParser() self.__pocConfig.optionxform = str # class properties # ============================================================================ @property def Platform(self): return self.__PLATFORM @property def DryRun(self): return self.__dryRun @property def Directories(self): return self._directories @property def ConfigFiles(self): return self._configFiles @property def PoCConfig(self): return self.__pocConfig @property def Root(self): return self.__root @property def Repository(self): return self.__repo def __CheckEnvironment(self): if (self.Platform not in ["Windows", "Linux", "Darwin"]): raise PlatformNotSupportedException(self.Platform) if (environ.get('PoCRootDirectory') is None): raise EnvironmentException("Shell environment does not provide 'PoCRootDirectory' variable.") # read PoC configuration # ============================================================================ def __ReadPoCConfiguration(self): self.LogVerbose("Reading configuration files...") configFiles = [ (self.ConfigFiles.Private, "private"), (self.ConfigFiles.Defaults, "defaults"), (self.ConfigFiles.Boards, "boards"), (self.ConfigFiles.Structure, "structure"), (self.ConfigFiles.IPCores, "IP core") ] # create parser instance self.LogDebug("Reading PoC configuration from:") try: # process first file (private) file, name = configFiles[0] self.LogDebug(" {0!s}".format(file)) if not file.exists(): raise NotConfiguredException("PoC's {0} configuration file '{1!s}' does not exist.".format(name, file)) from FileNotFoundError(str(file)) self.__pocConfig.read(str(file)) for file, name in configFiles[1:]: self.LogDebug(" {0!s}".format(file)) if not file.exists(): raise ConfigurationException("PoC's {0} configuration file '{1!s}' does not exist.".format(name, file)) from FileNotFoundError(str(file)) self.__pocConfig.read(str(file)) except DuplicateOptionError as ex: raise ConfigurationException("Error in configuration file '{0!s}'.".format(file)) from ex # check PoC installation directory if (self.Directories.Root != Path(self.PoCConfig['INSTALL.PoC']['InstallationDirectory'])): raise NotConfiguredException("There is a mismatch between PoCRoot 
and PoC's installation directory.") # parsing values into class fields configSection = self.__pocConfig['CONFIG.DirectoryNames'] self.Directories.Source = self.Directories.Root / configSection['HDLSourceFiles'] self.Directories.Testbench = self.Directories.Root / configSection['TestbenchFiles'] self.Directories.NetList = self.Directories.Root / configSection['NetlistFiles'] self.Directories.Temp = self.Directories.Root / configSection['TemporaryFiles'] self.Directories.PreCompiled = self.Directories.Root / configSection['PrecompiledFiles'] # Initialize the default board (GENERIC) self.__SimulationDefaultBoard = Board(self) # Initialize PoC's namespace structure self.__root = NamespaceRoot(self) self.__repo = Repository(self) def __BackupPoCConfiguration(self): now = datetime.now() backupFile = self._configFiles.Private.with_suffix(".{datetime}.ini".format(datetime=now.strftime("%Y.%m.%d-%H.%M.%S"))) self.LogVerbose("Copying old configuration file to '{0!s}'.".format(backupFile, **Init.Foreground)) self.LogDebug("cp {0!s} {1!s}".format(self._configFiles.Private, backupFile)) try: shutil_copy(str(self._configFiles.Private), str(backupFile), follow_symlinks=True) except OSError as ex: raise ConfigurationException("Error while copying '{0!s}'.".format(self._configFiles.Private)) from ex def __WritePoCConfiguration(self): for sectionName in [sectionName for sectionName in self.__pocConfig if not (sectionName.startswith("INSTALL") or sectionName.startswith("SOLUTION"))]: self.__pocConfig.remove_section(sectionName) self.__pocConfig.remove_section("SOLUTION.DEFAULTS") # Writing configuration to disc self.LogNormal("{GREEN}Writing configuration file to '{0!s}'.{NOCOLOR}".format(self._configFiles.Private, **Init.Foreground)) with self._configFiles.Private.open('w') as configFileHandle: self.PoCConfig.write(configFileHandle) def SaveAndReloadPoCConfiguration(self): self.__WritePoCConfiguration() self.__pocConfig.clear() self.__ReadPoCConfiguration() def __PrepareForConfiguration(self): self.__ReadPoCConfiguration() def __PrepareForSimulation(self): self.LogNormal("Initializing PoC-Library Service Tool for simulations") self.__ReadPoCConfiguration() def __PrepareForSynthesis(self): self.LogNormal("Initializing PoC-Library Service Tool for synthesis") self.__ReadPoCConfiguration() # ============================================================================ # Common commands # ============================================================================ # common arguments valid for all commands # ---------------------------------------------------------------------------- @CommonSwitchArgumentAttribute("-D", dest="DEBUG", help="Enable script wrapper debug mode. 
See also :option:`poc.ps1 -D`.") @CommonSwitchArgumentAttribute( "--dryrun", dest="DryRun", help="Don't execute external programs.") @CommonSwitchArgumentAttribute("-d", "--debug", dest="debug", help="Enable debug mode.") @CommonSwitchArgumentAttribute("-v", "--verbose", dest="verbose", help="Print out detailed messages.") @CommonSwitchArgumentAttribute("-q", "--quiet", dest="quiet", help="Reduce messages to a minimum.") @CommonArgumentAttribute("--sln", metavar="SolutionID", dest="SolutionID", help="Solution name.") @CommonArgumentAttribute("--prj", metavar="ProjectID", dest="ProjectID", help="Project name.") def Run(self): ArgParseMixin.Run(self) def PrintHeadline(self): self.LogNormal("{HEADLINE}{line}{NOCOLOR}".format(line="="*80, **Init.Foreground)) self.LogNormal("{HEADLINE}{headline: ^80s}{NOCOLOR}".format(headline=self.HeadLine, **Init.Foreground)) self.LogNormal("{HEADLINE}{line}{NOCOLOR}".format(line="="*80, **Init.Foreground)) # ---------------------------------------------------------------------------- # fallback handler if no command was recognized # ---------------------------------------------------------------------------- @DefaultAttribute() def HandleDefault(self, _): self.PrintHeadline() # print("Common arguments:") # for funcname,func in CommonArgumentAttribute.GetMethods(self): # for comAttribute in CommonArgumentAttribute.GetAttributes(func): # print(" {0} {1}".format(comAttribute.Args, comAttribute.KWArgs['help'])) # # self.__mainParser.add_argument(*(comAttribute.Args), **(comAttribute.KWArgs)) # # for funcname,func in CommonSwitchArgumentAttribute.GetMethods(self): # for comAttribute in CommonSwitchArgumentAttribute.GetAttributes(func): # print(" {0} {1}".format(comAttribute.Args, comAttribute.KWArgs['help'])) self.MainParser.print_help() Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "help" command # ---------------------------------------------------------------------------- @CommandAttribute("help", help="help help") @ArgumentAttribute(metavar="Command", dest="Command", type=str, nargs="?", help="Print help page(s) for a command.") def HandleHelp(self, args): self.PrintHeadline() if (args.Command is None): self.MainParser.print_help() Exit.exit() elif (args.Command == "help"): print("This is a recursion ...") else: self.SubParsers[args.Command].print_help() Exit.exit() # ============================================================================ # Configuration commands # ============================================================================ # create the sub-parser for the "configure" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Configuration commands") # mccabe:disable=MC0001 @CommandAttribute("configure", help="Configure vendor tools for PoC.") @ArgumentAttribute(metavar="ToolChain", dest="ToolChain", type=str, nargs="?", help="Specify a tool chain to be configured.") @SwitchArgumentAttribute("--set-default-tools", dest="SetDefaultTools", help="Set default tool for a tool chain.") def HandleConfiguration(self, args): """Handle 'configure' command.""" self.PrintHeadline() if (self.Platform not in ["Darwin", "Linux", "Windows"]): raise PlatformNotSupportedException(self.Platform) # load existing configuration or create a new one try: self.__ReadPoCConfiguration() self.__BackupPoCConfiguration() configurator = Configurator(self) configurator.UpdateConfiguration() except NotConfiguredException: self.LogWarning("No 
private configuration found. Generating an empty PoC configuration...") configurator = Configurator(self) configurator.InitializeConfiguration() if (args.SetDefaultTools is True): configurator.ConfigureDefaultTools() else: toolChain = args.ToolChain if (toolChain is None): configurator.ConfigureAll() else: configurator.ConfigureTool(toolChain) if (self.Logger.LogLevel is Severity.Debug): self.LogDebug("Dumping PoCConfig...") self.LogDebug("-" * 40) for sectionName in self.__pocConfig.sections(): if (not sectionName.startswith("INSTALL")): continue self.LogDebug("[{0}]".format(sectionName)) configSection = self.__pocConfig[sectionName] for optionName in configSection: try: optionValue = configSection[optionName] except Exception: optionValue = "-- ERROR --" self.LogDebug("{0} = {1}".format(optionName, optionValue), indent=3) self.LogDebug("-" * 40) # ---------------------------------------------------------------------------- # create the sub-parser for the "add-solution" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Configuration commands") @CommandAttribute("add-solution", help="Add a solution to PoC.", description=dedent("""\ Add a solution to PoC. """)) def HandleAddSolution(self, _): #args self.PrintHeadline() self.__PrepareForConfiguration() self.LogNormal("Register a new solutions in PoC") self.LogNormal("Solution name: ", indent=1) solutionName = input() if (solutionName == ""): raise ConfigurationException("Empty input. Aborting!") self.LogNormal("Solution id: ", indent=1) solutionID = input() if (solutionID == ""): raise ConfigurationException("Empty input. Aborting!") if (solutionID in self.__repo): raise ConfigurationException("Solution ID is already used.") self.LogNormal("Solution path: ", indent=1) solutionRootPath = input() if (solutionRootPath == ""): raise ConfigurationException("Empty input. Aborting!") solutionRootPath = Path(solutionRootPath) if (not solutionRootPath.exists()): self.LogNormal("Path does not exists. Should it be created? [{CYAN}Y{NOCOLOR}/n]: ".format(**Init.Foreground), appendLinebreak=False) createPath = input() createPath = createPath if createPath != "" else "Y" if (createPath in ['n', 'N']): raise ConfigurationException("Cannot continue to register the new project, because '{0!s}' does not exist.".format(solutionRootPath)) elif (createPath not in ['y', 'Y']): raise ConfigurationException("Unsupported choice '{0}'".format(createPath)) try: solutionRootPath.mkdir(parents=True) except OSError as ex: raise ConfigurationException("Error while creating '{0!s}'.".format(solutionRootPath)) from ex self.__repo.AddSolution(solutionID, solutionName, solutionRootPath) self.__WritePoCConfiguration() self.LogNormal("Solution {GREEN}successfully{NOCOLOR} created.".format(**Init.Foreground)) # ---------------------------------------------------------------------------- # create the sub-parser for the "list-solution" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Configuration commands") @CommandAttribute("list-solution", help="List all solutions registered in PoC.", description=dedent("""\ List all solutions registered in PoC. 
""")) def HandleListSolution(self, _): #args self.PrintHeadline() self.__PrepareForConfiguration() self.LogNormal("Registered solutions in PoC:") if self.__repo.Solutions: for solution in self.__repo.Solutions: self.LogNormal(" {id: <10}{name}".format(id=solution.ID, name=solution.Name)) if (self.Logger.LogLevel <= Severity.Verbose): self.LogVerbose(" Path: {path!s}".format(path=solution.Path)) self.LogVerbose(" Projects:") for project in solution.Projects: self.LogVerbose(" {id: <6}{name}".format(id=project.ID, name=project.Name)) else: self.LogNormal(" {RED}No registered solutions found.{NOCOLOR}".format(**Init.Foreground)) # ---------------------------------------------------------------------------- # create the sub-parser for the "remove-solution" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Configuration commands") @CommandAttribute("remove-solution", help="Remove a solution from PoC.", description=dedent("""\ Remove a solution from PoC. """)) @ArgumentAttribute(metavar="SolutionID", dest="SolutionID", type=str, help="Solution name.") def HandleRemoveSolution(self, args): self.PrintHeadline() self.__PrepareForConfiguration() solution = self.__repo[args.SolutionID] self.LogNormal("Removing solution '{0}'.".format(solution.Name)) remove = input("Do you really want to remove this solution? [N/y]: ") remove = remove if remove != "" else "N" if (remove in ['n', 'N']): raise ConfigurationException("Operation canceled.") elif (remove not in ['y', 'Y']): raise ConfigurationException("Unsupported choice '{0}'".format(remove)) self.__repo.RemoveSolution(solution) self.__WritePoCConfiguration() self.LogNormal("Solution {GREEN}successfully{NOCOLOR} removed.".format(**Init.Foreground)) # ---------------------------------------------------------------------------- # create the sub-parser for the "add-project" command # ---------------------------------------------------------------------------- # @CommandGroupAttribute("Configuration commands") # @CommandAttribute("add-project", help="Add a project to PoC.") # def HandleAddProject(self, args): # self.PrintHeadline() # self.__PrepareForConfiguration() # ---------------------------------------------------------------------------- # create the sub-parser for the "list-project" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Configuration commands") @CommandAttribute("list-project", help="List all projects registered in PoC.", description=dedent("""\ List all projects registered in PoC. 
""")) def HandleListProject(self, args): self.PrintHeadline() self.__PrepareForConfiguration() if (args.SolutionID is None): raise ConfigurationException("Missing command line argument '--sln'.") try: solution = self.__repo[args.SolutionID] except KeyError as ex: raise ConfigurationException("Solution ID '{0}' is not registered in PoC.".format(args.SolutionID)) from ex self.LogNormal("Registered projects for solution '{0}':".format(solution.ID)) if solution.Projects: for project in solution.Projects: self.LogNormal(" {id: <10}{name}".format(id=project.ID, name=project.Name)) else: self.LogNormal(" {RED}No registered projects found.{NOCOLOR}".format(**Init.Foreground)) # ---------------------------------------------------------------------------- # create the sub-parser for the "remove-project" command # ---------------------------------------------------------------------------- # @CommandGroupAttribute("Configuration commands") # @CommandAttribute("remove-project", help="Add a project to PoC.") # @ArgumentAttribute(metavar="Project", dest="Project", type=str, help="Project name.") # def HandleRemoveProject(self, args): # self.PrintHeadline() # self.__PrepareForConfiguration() # ---------------------------------------------------------------------------- # create the sub-parser for the "add-ipcore" command # ---------------------------------------------------------------------------- # @CommandGroupAttribute("Configuration commands") # @CommandAttribute("add-ipcore", help="Add a ipcore to PoC.") # def HandleAddIPCore(self, args): # self.PrintHeadline() # self.__PrepareForConfiguration() # ---------------------------------------------------------------------------- # create the sub-parser for the "list-ipcore" command # ---------------------------------------------------------------------------- # @CommandGroupAttribute("Configuration commands") # @CommandAttribute("list-ipcore", help="List all ipcores registered in PoC.") # def HandleListIPCore(self, args): # self.PrintHeadline() # self.__PrepareForConfiguration() # # ipcore = Solution(self) # # self.LogNormal("Registered ipcores in PoC:") # for ipcoreName in ipcore.GetIPCoreNames(): # print(" {0}".format(ipcoreName)) # ---------------------------------------------------------------------------- # create the sub-parser for the "remove-ipcore" command # ---------------------------------------------------------------------------- # @CommandGroupAttribute("Configuration commands") # @CommandAttribute("remove-ipcore", help="Add a ipcore to PoC.") # @ArgumentAttribute(metavar="IPCore", dest="IPCore", type=str, help="IPCore name.") # def HandleRemoveIPCore(self, args): # self.PrintHeadline() # self.__PrepareForConfiguration() # ---------------------------------------------------------------------------- # create the sub-parser for the "add-testbench" command # ---------------------------------------------------------------------------- # @CommandGroupAttribute("Configuration commands") # @CommandAttribute("add-testbench", help="Add a testbench to PoC.") # def HandleAddTestbench(self, args): # self.PrintHeadline() # self.__PrepareForConfiguration() # ---------------------------------------------------------------------------- # create the sub-parser for the "remove-testbench" command # ---------------------------------------------------------------------------- # @CommandGroupAttribute("Configuration commands") # @CommandAttribute("remove-testbench", help="Add a testbench to PoC.") # @ArgumentAttribute(metavar="Testbench", dest="Testbench", 
type=str, help="Testbench name.") # def HandleRemoveTestbench(self, args): # self.PrintHeadline() # self.__PrepareForConfiguration() # ---------------------------------------------------------------------------- # create the sub-parser for the "query" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Configuration commands") @CommandAttribute("query", help="Query PoC's database.", description=dedent("""\ Query PoC's database. """)) @ArgumentAttribute(metavar="Query", dest="Query", type=str, help="todo help") def HandleQueryConfiguration(self, args): self.__PrepareForConfiguration() query = Query(self) try: result = query.QueryConfiguration(args.Query) print(result, end="") Exit.exit() except ConfigurationException as ex: print(str(ex), end="") Exit.exit(1) # ============================================================================ # Simulation commands # ============================================================================ # TODO: Maybe required to self-compile libraries again or in the future # def __PrepareVendorLibraryPaths(self): # # prepare vendor library path for Altera # if (len(self.PoCConfig.options("INSTALL.Altera.Quartus")) != 0): # self.Directories["AlteraPrimitiveSource"] = Path(self.PoCConfig['INSTALL.Altera.Quartus']['InstallationDirectory']) / "eda/sim_lib" # # prepare vendor library path for Xilinx # if (len(self.PoCConfig.options("INSTALL.Xilinx.ISE")) != 0): # self.Directories["XilinxPrimitiveSource"] = Path(self.PoCConfig['INSTALL.Xilinx.ISE']['InstallationDirectory']) / "ISE/vhdl/src" # elif (len(self.PoCConfig.options("INSTALL.Xilinx.Vivado")) != 0): # self.Directories["XilinxPrimitiveSource"] = Path(self.PoCConfig['INSTALL.Xilinx.Vivado']['InstallationDirectory']) / "data/vhdl/src" def _ExtractBoard(self, BoardName, DeviceName, force=False): if (BoardName is not None): return Board(self, BoardName) elif (DeviceName is not None): return Board(self, "Custom", DeviceName) elif (force is True): raise CommonException("Either a board name or a device name is required.") else: return self.__SimulationDefaultBoard def _ExtractFQNs(self, fqns, defaultLibrary="PoC", defaultType=EntityTypes.Testbench): if (len(fqns) == 0): raise CommonException("No FQN given.") return [FQN(self, fqn, defaultLibrary=defaultLibrary, defaultType=defaultType) for fqn in fqns] def _ExtractVHDLVersion(self, vhdlVersion, defaultVersion=None): if (defaultVersion is None): defaultVersion = self.__SimulationDefaultVHDLVersion if (vhdlVersion is None): return defaultVersion else: return VHDLVersion.Parse(vhdlVersion) # TODO: move to Configuration class in ToolChains.Xilinx.Vivado def _CheckVivadoEnvironment(self): # check if Vivado is configure if (len(self.PoCConfig.options("INSTALL.Xilinx.Vivado")) == 0): raise NotConfiguredException("Xilinx Vivado is not configured on this system.") if (environ.get('XILINX_VIVADO') is None): raise EnvironmentException("Xilinx Vivado environment is not loaded in this shell environment.") # TODO: move to Configuration class in ToolChains.Xilinx.ISE def _CheckISEEnvironment(self): # check if ISE is configure if (len(self.PoCConfig.options("INSTALL.Xilinx.ISE")) == 0): raise NotConfiguredException("Xilinx ISE is not configured on this system.") if (environ.get('XILINX') is None): raise EnvironmentException("Xilinx ISE environment is not loaded in this shell environment.") @staticmethod def _ExtractSimulationSteps(guiMode, analyze, elaborate, optimize, recompile, simulate, showWaveform, resimulate, 
showReport, cleanUp): simulationSteps = SimulationSteps.no_flags if (not (analyze or elaborate or optimize or recompile or simulate or resimulate or showWaveform)): simulationSteps |= SimulationSteps.Prepare | SimulationSteps.CleanUpBefore simulationSteps |= SimulationSteps.Analyze | SimulationSteps.Elaborate #| SimulationSteps.Optimize simulationSteps |= SimulationSteps.Simulate simulationSteps |= SimulationSteps.ShowWaveform & guiMode simulationSteps |= SimulationSteps.ShowReport simulationSteps |= SimulationSteps.CleanUpAfter & cleanUp elif (not (analyze or elaborate or optimize or simulate or resimulate or showWaveform or guiMode) and recompile): simulationSteps |= SimulationSteps.Analyze | SimulationSteps.Elaborate #| SimulationSteps.Optimize simulationSteps |= SimulationSteps.Recompile simulationSteps |= SimulationSteps.ShowReport & showReport simulationSteps |= SimulationSteps.CleanUpAfter & cleanUp elif (not (analyze or elaborate or optimize or recompile or simulate or showWaveform) and resimulate): simulationSteps |= SimulationSteps.Simulate simulationSteps |= SimulationSteps.ShowWaveform & guiMode simulationSteps |= SimulationSteps.ShowReport & showReport simulationSteps |= SimulationSteps.CleanUpAfter & cleanUp elif (recompile or resimulate): raise SimulatorException("Combination of command line options is not allowed.") else: # simulationSteps |= SimulationSteps.CleanUpBefore & True #cleanup simulationSteps |= SimulationSteps.Prepare & True #prepare simulationSteps |= SimulationSteps.Analyze & analyze simulationSteps |= SimulationSteps.Elaborate & elaborate # simulationSteps |= SimulationSteps.Optimize & optimize simulationSteps |= SimulationSteps.Simulate & simulate simulationSteps |= SimulationSteps.ShowWaveform & (showWaveform or guiMode) simulationSteps |= SimulationSteps.ShowReport & showReport return simulationSteps @staticmethod def _ExtractCompileSteps(guiMode, synthesize, showReport, cleanUp): compileSteps = CompileSteps.no_flags if (not (synthesize)): compileSteps |= CompileSteps.Prepare | SimulationSteps.CleanUpBefore compileSteps |= CompileSteps.Synthesize compileSteps |= CompileSteps.ShowReport & showReport compileSteps |= CompileSteps.CleanUpAfter & cleanUp else: # simulationSteps |= SimulationSteps.CleanUpBefore & True #cleanup compileSteps |= CompileSteps.Prepare & True #prepare compileSteps |= CompileSteps.Synthesize & synthesize compileSteps |= CompileSteps.ShowReport & showReport compileSteps |= CompileSteps.ShowGUI & guiMode return compileSteps # ---------------------------------------------------------------------------- # create the sub-parser for the "list-testbench" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Simulation commands") # mccabe:disable=MC0001 @CommandAttribute("list-testbench", help="List all testbenches.", description=dedent("""\ List all testbenches. 
""")) @PoCEntityAttribute() @ArgumentAttribute("--kind", metavar="Kind", dest="TestbenchKind", help="Testbench kind: VHDL | COCOTB") def HandleListTestbenches(self, args): self.PrintHeadline() self.__PrepareForSimulation() defaultLibrary = "PoC" if (args.SolutionID is not None): solutionName = args.SolutionID print("Solution name: {0}".format(solutionName)) if self.PoCConfig.has_option("SOLUTION.Solutions", solutionName): sectionName = "SOLUTION.{0}".format(solutionName) print("Found registered solution:") print(" Name: {0}".format(self.PoCConfig[sectionName]['Name'])) print(" Path: {0}".format(self.PoCConfig[sectionName]['Path'])) solutionRootPath = self.Directories.Root / self.PoCConfig[sectionName]['Path'] solutionConfigFile = solutionRootPath / ".PoC" / "solution.config.ini" solutionDefaultsFile = solutionRootPath / ".PoC" / "solution.defaults.ini" print(" sln files: {0!s} {1!s}".format(solutionConfigFile, solutionDefaultsFile)) self.LogVerbose("Reading solution file...") self.LogDebug(" {0!s}".format(solutionConfigFile)) self.LogDebug(" {0!s}".format(solutionDefaultsFile)) if not solutionConfigFile.exists(): raise NotConfiguredException("Solution's {0} configuration file '{1!s}' does not exist.".format(solutionName, solutionConfigFile)) \ from FileNotFoundError(str(solutionConfigFile)) if not solutionDefaultsFile.exists(): raise NotConfiguredException("Solution's {0} defaults file '{1!s}' does not exist.".format(solutionName, solutionDefaultsFile)) \ from FileNotFoundError(str(solutionDefaultsFile)) self.__pocConfig.read(str(solutionConfigFile)) self.__pocConfig.read(str(solutionDefaultsFile)) section = self.PoCConfig['PROJECT.Projects'] defaultLibrary = section['DefaultLibrary'] print("Solution:") print(" Name: {0}".format(section['Name'])) print(" Default library: {0}".format(defaultLibrary)) print(" Projects:") for item in section: if (section[item] in ["PoCProject", "ISEProject", "VivadoProject", "QuartusProject"]): sectionName2 = "PROJECT.{0}".format(item) print(" {0}".format(self.PoCConfig[sectionName2]['Name'])) print(" Namespace roots:") for item in section: if (section[item] == "Library"): libraryPrefix = item print(" {0: <16} {1}".format(self.PoCConfig[libraryPrefix]['Name'], libraryPrefix)) self.Root.AddLibrary(libraryPrefix, libraryPrefix) if (args.TestbenchKind is None): tbFilter = TestbenchKind.All else: tbFilter = TestbenchKind.Unknown for kind in args.TestbenchKind.lower().split(","): if (kind == "vhdl"): tbFilter |= TestbenchKind.VHDLTestbench elif (kind == "cocotb"): tbFilter |= TestbenchKind.CocoTestbench else: raise CommonException("Argument --kind has an unknown value '{0}'.".format(kind)) fqnList = self._ExtractFQNs(args.FQN, defaultLibrary) for fqn in fqnList: self.LogNormal("") entity = fqn.Entity if (isinstance(entity, WildCard)): for testbench in entity.GetTestbenches(tbFilter): print(str(testbench)) else: testbench = entity.GetTestbenches(tbFilter) print(str(testbench)) Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "asim" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Simulation commands") @CommandAttribute("asim", help="Simulate a PoC Entity with Aldec Active-HDL.", description=dedent("""\ Simulate a PoC Entity with Aldec Active-HDL. 
""")) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @VHDLVersionAttribute() @SimulationStepsAttribute() def HandleActiveHDLSimulation(self, args): self.PrintHeadline() self.__PrepareForSimulation() fqnList = self._ExtractFQNs(args.FQN) board = self._ExtractBoard(args.BoardName, args.DeviceName) vhdlVersion = self._ExtractVHDLVersion(args.VHDLVersion) simulationSteps = self._ExtractSimulationSteps(args.GUIMode, args.Analyze, args.Elaborate, False, args.Recompile, args.Simulate, args.ShowWave, args.Resimulate, args.ShowReport, False) # create a GHDLSimulator instance and prepare it simulator = ActiveHDLSimulator(self, self.DryRun, simulationSteps) allPassed = simulator.RunAll(fqnList, board=board, vhdlVersion=vhdlVersion) Exit.exit(1 if ((SimulationSteps.Simulate in simulationSteps) and not allPassed) else 0) # ---------------------------------------------------------------------------- # create the sub-parser for the "ghdl" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Simulation commands") @CommandAttribute("ghdl", help="Simulate a PoC Entity with GHDL.", description=dedent("""\ Simulate a PoC Entity with GHDL. """)) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @VHDLVersionAttribute() @SimulationStepsAttribute() @ArgumentAttribute("--reproducer", metavar="Name", dest="CreateReproducer", help="Create a bug reproducer") def HandleGHDLSimulation(self, args): self.PrintHeadline() self.__PrepareForSimulation() config = GHDLConfiguration(self) if (not config.IsSupportedPlatform()): raise PlatformNotSupportedException() if (not config.IsConfigured()): raise NotConfiguredException("GHDL is not configured on this system.") fqnList = self._ExtractFQNs(args.FQN) board = self._ExtractBoard(args.BoardName, args.DeviceName) vhdlVersion = self._ExtractVHDLVersion(args.VHDLVersion) simulationSteps = self._ExtractSimulationSteps(args.GUIMode, args.Analyze, args.Elaborate, False, args.Recompile, args.Simulate, args.ShowWave, args.Resimulate, args.ShowReport, False) simulator = GHDLSimulator(self, self.DryRun, simulationSteps) allPassed = simulator.RunAll(fqnList, board=board, vhdlVersion=vhdlVersion) Exit.exit(1 if ((SimulationSteps.Simulate in simulationSteps) and not allPassed) else 0) # ---------------------------------------------------------------------------- # create the sub-parser for the "isim" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Simulation commands") @CommandAttribute("isim", help="Simulate a PoC Entity with Xilinx ISE Simulator (iSim).", description=dedent("""\ Simulate a PoC Entity with Xilinx ISE Simulator (iSim). 
""")) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @SimulationStepsAttribute() def HandleISESimulation(self, args): self.PrintHeadline() self.__PrepareForSimulation() self._CheckISEEnvironment() fqnList = self._ExtractFQNs(args.FQN) board = self._ExtractBoard(args.BoardName, args.DeviceName) simulationSteps = self._ExtractSimulationSteps(args.GUIMode, args.Analyze, args.Elaborate, False, args.Recompile, args.Simulate, args.ShowWave, args.Resimulate, args.ShowReport, False) simulator = ISESimulator(self, self.DryRun, simulationSteps) allPassed = simulator.RunAll(fqnList, board=board, vhdlVersion=VHDLVersion.VHDL93) Exit.exit(1 if ((SimulationSteps.Simulate in simulationSteps) and not allPassed) else 0) # ---------------------------------------------------------------------------- # create the sub-parser for the "vsim" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Simulation commands") @CommandAttribute("vsim", help="Simulate a PoC Entity with Mentor QuestaSim or ModelSim (vsim).", description=dedent("""\ Simulate a PoC Entity with Mentor QuestaSim or ModelSim (vsim). """)) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @VHDLVersionAttribute() @SimulationStepsAttribute() def HandleQuestaSimulation(self, args): self.PrintHeadline() self.__PrepareForSimulation() fqnList = self._ExtractFQNs(args.FQN) board = self._ExtractBoard(args.BoardName, args.DeviceName) vhdlVersion = self._ExtractVHDLVersion(args.VHDLVersion) simulationSteps = self._ExtractSimulationSteps(args.GUIMode, args.Analyze, args.Elaborate, False, args.Recompile, args.Simulate, args.ShowWave, args.Resimulate, args.ShowReport, False) simulator = QuestaSimulator(self, self.DryRun, simulationSteps) allPassed = simulator.RunAll(fqnList, board=board, vhdlVersion=vhdlVersion) Exit.exit(1 if ((SimulationSteps.Simulate in simulationSteps) and not allPassed) else 0) # ---------------------------------------------------------------------------- # create the sub-parser for the "xsim" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Simulation commands") @CommandAttribute("xsim", help="Simulate a PoC Entity with Xilinx Vivado Simulator (xSim).", description=dedent("""\ Simulate a PoC Entity with Xilinx Vivado Simulator (xSim). 
""")) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @VHDLVersionAttribute() @SimulationStepsAttribute() def HandleVivadoSimulation(self, args): self.PrintHeadline() self.__PrepareForSimulation() self._CheckVivadoEnvironment() fqnList = self._ExtractFQNs(args.FQN) board = self._ExtractBoard(args.BoardName, args.DeviceName) # FIXME: VHDL-2008 is broken in Vivado 2016.1 -> use VHDL-93 by default vhdlVersion = self._ExtractVHDLVersion(args.VHDLVersion, defaultVersion=VHDLVersion.VHDL93) simulationSteps = self._ExtractSimulationSteps(args.GUIMode, args.Analyze, args.Elaborate, False, args.Recompile, args.Simulate, args.ShowWave, args.Resimulate, args.ShowReport, False) simulator = VivadoSimulator(self, self.DryRun, simulationSteps) allPassed = simulator.RunAll(fqnList, board=board, vhdlVersion=vhdlVersion) Exit.exit(1 if ((SimulationSteps.Simulate in simulationSteps) and not allPassed) else 0) # ---------------------------------------------------------------------------- # create the sub-parser for the "cocotb" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Simulation commands") @CommandAttribute("cocotb", help="Simulate a PoC Entity with Cocotb and QuestaSim.", description=dedent("""\ Simulate a PoC Entity with Cocotb and QuestaSim. """)) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @SimulationStepsAttribute() def HandleCocotbSimulation(self, args): self.PrintHeadline() self.__PrepareForSimulation() # check if QuestaSim is configured if (len(self.PoCConfig.options("INSTALL.Mentor.QuestaSim")) == 0): if (len(self.PoCConfig.options("INSTALL.Altera.ModelSim")) == 0): raise NotConfiguredException("Neither Mentor QuestaSim, Mentor ModelSim nor ModelSim Altera Edition are configured on this system.") fqnList = self._ExtractFQNs(args.FQN) board = self._ExtractBoard(args.BoardName, args.DeviceName) simulationSteps = self._ExtractSimulationSteps(args.GUIMode, args.Analyze, args.Elaborate, False, args.Recompile, args.Simulate, args.ShowWave, args.Resimulate, args.ShowReport, False) # create a CocotbSimulator instance and prepare it simulator = CocotbSimulator(self, self.DryRun, simulationSteps) allPassed = simulator.RunAll(fqnList, board=board, vhdlVersion=VHDLVersion.VHDL2008) Exit.exit(1 if ((SimulationSteps.Simulate in simulationSteps) and not allPassed) else 0) # ============================================================================ # Synthesis commands # ============================================================================ # create the sub-parser for the "list-netlist" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Simulation commands") @CommandAttribute("list-netlist", help="List all netlists.", description=dedent("""\ List all netlists. 
""")) @PoCEntityAttribute() @ArgumentAttribute("--kind", metavar="Kind", dest="NetlistKind", help="Netlist kind: Lattice | Quartus | XST | CoreGen") def HandleListNetlist(self, args): self.PrintHeadline() self.__PrepareForSynthesis() if (args.NetlistKind is None): nlFilter = NetlistKind.All else: nlFilter = NetlistKind.Unknown for kind in args.TestbenchKind.lower().split(","): if (kind == "lattice"): nlFilter |= NetlistKind.LatticeNetlist elif (kind == "quartus"): nlFilter |= NetlistKind.QuartusNetlist elif (kind == "xst"): nlFilter |= NetlistKind.XstNetlist elif (kind == "coregen"): nlFilter |= NetlistKind.CoreGeneratorNetlist elif (kind == "vivado"): nlFilter |= NetlistKind.VivadoNetlist else: raise CommonException("Argument --kind has an unknown value '{0}'.".format(kind)) fqnList = self._ExtractFQNs(args.FQN) for fqn in fqnList: entity = fqn.Entity if (isinstance(entity, WildCard)): for testbench in entity.GetNetlists(nlFilter): print(str(testbench)) else: testbench = entity.GetNetlists(nlFilter) print(str(testbench)) Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "ise" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Synthesis commands") @CommandAttribute("ise", help="Generate any IP core for the Xilinx ISE tool chain.", description=dedent("""\ Generate any IP core for the Xilinx ISE tool chain. """)) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @CompileStepsAttribute() def HandleISECompilation(self, args): self.PrintHeadline() self.__PrepareForSynthesis() self._CheckISEEnvironment() fqnList = self._ExtractFQNs(args.FQN, defaultType=EntityTypes.NetList) board = self._ExtractBoard(args.BoardName, args.DeviceName, force=True) compiler = ISECompiler(self, self.DryRun, args.NoCleanUp) compiler.RunAll(fqnList, board) Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "coregen" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Synthesis commands") @CommandAttribute("coregen", help="Generate an IP core with Xilinx ISE Core Generator.", description=dedent("""\ Generate an IP core with Xilinx ISE Core Generator. """)) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @CompileStepsAttribute() def HandleCoreGeneratorCompilation(self, args): self.PrintHeadline() self.__PrepareForSynthesis() self._CheckISEEnvironment() fqnList = self._ExtractFQNs(args.FQN, defaultType=EntityTypes.NetList) board = self._ExtractBoard(args.BoardName, args.DeviceName, force=True) compiler = XCOCompiler(self, self.DryRun, args.NoCleanUp) compiler.RunAll(fqnList, board) Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "xst" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Synthesis commands") @CommandAttribute("xst", help="Compile a PoC IP core with Xilinx ISE XST to a netlist.", description=dedent("""\ Compile a PoC IP core with Xilinx ISE XST to a netlist. :ref:`IP:PoC.Mem` foooo baaarr. 
""")) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @CompileStepsAttribute() def HandleXstCompilation(self, args): self.PrintHeadline() self.__PrepareForSynthesis() self._CheckISEEnvironment() fqnList = self._ExtractFQNs(args.FQN, defaultType=EntityTypes.NetList) board = self._ExtractBoard(args.BoardName, args.DeviceName, force=True) compiler = XSTCompiler(self, self.DryRun, args.NoCleanUp) compiler.RunAll(fqnList, board) Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "xci" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Synthesis commands") @CommandAttribute("xci", help="Generate an IP core from Xilinx Vivado IP Catalog.", description=dedent("""\ Generate an IP core from Xilinx Vivado IP Catalog. """)) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @CompileStepsAttribute() def HandleIpCatalogCompilation(self, args): self.PrintHeadline() self.__PrepareForSynthesis() self._CheckISEEnvironment() fqnList = self._ExtractFQNs(args.FQN, defaultType=EntityTypes.NetList) board = self._ExtractBoard(args.BoardName, args.DeviceName, force=True) compiler = XCICompiler(self, self.DryRun, args.NoCleanUp) compiler.RunAll(fqnList, board) Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "vivado" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Synthesis commands") @CommandAttribute("vivado", help="Compile a PoC IP core with Xilinx Vivado Synth to a design checkpoint.", description=dedent("""\ Compile a PoC IP core with Xilinx Vivado Synth to a design checkpoint. """)) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @CompileStepsAttribute() def HandleVivadoCompilation(self, args): self.PrintHeadline() self.__PrepareForSynthesis() self._CheckVivadoEnvironment() fqnList = self._ExtractFQNs(args.FQN, defaultType=EntityTypes.NetList) board = self._ExtractBoard(args.BoardName, args.DeviceName, force=True) compiler = VivadoCompiler(self, self.DryRun, args.NoCleanUp) compiler.RunAll(fqnList, board) Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "quartus" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Synthesis commands") @CommandAttribute("quartus", help="Compile a PoC IP core with Altera Quartus II Map to a netlist.", description=dedent("""\ Compile a PoC IP core with Altera Quartus II Map to a netlist. 
""")) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @CompileStepsAttribute() def HandleQuartusCompilation(self, args): self.PrintHeadline() self.__PrepareForSynthesis() # TODO: check env variables # self._CheckQuartusEnvironment() fqnList = self._ExtractFQNs(args.FQN, defaultType=EntityTypes.NetList) board = self._ExtractBoard(args.BoardName, args.DeviceName, force=True) compiler = MapCompiler(self, self.DryRun, args.NoCleanUp) compiler.RunAll(fqnList, board) Exit.exit() # ---------------------------------------------------------------------------- # create the sub-parser for the "lattice" command # ---------------------------------------------------------------------------- @CommandGroupAttribute("Synthesis commands") @CommandAttribute("lse", help="Compile a PoC IP core with Lattice Diamond LSE to a netlist.", description=dedent("""\ Compile a PoC IP core with Lattice Diamond LSE to a netlist. """)) @PoCEntityAttribute() @BoardDeviceAttributeGroup() @CompileStepsAttribute() def HandleLSECompilation(self, args): self.PrintHeadline() self.__PrepareForSynthesis() # TODO: check env variables # self._CheckLatticeEnvironment() fqnList = self._ExtractFQNs(args.FQN, defaultType=EntityTypes.NetList) board = self._ExtractBoard(args.BoardName, args.DeviceName, force=True) compiler = LSECompiler(self, self.DryRun, args.NoCleanUp) compiler.RunAll(fqnList, board) Exit.exit() # main program def main(): # mccabe:disable=MC0001 """This is the entry point for PoC.py written as a function. 1. It extracts common flags from the script's arguments list, before :py:class:`~argparse.ArgumentParser` is fully loaded. 2. It initializes colorama for colored outputs 3. It creates an instance of PoC and hands over to class based execution. All is wrapped in a big ``try..except`` block to catch every unhandled exception. 4. Shutdown the script and return its exit code. 
""" dryRun = "--dryrun" in sys_argv debug = "-d" in sys_argv verbose = "-v" in sys_argv quiet = "-q" in sys_argv # configure Exit class Exit.quiet = quiet try: Init.init() # handover to a class instance poc = PileOfCores(debug, verbose, quiet, dryRun) poc.Run() Exit.exit() except (CommonException, ConfigurationException, SimulatorException, CompilerException) as ex: print("{RED}ERROR:{NOCOLOR} {message}".format(message=ex.message, **Init.Foreground)) cause = ex.__cause__ if isinstance(cause, FileNotFoundError): print("{YELLOW} FileNotFound:{NOCOLOR} '{cause}'".format(cause=str(cause), **Init.Foreground)) elif isinstance(cause, NotADirectoryError): print("{YELLOW} NotADirectory:{NOCOLOR} '{cause}'".format(cause=str(cause), **Init.Foreground)) elif isinstance(cause, DuplicateOptionError): print("{YELLOW} DuplicateOptionError:{NOCOLOR} '{cause}'".format(cause=str(cause), **Init.Foreground)) elif isinstance(cause, ConfigParser_Error): print("{YELLOW} configparser.Error:{NOCOLOR} '{cause}'".format(cause=str(cause), **Init.Foreground)) elif isinstance(cause, ParserException): print("{YELLOW} ParserException:{NOCOLOR} {cause}".format(cause=str(cause), **Init.Foreground)) cause = cause.__cause__ if (cause is not None): print("{YELLOW} {name}:{NOCOLOR} {cause}".format(name=cause.__class__.__name__, cause= str(cause), **Init.Foreground)) elif isinstance(cause, ToolChainException): print("{YELLOW} {name}:{NOCOLOR} {cause}".format(name=cause.__class__.__name__, cause=str(cause), **Init.Foreground)) cause = cause.__cause__ if (cause is not None): if isinstance(cause, OSError): print("{YELLOW} {name}:{NOCOLOR} {cause}".format(name=cause.__class__.__name__, cause=str(cause), **Init.Foreground)) else: print(" Possible causes:") print(" - The compile order is broken.") print(" - A source file was not compiled and an old file got used.") if (not (verbose or debug)): print() print("{CYAN} Use '-v' for verbose or '-d' for debug to print out extended messages.{NOCOLOR}".format(**Init.Foreground)) Exit.exit(1) except EnvironmentException as ex: Exit.printEnvironmentException(ex) except NotConfiguredException as ex: Exit.printNotConfiguredException(ex) except PlatformNotSupportedException as ex: Exit.printPlatformNotSupportedException(ex) except ExceptionBase as ex: Exit.printExceptionBase(ex) except NotImplementedError as ex: Exit.printNotImplementedError(ex) except Exception as ex: Exit.printException(ex) # entry point if __name__ == "__main__": Exit.versionCheck((3,5,0)) main() # else: # print(__name__) # Exit.printThisIsNoLibraryFile(PoC.HeadLine)
py
b4148098ce87da40600c46d1cc880fbc91c825f2
from collections import namedtuple import operator def passes_operator_filter(player, classifier_key, value, operator): """ Tests whether a given player passes a filter for a given key in its classifier dict using a given (in)equality operator. e.g. For the following strategy: class ExampleStrategy(Player): classifier = { 'stochastic': True, 'inspects_source': False, 'memory_depth': 10, 'makes_use_of': ['game', 'length'] } passes_operator_filter(ExampleStrategy(), 'memory_depth', 10, operator.eq) would test whether the 'memory_depth' entry equals 10 and return True Parameters ---------- player : an instance of axelrod.Player classifier_key: string Defining which entry from the strategy's classifier dict is to be tested (e.g. 'memory_depth'). value: int The value against which the strategy's classifier dict entry is to be tested. operator: operator.le, operator.ge or operator.eq Indicating whether a 'less than or equal to' or 'greater than or equal to' test should be applied. Returns ------- boolean True if the value from the strategy's classifier dictionary matches the value and operator passed to the function. """ classifier_value = player.classifier[classifier_key] if (isinstance(classifier_value, str) and classifier_value.lower() == 'infinity'): classifier_value = float('inf') return operator(classifier_value, value) def passes_in_list_filter(player, classifier_key, value): """ Tests whether a given list of values exist in the list returned from the given players's classifier dict for the given classifier_key. e.g. For the following strategy: class ExampleStrategy(Player): classifier = { 'stochastic': True, 'inspects_source': False, 'memory_depth': 10, 'makes_use_of': ['game', 'length'] } passes_in_list_filter(ExampleStrategy(), 'makes_use_of', 'game', operator.eq) would test whether 'game' exists in the strategy's' 'makes_use_of' entry and return True. Parameters ---------- strategy : a descendant class of axelrod.Player classifier_key: string Defining which entry from the strategy's classifier dict is to be tested (e.g. 'makes_use_of'). value: list The values against which the strategy's classifier dict entry is to be tested. Returns ------- boolean """ result = True for entry in value: if entry not in player.classifier[classifier_key]: result = False return result def passes_filterset(strategy, filterset): """ Determines whether a given strategy meets the criteria defined in a dictionary of filters. e.g. For the following strategy: class ExampleStrategy(Player): classifier = { 'stochastic': True, 'inspects_source': False, 'memory_depth': 10, 'makes_use_of': ['game', 'length'] } and this filterset dict: example_filterset = { 'stochastic': True, 'memory_depth': 10 } passes_filterset(ExampleStrategy, example_filterset) would test whether both the strategy's 'stochastic' entry is True AND that its 'memory_depth' equals 10 and return True. Parameters ---------- strategy : a descendant class of axelrod.Player filterset : dict mapping filter name to criterion. e.g. { 'stochastic': True, 'min_memory_depth': 2 } Returns ------- boolean True if the given strategy meets all the supplied criteria in the filterset, otherwise false. """ FilterFunction = namedtuple('FilterFunction', 'function kwargs') # A dictionary mapping filter name (from the supplied filterset) to # the relevant function and arguments for that filter. 
filter_functions = { 'stochastic': FilterFunction( function=passes_operator_filter, kwargs={ 'classifier_key': 'stochastic', 'operator': operator.eq }), 'long_run_time': FilterFunction( function=passes_operator_filter, kwargs={ 'classifier_key': 'long_run_time', 'operator': operator.eq }), 'manipulates_state': FilterFunction( function=passes_operator_filter, kwargs={ 'classifier_key': 'manipulates_state', 'operator': operator.eq }), 'manipulates_source': FilterFunction( function=passes_operator_filter, kwargs={ 'classifier_key': 'manipulates_source', 'operator': operator.eq }), 'inspects_source': FilterFunction( function=passes_operator_filter, kwargs={ 'classifier_key': 'inspects_source', 'operator': operator.eq }), 'memory_depth': FilterFunction( function=passes_operator_filter, kwargs={ 'classifier_key': 'memory_depth', 'operator': operator.eq }), 'min_memory_depth': FilterFunction( function=passes_operator_filter, kwargs={ 'classifier_key': 'memory_depth', 'operator': operator.ge }), 'max_memory_depth': FilterFunction( function=passes_operator_filter, kwargs={ 'classifier_key': 'memory_depth', 'operator': operator.le }), 'makes_use_of': FilterFunction( function=passes_in_list_filter, kwargs={'classifier_key': 'makes_use_of'}) } # A list of boolean values to record whether the strategy passed or failed # each of the filters in the supplied filterset. passes_filters = [] # Loop through each of the entries in the filter_functions dict and, if # that filter is defined in the supplied filterset, call the relevant # function and record its result in the passes_filters list. for _filter, filter_function in filter_functions.items(): if filterset.get(_filter, None) is not None: kwargs = filter_function.kwargs kwargs['player'] = strategy() kwargs['value'] = filterset[_filter] passes_filters.append(filter_function.function(**kwargs)) # Return True if the strategy passed all the supplied filters return all(passes_filters)
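

# Illustrative usage sketch of passes_filterset(): a strategy class exposing a
# `classifier` dict is matched against a filterset; only filters present in the
# filterset are evaluated. `ExampleStrategy` is a minimal stand-in mirroring the
# docstring examples above, not a real axelrod strategy.
if __name__ == "__main__":
    class ExampleStrategy:
        classifier = {
            'stochastic': True,
            'inspects_source': False,
            'memory_depth': 10,
            'makes_use_of': ['game', 'length'],
        }

    example_filterset = {
        'stochastic': True,        # equality test via passes_operator_filter
        'min_memory_depth': 2,     # 10 >= 2, tested with operator.ge
        'makes_use_of': ['game'],  # membership test via passes_in_list_filter
    }
    print(passes_filterset(ExampleStrategy, example_filterset))  # True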
py
b414811e0ba7ed887e520a60bf6d7ebe7f6310af
# -*- encoding: utf-8 -*- # # Copyright 2013 Martin Zimmermann <[email protected]>. All rights reserved. # License: BSD Style, 2 clauses -- see LICENSE. import sys import io import os import tempfile import subprocess import shutil import shlex from os.path import join, dirname, isfile, splitext from datetime import datetime from acrylamid import log, readers, commands from acrylamid.errors import AcrylamidException from acrylamid.compat import string_types from acrylamid.tasks import task, argument from acrylamid.utils import force_unicode as u from acrylamid.helpers import safe, event try: input = raw_input except NameError: pass yaml, rst, md = \ lambda title, date: u"---\ntitle: %s\ndate: %s\n---\n\n" % (safe(title), date), \ lambda title, date: u"%s\n" % title + "="*len(title) + '\n\n' + ":date: %s\n\n" % date, \ lambda title, date: u"Title: %s\nDate: %s\n\n" % (title, date) formats = {'.md': md, '.mkdown': md, '.rst': rst, '.rest': rst} @task('new', [argument("title", nargs="*", default=None)], help="create a new entry") def run(conf, env, options): """Subcommand: new -- create a new blog entry the easy way. Either run ``acrylamid new My fresh new Entry`` or interactively via ``acrylamid new`` and the file will be created using the preferred permalink format.""" # we need the actual default values commands.initialize(conf, env) # config content_extension originally defined as string, not a list extlist = conf.get('content_extension',['.txt']) if isinstance(extlist, string_types): ext = extlist else: ext = extlist[0] fd, tmp = tempfile.mkstemp(suffix=ext, dir='.cache/') editor = os.getenv('VISUAL') if os.getenv('VISUAL') else os.getenv('EDITOR') tt = formats.get(ext, yaml) if options.title: title = u(' '.join(options.title)) else: title = u(input("Entry's title: ")) with io.open(fd, 'w', encoding='utf-8') as f: f.write(tt(title, datetime.now().strftime(conf['date_format']))) entry = readers.Entry(tmp, conf) p = join(conf['content_dir'], splitext(entry.permalink.strip('/'))[0]) try: os.makedirs(p.rsplit('/', 1)[0]) except OSError: pass filepath = p + ext if isfile(filepath): raise AcrylamidException('Entry already exists %r' % filepath) shutil.move(tmp, filepath) event.create('new', filepath) if datetime.now().hour == 23 and datetime.now().minute > 45: log.info("notice don't forget to update entry.date-day after mignight!") if log.level() >= log.WARN: return try: if editor: retcode = subprocess.call(shlex.split(editor) + [filepath]) elif sys.platform == 'darwin': retcode = subprocess.call(['open', filepath]) else: retcode = subprocess.call(['xdg-open', filepath]) except OSError: raise AcrylamidException('Could not launch an editor') # XXX process detaches... m( if retcode < 0: raise AcrylamidException('Child was terminated by signal %i' % -retcode) if os.stat(filepath)[6] == 0: raise AcrylamidException('File is empty!')
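

# Illustration of the front-matter templates defined above: the Markdown template,
# for example, renders as shown below. The date string here is a placeholder; the
# real value is produced from conf['date_format'].
if __name__ == "__main__":
    print(md(u"My fresh new Entry", "2013-07-01 12:00"))
    # Title: My fresh new Entry
    # Date: 2013-07-01 12:00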
py
b41481902edaaca42416bdcbd745b4ba26e9a205
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : 系统内部使用工具 Case Name : 在减容开始之后删除待减容备机与主机的root互信 Description : 1.下载解压数据库安装包 2.配置xml文件 3.创建用户并修改文件属性 4.gs_preinstall预安装: ./gs_preinstall -U [初始用户] -G [初始用户组] -X [xml配置文件路径] --sep-env-file=[env文件路径] --skip-hostname-set --non-interactive 5.gs_install安装数据库: gs_install -X [xml配置文件路径] 6.一主两备在减容备1过程中删除备1与主机用户互信 7.清理环境 gs_uninstall 删除数据库:gs_uninstall --delete-data 清理用户下进程 删除数据库相关目录并删除用户 Expect : 1.下载解压成功 2.配置成功 3.用户创建成功且文件属性修改成功 4.预安装成功 5.数据库安装成功 6.备机减容成功 7.清理成功 History : """ import os import re import time import unittest from testcase.utils.ComThread import ComThread from testcase.utils.Common import Common from testcase.utils.Common import CommonSH from testcase.utils.Constant import Constant from testcase.utils.Logger import Logger from yat.test import Node from yat.test import macro primary_sh = CommonSH('PrimaryDbUser') Constant = Constant() @unittest.skipIf(3 != primary_sh.get_node_num(), '非1+2环境不执行') class Tools(unittest.TestCase): def setUp(self): self.log = Logger() self.common = Common() self.log.info(f'----{os.path.basename(__file__)} start----') self.primary_root_node = Node('PrimaryRoot') self.primary_node = Node('PrimaryDbUser') self.standby1_root_node = Node('Standby1Root') self.standby2_root_node = Node('Standby2Root') self.u_name = os.path.basename(__file__)[:-3].split('Tools_')[-1] self.pri_host = self.primary_node.db_host self.sta1_host = self.standby1_root_node.db_host self.sta2_host = self.standby2_root_node.db_host self.ssh_file = os.path.join('/home', self.u_name, 'hostfile') self.path = macro.DB_INSTANCE_PATH.split('/')[1] self.openGauss_path = os.path.join('/', f'{self.path}', f'{self.u_name}') self.package_path = os.path.join(self.openGauss_path, 'pkg') self.conf_path = os.path.join(self.openGauss_path, 'config') self.data_path = os.path.join(self.openGauss_path, 'cluster') self.xml_path = os.path.join(self.conf_path, 'primary_standby1_standby2.xml') self.env_path = os.path.join(self.conf_path, 'env') self.script_path = os.path.join(self.package_path, 'script') self.instance_path = os.path.join(self.data_path, 'dn1') wget_result = self.primary_node.sh(f"wget {macro.FTP_PATH} " f"-c -t2 -T30").result() res = re.search(Constant.wget_connect_success_msg, wget_result) self.log.info(res) if not re.search(Constant.wget_connect_success_msg, wget_result): raise Exception('wget连接失败') def test_gs_dropnode(self): text = '----step1:下载解压数据库安装包 expect:下载解压成功----' self.log.info(text) self.common.check_load_msg(self.primary_root_node, self.primary_node, self.package_path, self.conf_path) text = '----查看主机名----' self.log.info(text) primary_node_name = self.primary_root_node.sh("uname -n").result() standby1_node_name = self.standby1_root_node.sh("uname -n").result() standby2_node_name = self.standby2_root_node.sh("uname -n").result() self.log.info(primary_node_name) self.log.info(standby1_node_name) self.log.info(standby2_node_name) self.assertIsNotNone(primary_node_name, '执行失败:' + text) self.assertIsNotNone(standby1_node_name, '执行失败:' + text) 
self.assertIsNotNone(standby2_node_name, '执行失败:' + text) text = '----查询系统未使用端口----' self.log.info(text) for count in range(5): port = self.common.get_not_used_port(self.primary_node) self.log.info(port) standby_port_check_cmd = f'netstat -tlnp | grep {port}' sta1_check_msg = self.standby1_root_node.sh( standby_port_check_cmd).result() sta2_check_msg = self.standby2_root_node.sh( standby_port_check_cmd).result() if not (sta1_check_msg or sta2_check_msg): self.assertNotEqual(0, port, '执行失败:' + text) break else: continue text = '----step2:配置xml文件 expect:配置成功----' self.log.info(text) gaussdb_app_path = os.path.join(self.data_path, 'app') gaussdb_log_path = os.path.join(self.data_path, 'gaussdb_log') tmpmppdb_path = os.path.join(self.data_path, 'tmp') gaussdb_tool_path = os.path.join(self.data_path, 'tool') self.common.scp_file(self.primary_root_node, 'primary_standby1_standby2.xml', self.conf_path) xml_cmd = \ f"sed -i " \ f"'s#v_nodeNames#{primary_node_name}, {standby1_node_name}, " \ f"{standby2_node_name}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_gaussdbAppPath#{gaussdb_app_path}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_gaussdbLogPath#{gaussdb_log_path}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_tmpMppdbPath#{tmpmppdb_path}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_gaussdbToolPath#{gaussdb_tool_path}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_corePath#{macro.DB_CORE_PATH}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_db_host#{self.pri_host}, {self.sta1_host}, " \ f"{self.sta2_host}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_pri_nodeNames#{primary_node_name}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_pri_db_host#{self.pri_host}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_dataPortBase#{port}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_dataNode1#" \ f"{self.instance_path}, {standby1_node_name}, " \ f"{self.instance_path}, {standby2_node_name}, " \ f"{self.instance_path}#g' {self.xml_path};" \ f"sed -i 's#v_sta1_nodeNames#{standby1_node_name}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_sta1_db_host#{self.sta1_host}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_sta2_nodeNames#{standby2_node_name}#g' " \ f"{self.xml_path};" \ f"sed -i 's#v_sta2_db_host#{self.sta2_host}#g' " \ f"{self.xml_path};" \ f"cat {self.xml_path}" self.log.info(xml_cmd) xml_result = self.primary_root_node.sh(xml_cmd).result() self.log.info(xml_result) msg_list = [primary_node_name, standby1_node_name, standby2_node_name, self.data_path, f"{port}", self.pri_host, self.sta1_host, self.sta2_host] for content in msg_list: self.assertIn(content, xml_result, '执行失败:' + text) text = '----step3:创建用户并修改文件属性 expect:成功----' self.log.info(text) self.common.create_user(self.primary_root_node, self.u_name) file_own = f'chown -R {self.u_name}:{self.u_name} {self.openGauss_path}' self.log.info(file_own) file_msg = self.primary_root_node.sh(file_own).result() self.log.info(file_msg) self.assertEqual('', file_msg, '执行失败:' + text) text = '----step4:执行gs_preinstall命令 expect:预安装成功----' self.log.info(text) preinstall_cmd = f'./gs_preinstall -U {self.u_name} ' \ f'-G {self.u_name} -X {self.xml_path}' \ f' --sep-env-file={self.env_path} --skip-hostname-set ' execute_cmd = f'''cd {self.script_path} expect <<EOF set timeout 300 spawn {preinstall_cmd} expect "*(yes/no)?" send "yes\\n" expect "Password:" send "{self.primary_root_node.ssh_password}\\n" expect "*(yes/no)?" 
send "yes\\n" expect "Password:" send "{macro.COMMON_PASSWD}\\n" expect "Password:" send "{macro.COMMON_PASSWD}\\n" expect "Password:" send "{macro.COMMON_PASSWD}\\n" expect eof\n''' + '''EOF''' self.log.info(execute_cmd) msg = self.primary_root_node.sh(execute_cmd).result() self.log.info(msg) self.assertIn(Constant.preinstall_success_msg, msg, '执行失败:' + text) text = '----step5:执行gs_install安装数据库 expect:安装成功----' self.log.info(text) su_cmd = f'su - {self.u_name}' install_cmd = f'gs_install -X {self.xml_path}' execute_cmd = f'''expect <<EOF set timeout 200 spawn {su_cmd} expect "$" send "source {self.env_path}\\n" expect "$" send "{install_cmd}\\n" expect "Please enter password for database:" send "{macro.COMMON_PASSWD}\\n" expect "Please repeat for database:" send "{macro.COMMON_PASSWD}\\n" expect eof\n''' + '''EOF''' self.log.info(execute_cmd) exec_msg = self.primary_root_node.sh(execute_cmd).result() self.log.info(exec_msg) self.assertIn(Constant.install_success_msg, exec_msg, text + '执行失败') text = '----step6:在对备1减容时删除备1与主机用户互信 expect:减容成功----' self.log.info(text) self.log.info('构建减容命令shell脚本前置条件') drop_cmd = f'gs_dropnode -U {self.u_name} -G {self.u_name} ' \ f'-h {self.sta1_host}' self.log.info(drop_cmd) shell_path = os.path.join('/', 'home', f'{self.u_name}', 'dropnode.sh') shell_cmd = f'touch {shell_path};echo -e {drop_cmd} > {shell_path};' \ f'chmod 755 {shell_path};cat {shell_path}' self.log.info(shell_cmd) pri_shell_res = self.primary_root_node.sh(shell_cmd).result() sta1_shell_res = self.standby1_root_node.sh(shell_cmd).result() self.log.info(pri_shell_res) self.log.info(sta1_shell_res) self.log.info('对备1执行减容操作') drop_cmd = f'''expect <<EOF set timeout 60 spawn su - {self.u_name} expect "$" send "source {self.env_path}\\n" expect "$" send "{shell_path}\\n" expect "(yes/no)?" 
send "yes\\n" expect eof\n''' + '''EOF''' session_1 = ComThread(self.common.get_sh_result, args=( self.primary_root_node, drop_cmd,)) session_1.setDaemon(True) self.log.info('在备1减容同时删除备1与主机用户互信') ssh_path = os.path.join('/', 'home', f'{self.u_name}', '.ssh') del_cmd = f'rm -rf {ssh_path}' session_2 = ComThread(self.common.get_sh_result, args=( self.standby1_root_node, del_cmd,)) session_2.setDaemon(True) session_1.start() time.sleep(0.5) session_2.start() self.log.info('备1减容结果') session_1.join(120) session_1_result = session_1.get_result() self.log.info(session_1_result) self.log.info('备1互信删除结果') session_2.join(120) session_2_result = session_2.get_result() self.log.info(session_2_result) self.log.info('备1减容及备1删除互信结果') self.assertIn('Success to drop the target nodes', session_1_result, text + '减容失败') self.assertEqual('', session_2_result, '备1用户互信删除失败') self.log.info('检查数据库是否减容') check_cmd = f'su - {self.u_name} <<EOF\n' \ f'source {self.env_path};' \ f'gs_om -t status --detail;\n' \ f'EOF' self.log.info(check_cmd) check_res = self.primary_root_node.sh(check_cmd).result() self.log.info(check_res) check_lis = [self.pri_host, self.sta2_host] for info in check_lis: self.assertIn(info, check_res, '数据库状态检查失败') def tearDown(self): text = "----step7:卸载数据库,清理用户及文件 expect:成功----" self.log.info(text) text_1 = '----gs_uninstall卸载数据库----' self.log.info(text_1) execute_cmd = f'su - {self.u_name} <<EOF\n' \ f'source {self.env_path};' \ f'gs_uninstall --delete-data;\n' \ f'EOF' self.log.info(execute_cmd) uninstall_msg = self.primary_root_node.sh(execute_cmd).result() self.log.info(uninstall_msg) text_2 = '-----删除用户下进程 expect:成功-----' self.log.info(text_2) kill_cmd = f"ps -u {self.u_name} | grep -v PID| " \ f"awk \'{{{{print $1}}}}\' | xargs kill -9" self.log.info(kill_cmd) self.common.get_sh_result(self.primary_root_node, kill_cmd) self.common.get_sh_result(self.standby1_root_node, kill_cmd) self.common.get_sh_result(self.standby2_root_node, kill_cmd) text_3 = '----删除用户及数据准备目录 expect:成功----' self.log.info(text_3) dir_del_cmd = f'rm -rf {self.openGauss_path}' self.log.info(dir_del_cmd) self.primary_root_node.sh(dir_del_cmd).result() self.standby1_root_node.sh(dir_del_cmd).result() self.standby2_root_node.sh(dir_del_cmd).result() check_cmd = f'if [ -d {self.openGauss_path} ]; ' \ f'then echo "exists"; else echo "not exists"; fi' self.log.info(check_cmd) pri_del_dir_msg = self.common.get_sh_result(self.primary_root_node, check_cmd) sta1_del_dir_msg = self.common.get_sh_result(self.standby1_root_node, check_cmd) sta2_del_dir_msg = self.common.get_sh_result(self.standby2_root_node, check_cmd) usr_del_cmd = f'userdel -rf {self.u_name}' self.log.info(usr_del_cmd) pri_del_usr_msg = self.primary_root_node.sh(usr_del_cmd).result() sta1_del_usr_msg = self.standby1_root_node.sh(usr_del_cmd).result() sta2_del_usr_msg = self.standby2_root_node.sh(usr_del_cmd).result() self.assertIn(Constant.uninstall_success_msg, uninstall_msg, '执行失败:' + text_1) for msg in [pri_del_dir_msg, sta1_del_dir_msg, sta2_del_dir_msg]: self.assertEqual('not exists', msg, '目录删除失败:' + text_3) for msg in [pri_del_usr_msg, sta1_del_usr_msg, sta2_del_usr_msg]: self.log.info(msg) self.assertEqual('', msg, '用户删除失败' + text_3) self.log.info(f'----{os.path.basename(__file__)} finish----')
py
b4148362416e863bb525733085ff1a90219f3cca
from .tool.func import * def give_user_check_2(conn, name): curs = conn.cursor() plus_id = flask.request.args.get('plus', None) if admin_check('all', None, name) == 1 or (plus_id and admin_check('all', None, plus_id) == 1): if admin_check() != 1: return re_error('/error/4') num = int(number_check(flask.request.args.get('num', '1'))) sql_num = (num * 50 - 50) if num * 50 > 0 else 0 div = '' check_type = flask.request.args.get('type', '') if admin_check(4, (check_type + ' ' if check_type != '' else '') + 'check (' + name + ')') != 1: return re_error('/error/3') if check_type == '': if ip_or_user(name) == 0: curs.execute(db_change("select data from user_set where name = \"approval_question\" and id = ?"), [name]) approval_question = curs.fetchall() if approval_question and approval_question[0][0]: curs.execute(db_change("select data from user_set where name = \"approval_question_answer\" and id = ?"), [name]) approval_question_answer = curs.fetchall() if approval_question_answer and approval_question_answer[0]: div += ''' <table id="main_table_set"> <tbody> <tr id="main_table_top_tr"> <td>Q</td> <td>''' + approval_question[0][0] + '''</td> <td>A</td> <td>''' + approval_question_answer[0][0] + '''</td> </tr> </tbody> </table> <hr class="main_hr"> ''' if plus_id: plus = "or " + ('ip' if ip_or_user(plus_id) == 1 else 'name') + " = ? " set_list = [name, plus_id, sql_num] if num == 1: curs.execute(db_change("" + \ "select distinct ip from ua_d " + \ "where " + ('ip' if ip_or_user(name) == 1 else 'name') + " = ? or " + ('ip' if ip_or_user(plus_id) == 1 else 'name') + " = ? " ""), [name, plus_id]) all_ip_count = len(curs.fetchall()) curs.execute(db_change("" + \ "select distinct ip from ua_d " + \ "where " + ('ip' if ip_or_user(name) == 1 else 'name') + " = ?" + \ ""), [name]) a_ip_count = len(curs.fetchall()) curs.execute(db_change("" + \ "select distinct ip from ua_d " + \ "where " + ('ip' if ip_or_user(plus_id) == 1 else 'name') + " = ? " ""), [plus_id]) b_ip_count = len(curs.fetchall()) if a_ip_count + b_ip_count != all_ip_count: div += load_lang('same_ip_exist') + '<hr class="main_hr">' else: plus = '' set_list = [name, sql_num] curs.execute(db_change("" + \ "select name, ip, ua, today from ua_d " + \ "where " + ('ip' if ip_or_user(name) == 1 else 'name') + " = ? 
" + \ plus + \ "order by today desc limit ?, 50" + \ ""), set_list) record = curs.fetchall() if record: if not plus_id: div = '' + \ '<a href="/manager/14?plus=' + url_pas(name) + '">(' + load_lang('compare') + ')</a> ' + \ '<a href="/check/' + url_pas(name) + '?type=simple">(' + load_lang('simple_check') + ')</a>' + \ '<hr class="main_hr">' + \ '' + div else: div = '' + \ '<a href="/check/' + url_pas(name) + '">(' + name + ')</a> ' + \ '<a href="/check/' + url_pas(plus_id) + '">(' + plus_id + ')</a>' + \ '<hr class="main_hr">' + \ '' + div div += ''' <table id="main_table_set"> <tbody> <tr id="main_table_top_tr"> <td id="main_table_width">''' + load_lang('name') + '''</td> <td id="main_table_width">''' + load_lang('ip') + '''</td> <td id="main_table_width">''' + load_lang('time') + '''</td> </tr> ''' set_n = 0 for data in record: if data[2]: if len(data[2]) > 300: ua = '' + \ '<a href="javascript:void();" onclick="document.getElementById(\'check_' + str(set_n) + '\').style.display=\'block\';">(300+)</a>' + \ '<div id="check_' + str(set_n) + '" style="display:none;">' + html.escape(data[2]) + '</div>' + \ '' set_n += 1 else: ua = html.escape(data[2]) else: ua = '<br>' div += ''' <tr> <td> <a href="/check/''' + url_pas(data[0]) + '''">''' + data[0] + '''</a> <a href="/check_delete''' + \ '''?name=''' + url_pas(data[0]) + \ '''&ip=''' + url_pas(data[1]) + \ '''&time=''' + url_pas(data[3].replace(' ', '').replace(':', '').replace('-', '')) + \ '''&return_type=''' + ('0' if ip_or_user(name) == 0 else '1') + '''"> (''' + load_lang('delete') + ''') </a> </td> <td><a href="/check/''' + url_pas(data[1]) + '''">''' + data[1] + '''</a></td> <td>''' + data[3] + '''</td> </tr> <tr> <td colspan="3">''' + ua + '''</td> </tr> ''' div += ''' </tbody> </table> ''' div += next_fix( '/check/' + url_pas(name) + ('?plus=' + plus_id + '&num=' if plus_id else '?num='), num, record ) return easy_minify(flask.render_template(skin_check(), imp = [load_lang('check'), wiki_set(), wiki_custom(), wiki_css([0, 0])], data = div, menu = [['manager', load_lang('return')]] )) else: curs.execute(db_change("" + \ "select distinct " + ('name' if ip_or_user(name) == 1 else 'ip') + " from ua_d " + \ "where " + ('ip' if ip_or_user(name) == 1 else 'name') + " = ? " "order by today desc limit ?, 50" + \ ""), [name, sql_num]) record = curs.fetchall() div = '' for i in record: div += '<li><a href="/check/' + url_pas(i[0]) + '?type=simple">' + i[0] + '</a></li>' if div != '': div = '<ul class="inside_ul">' + div + '</ul>' div += next_fix( '/check/' + url_pas(name) + '?type=' + check_type + '&num=', num, record ) return easy_minify(flask.render_template(skin_check(), imp = [name, wiki_set(), wiki_custom(), wiki_css(['(' + load_lang('simple_check') + ')', 0])], data = div, menu = [['check/' + url_pas(name), load_lang('return')]] ))
py
b414842022b9d9e8a6b79d50553eb0334b70d595
import os import sys import json import argparse import pika DEFAULT_RABBITMQ_HOSTNAME = os.getenv("RABBITMQ_HOSTNAME", "localhost") DEFAULT_RABBITMQ_PORT = int(os.getenv('RABBITMQ_PORT', 5672)) DEFAULT_RABBITMQ_USER = os.getenv("RABBITMQ_USER", "gotoiot") DEFAULT_RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "gotoiot") DEFAULT_RABBITMQ_VHOST = os.getenv("RABBITMQ_VHOST", "/") DEFAULT_EXCHANGE = os.getenv("EXCHANGE", "") DEFAULT_EXCHANGE_TYPE = os.getenv("EXCHANGE_TYPE", "") DEFAULT_DURABLE = os.getenv("DURABLE", True) DEFAULT_ROUTING_KEY = os.getenv("ROUTING_KEY", "") DEFAULT_QUEUE = os.getenv("QUEUE", "") DEFAULT_EXCLUSIVE = os.getenv("EXCLUSIVE", False) DEFAULT_AUTO_ACK = os.getenv("AUTO_ACK", True) DEFAULT_SIMPLE_DECLARATION = os.getenv("SIMPLE_DECLARATION", False) # objects to establish connection to broker connection = None channel = None def parse_cli_args(): def _boolean_string(s): if s not in {'False', 'True'}: raise ValueError('Not a valid boolean string') return s == 'True' # Create the parset object parser = argparse.ArgumentParser( description='Help of usage for Goto IoT AMQP consumer client' ) # Add the cli arguments supported parser.add_argument( "-H", "--hostname", dest='rabbitmq_hostname', type=str, help='The RabbitMQ hostname', default=DEFAULT_RABBITMQ_HOSTNAME ) parser.add_argument( "-p", "--port", dest='rabbitmq_port', type=int, help='The RabbitMQ port', default=DEFAULT_RABBITMQ_PORT ) parser.add_argument( "-u", "--user", dest='rabbitmq_user', type=str, help='The RabbitMQ user', default=DEFAULT_RABBITMQ_USER ) parser.add_argument( "-P", "--pass", dest='rabbitmq_pass', type=str, help='The RabbitMQ pass', default=DEFAULT_RABBITMQ_PASS ) parser.add_argument( "-v", "--vhost", dest='rabbitmq_vhost', type=str, help='The RabbitMQ virtual host', default=DEFAULT_RABBITMQ_VHOST ) parser.add_argument( "-e", "--exchange", dest='exchange', type=str, help='The exchange name to publish', default=DEFAULT_EXCHANGE ) parser.add_argument( "-t", "--type", dest='exchange_type', type=str, help='The exchange type to declare', default=DEFAULT_EXCHANGE_TYPE ) parser.add_argument( "-d", "--durable", dest='durable', type=_boolean_string, help='The exchange durable config', default=DEFAULT_DURABLE ) parser.add_argument( "-r", "--routing_key", dest='routing_key', type=str, help='The routing key to publish message', default=DEFAULT_ROUTING_KEY ) parser.add_argument( "-q", "--queue", dest='queue', type=str, help='The queue name to consume from', default=DEFAULT_QUEUE ) parser.add_argument( "-x", "--exclusive", dest='exclusive', type=_boolean_string, help='The queue exclusive declaration flag', default=DEFAULT_EXCLUSIVE ) parser.add_argument( "-a", "--auto_ack", dest='auto_ack', type=_boolean_string, help='The queue auto ACK enable flag config', default=DEFAULT_AUTO_ACK ) parser.add_argument( "-s", "--simple_declaration", dest='simple_declaration', type=_boolean_string, help='Flag to declare simple exchange entities', default=DEFAULT_SIMPLE_DECLARATION ) # Parse arguments and return them as dict args = parser.parse_args() return vars(args) def connect_to_broker(**kwargs): global connection global channel credentials = pika.PlainCredentials(kwargs['rabbitmq_user'], kwargs['rabbitmq_pass']) parameters = pika.ConnectionParameters( kwargs['rabbitmq_hostname'], kwargs['rabbitmq_port'], kwargs['rabbitmq_vhost'], credentials ) connection = pika.BlockingConnection(parameters) channel = connection.channel() print(f"[INFO] Connected to broker: " f"host={kwargs['rabbitmq_hostname']}, " 
f"port={kwargs['rabbitmq_port']}, " f"user={kwargs['rabbitmq_user']}, " f"pass={kwargs['rabbitmq_pass']}, " f"vhost={kwargs['rabbitmq_vhost']}") def _on_message_callback(ch, method, properties, body): print(f"[INFO] Received message: {body}") # print(f"[INFO] Received message: ch={ch}, method={method}, properties={properties}, body={body}") def declare_broker_entities(**kwargs): if kwargs['simple_declaration']: channel.basic_consume( queue=kwargs['queue'], on_message_callback=_on_message_callback, auto_ack=kwargs['auto_ack'] ) print(f"[INFO] Done simple exchange declarations because already declared") return channel.exchange_declare( exchange=kwargs['exchange'], exchange_type=kwargs['exchange_type'], durable=kwargs['durable'] ) print(f"[INFO] Declared exchange: exchange='{kwargs['exchange']}', exchange_type='{kwargs['exchange_type']}', durable='{kwargs['durable']}'") queue = channel.queue_declare( queue=kwargs['queue'], exclusive=kwargs['exclusive'], durable=kwargs['durable'] ) queue_name = queue.method.queue channel.queue_bind( exchange=kwargs['exchange'], queue=queue_name, routing_key=kwargs['routing_key'] ) print(f"[INFO] Binded exchange '{kwargs['exchange']}' to queue '{queue_name}' with routing key '{kwargs['routing_key']}'") channel.basic_consume( queue=queue_name, on_message_callback=_on_message_callback, auto_ack=kwargs['auto_ack'] ) def disconnect_from_broker(): connection.close() print(f"[INFO] Closed connection to broker") def consume_queue(**kwargs): print(f"[INFO] Starting to consume from queue in blocking mode...To exit press CTRL+C") channel.start_consuming() def main(): cli_args = parse_cli_args() # print(f"[DEBUG] CLI args: {cli_args}") connect_to_broker(**cli_args) declare_broker_entities(**cli_args) consume_queue(**cli_args) if __name__ == '__main__': try: main() except KeyboardInterrupt: disconnect_from_broker() print('[INFO] Exiting consumer by keyboard interrupt') try: sys.exit(0) except SystemExit: os._exit(0)
py
b41485dd51f6036f42ac37059484a47b00653e5e
import torch.optim as optim from torch.nn.utils import clip_grad_norm class Optim(object): def set_parameters(self, params): self.params = [p for p in params if p.requires_grad] if self.method == 'sgd': self.optimizer = optim.SGD(self.params, lr=self.lr) elif self.method == 'rmsprop': self.optimizer = optim.RMSprop( self.params, lr=self.lr, alpha=self.alpha) elif self.method == 'adam': self.optimizer = optim.Adam(self.params, lr=self.lr, betas=self.betas, eps=1e-9) else: raise RuntimeError("Invalid optim method: " + self.method) def __init__(self, method, lr, alpha, max_grad_norm, lr_decay=1, start_decay_at=None, beta1=0.9, beta2=0.98, opt=None): self.last_metric = None self.lr = lr self.alpha = alpha self.max_grad_norm = max_grad_norm self.method = method self.lr_decay = lr_decay self.start_decay_at = start_decay_at self.start_decay = False self._step = 0 self.betas = [beta1, beta2] self.opt = opt def _setRate(self, lr): self.lr = lr self.optimizer.param_groups[0]['lr'] = self.lr def step(self): "Compute gradients norm." self._step += 1 # Decay method used in tensor2tensor. if self.opt.__dict__.get("decay_method", "") == "noam": self._setRate( self.opt.learning_rate * (self.opt.rnn_size ** (-0.5) * min(self._step ** (-0.5), self._step * self.opt.warmup_steps**(-1.5)))) if self.max_grad_norm: clip_grad_norm(self.params, self.max_grad_norm) self.optimizer.step() def updateLearningRate(self, metric, epoch): """ Decay learning rate if val perf does not improve or we hit the start_decay_at limit. """ if (self.start_decay_at is not None) and (epoch >= self.start_decay_at): self.start_decay = True if (self.last_metric is not None) and (metric is not None) and (metric > self.last_metric): self.start_decay = True if self.start_decay: self.lr = self.lr * self.lr_decay print("Decaying learning rate to %g" % self.lr) self.last_metric = metric self.optimizer.param_groups[0]['lr'] = self.lr
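

# Minimal usage sketch of the Optim wrapper (illustrative only; assumes PyTorch is
# installed, and the linear model, sizes and hyper-parameters are placeholders).
if __name__ == "__main__":
    import torch
    import torch.nn as nn
    from argparse import Namespace

    model = nn.Linear(8, 2)
    # `opt` must expose the attributes read in step(); a Namespace stands in for the
    # usual argparse options object.
    opts = Namespace(decay_method="", learning_rate=1e-3, rnn_size=512, warmup_steps=4000)
    optimizer = Optim("adam", lr=1e-3, alpha=0.9, max_grad_norm=5.0,
                      lr_decay=0.5, start_decay_at=8, opt=opts)
    optimizer.set_parameters(model.parameters())

    loss = model(torch.randn(4, 8)).sum()
    loss.backward()
    optimizer.step()                                    # clips gradients, then updates
    optimizer.updateLearningRate(metric=1.23, epoch=9)  # decays lr since epoch >= start_decay_at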
py
b4148724e7c2588f8f830c28eb44fc9a14c2c678
#!/usr/bin/python # # Copyright 2018-2021 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from polyaxon import settings def check_offline(f): """ The `check_offline` is a decorator to ignore any decorated function when POLYAXON_IS_OFFLINE env var is found. usage example with class method: @check_offline def my_func(self, *args, **kwargs): ... return ... usage example with a function: @check_offline def my_func(arg1, arg2): ... return ... """ @functools.wraps(f) def wrapper(*args, **kwargs): if settings.CLIENT_CONFIG.is_offline: return None return f(*args, **kwargs) return wrapper
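
# Illustrative usage sketch (not part of the original module); `log_metric` is a
# hypothetical function used only to show the behaviour of the guard: the wrapped
# call becomes a no-op returning None whenever POLYAXON_IS_OFFLINE puts the client
# in offline mode.
if __name__ == "__main__":

    @check_offline
    def log_metric(name, value):
        return {"name": name, "value": value}

    # Returns the dict when online; returns None when CLIENT_CONFIG.is_offline is True.
    print(log_metric("loss", 0.1))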
py
b4148801b3575dadab1ea68ddcec2061997b7902
"""Shelly helpers functions.""" from datetime import timedelta import logging from typing import List, Optional, Tuple import aioshelly from homeassistant.const import EVENT_HOMEASSISTANT_STOP, TEMP_CELSIUS, TEMP_FAHRENHEIT from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import singleton from homeassistant.util.dt import parse_datetime, utcnow from .const import ( BASIC_INPUTS_EVENTS_TYPES, COAP, DATA_CONFIG_ENTRY, DOMAIN, SHBTN_1_INPUTS_EVENTS_TYPES, SHIX3_1_INPUTS_EVENTS_TYPES, ) _LOGGER = logging.getLogger(__name__) async def async_remove_shelly_entity(hass, domain, unique_id): """Remove a Shelly entity.""" entity_reg = await hass.helpers.entity_registry.async_get_registry() entity_id = entity_reg.async_get_entity_id(domain, DOMAIN, unique_id) if entity_id: _LOGGER.debug("Removing entity: %s", entity_id) entity_reg.async_remove(entity_id) def temperature_unit(block_info: dict) -> str: """Detect temperature unit.""" if block_info[aioshelly.BLOCK_VALUE_UNIT] == "F": return TEMP_FAHRENHEIT return TEMP_CELSIUS def get_device_name(device: aioshelly.Device) -> str: """Naming for device.""" return device.settings["name"] or device.settings["device"]["hostname"] def get_number_of_channels(device: aioshelly.Device, block: aioshelly.Block) -> int: """Get number of channels for block type.""" channels = None if block.type == "input": # Shelly Dimmer/1L has two input channels and missing "num_inputs" if device.settings["device"]["type"] in ["SHDM-1", "SHDM-2", "SHSW-L"]: channels = 2 else: channels = device.shelly.get("num_inputs") elif block.type == "emeter": channels = device.shelly.get("num_emeters") elif block.type in ["relay", "light"]: channels = device.shelly.get("num_outputs") elif block.type in ["roller", "device"]: channels = 1 return channels or 1 def get_entity_name( device: aioshelly.Device, block: aioshelly.Block, description: Optional[str] = None, ) -> str: """Naming for switch and sensors.""" channel_name = get_device_channel_name(device, block) if description: return f"{channel_name} {description}" return channel_name def get_device_channel_name( device: aioshelly.Device, block: aioshelly.Block, ) -> str: """Get name based on device and channel name.""" entity_name = get_device_name(device) if ( not block or block.type == "device" or get_number_of_channels(device, block) == 1 ): return entity_name channel_name = None mode = block.type + "s" if mode in device.settings: channel_name = device.settings[mode][int(block.channel)].get("name") if channel_name: return channel_name if device.settings["device"]["type"] == "SHEM-3": base = ord("A") else: base = ord("1") return f"{entity_name} channel {chr(int(block.channel)+base)}" def is_momentary_input(settings: dict, block: aioshelly.Block) -> bool: """Return true if input button settings is set to a momentary type.""" # Shelly Button type is fixed to momentary and no btn_type if settings["device"]["type"] == "SHBTN-1": return True button = settings.get("relays") or settings.get("lights") or settings.get("inputs") # Shelly 1L has two button settings in the first channel if settings["device"]["type"] == "SHSW-L": channel = int(block.channel or 0) + 1 button_type = button[0].get("btn" + str(channel) + "_type") else: # Some devices has only one channel in settings channel = min(int(block.channel or 0), len(button) - 1) button_type = button[channel].get("btn_type") return button_type in ["momentary", "momentary_on_release"] def get_device_uptime(status: dict, last_uptime: str) -> str: """Return device uptime 
string, tolerate up to 5 seconds deviation.""" uptime = utcnow() - timedelta(seconds=status["uptime"]) if not last_uptime: return uptime.replace(microsecond=0).isoformat() if abs((uptime - parse_datetime(last_uptime)).total_seconds()) > 5: return uptime.replace(microsecond=0).isoformat() return last_uptime def get_input_triggers( device: aioshelly.Device, block: aioshelly.Block ) -> List[Tuple[str, str]]: """Return list of input triggers for block.""" if "inputEvent" not in block.sensor_ids or "inputEventCnt" not in block.sensor_ids: return [] if not is_momentary_input(device.settings, block): return [] triggers = [] if block.type == "device" or get_number_of_channels(device, block) == 1: subtype = "button" else: subtype = f"button{int(block.channel)+1}" if device.settings["device"]["type"] == "SHBTN-1": trigger_types = SHBTN_1_INPUTS_EVENTS_TYPES elif device.settings["device"]["type"] == "SHIX3-1": trigger_types = SHIX3_1_INPUTS_EVENTS_TYPES else: trigger_types = BASIC_INPUTS_EVENTS_TYPES for trigger_type in trigger_types: triggers.append((trigger_type, subtype)) return triggers def get_device_wrapper(hass: HomeAssistant, device_id: str): """Get a Shelly device wrapper for the given device id.""" if not hass.data.get(DOMAIN): return None for config_entry in hass.data[DOMAIN][DATA_CONFIG_ENTRY]: wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry][COAP] if wrapper.device_id == device_id: return wrapper return None @singleton.singleton("shelly_coap") async def get_coap_context(hass): """Get CoAP context to be used in all Shelly devices.""" context = aioshelly.COAP() await context.initialize() @callback def shutdown_listener(ev): context.close() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown_listener) return context def get_device_sleep_period(settings: dict) -> int: """Return the device sleep period in seconds or 0 for non sleeping devices.""" sleep_period = 0 if settings.get("sleep_mode", False): sleep_period = settings["sleep_mode"]["period"] if settings["sleep_mode"]["unit"] == "h": sleep_period *= 60 # hours to minutes return sleep_period * 60 # minutes to seconds
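
# Illustrative sketch (not part of the original module): a worked example of the
# sleep-period conversion above. A settings payload of
# {"sleep_mode": {"period": 12, "unit": "h"}} means 12 h -> 720 min -> 43200 s,
# while devices without a sleep_mode entry report 0 (non-sleeping).
if __name__ == "__main__":
    assert get_device_sleep_period({"sleep_mode": {"period": 12, "unit": "h"}}) == 43200
    assert get_device_sleep_period({}) == 0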
py
b414883a2bd6046afa122bb62ff86ecc8b4963d2
import logging from collections import OrderedDict from typing import Callable, Mapping, Optional, cast from ignite.base import Serializable from ignite.engine import Engine __all__ = ["EarlyStopping"] class EarlyStopping(Serializable): """EarlyStopping handler can be used to stop the training if no improvement after a given number of events. Args: patience (int): Number of events to wait if no improvement and then stop the training. score_function (callable): It should be a function taking a single argument, an :class:`~ignite.engine.engine.Engine` object, and return a score `float`. An improvement is considered if the score is higher. trainer (Engine): trainer engine to stop the run if no improvement. min_delta (float, optional): A minimum increase in the score to qualify as an improvement, i.e. an increase of less than or equal to `min_delta`, will count as no improvement. cumulative_delta (bool, optional): It True, `min_delta` defines an increase since the last `patience` reset, otherwise, it defines an increase after the last event. Default value is False. Examples: .. code-block:: python from ignite.engine import Engine, Events from ignite.handlers import EarlyStopping def score_function(engine): val_loss = engine.state.metrics['nll'] return -val_loss handler = EarlyStopping(patience=10, score_function=score_function, trainer=trainer) # Note: the handler is attached to an *Evaluator* (runs one epoch on validation dataset). evaluator.add_event_handler(Events.COMPLETED, handler) """ _state_dict_all_req_keys = ( "counter", "best_score", ) def __init__( self, patience: int, score_function: Callable, trainer: Engine, min_delta: float = 0.0, cumulative_delta: bool = False, ): if not callable(score_function): raise TypeError("Argument score_function should be a function.") if patience < 1: raise ValueError("Argument patience should be positive integer.") if min_delta < 0.0: raise ValueError("Argument min_delta should not be a negative number.") if not isinstance(trainer, Engine): raise TypeError("Argument trainer should be an instance of Engine.") self.score_function = score_function self.patience = patience self.min_delta = min_delta self.cumulative_delta = cumulative_delta self.trainer = trainer self.counter = 0 self.best_score = None # type: Optional[float] self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__) def __call__(self, engine: Engine) -> None: score = self.score_function(engine) if self.best_score is None: self.best_score = score elif score <= self.best_score + self.min_delta: if not self.cumulative_delta and score > self.best_score: self.best_score = score self.counter += 1 self.logger.debug("EarlyStopping: %i / %i" % (self.counter, self.patience)) if self.counter >= self.patience: self.logger.info("EarlyStopping: Stop training") self.trainer.terminate() else: self.best_score = score self.counter = 0 def state_dict(self) -> "OrderedDict[str, float]": return OrderedDict([("counter", self.counter), ("best_score", cast(float, self.best_score))]) def load_state_dict(self, state_dict: Mapping) -> None: super().load_state_dict(state_dict) self.counter = state_dict["counter"] self.best_score = state_dict["best_score"]
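
# Illustrative sketch (not part of the original module): driving the handler by
# hand with a toy score sequence. With min_delta=0.01, gains smaller than
# min_delta count as stagnation, so the counter reaches patience=2 on the third
# call and the (dummy) trainer is asked to terminate.
if __name__ == "__main__":
    trainer = Engine(lambda engine, batch: None)
    evaluator = Engine(lambda engine, batch: None)
    scores = iter([0.50, 0.505, 0.508])
    handler = EarlyStopping(patience=2, score_function=lambda e: next(scores),
                            trainer=trainer, min_delta=0.01)
    for _ in range(3):
        handler(evaluator)
    print(handler.counter, trainer.should_terminate)  # expected: 2 True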
py
b41488df27b9bef35c3387afdd485feb08fbe905
# -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2019-10-15 11:18 from __future__ import unicode_literals import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('mobile', models.CharField(max_length=11, unique=True, verbose_name='手机号')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name': '用户', 'verbose_name_plural': '用户', 'db_table': 'tb_users', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
py
b4148949cecc77148f3c4a37f1b74ee87e2d9115
import os import weakref import numpy as np from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.funcs import setdefaultattr from yt.geometry.grid_geometry_handler import GridIndex from .fields import SkeletonFieldInfo class SkeletonGrid(AMRGridPatch): _id_offset = 0 def __init__(self, id, index, level): super().__init__(id, filename=index.index_filename, index=index) self.Parent = None self.Children = [] self.Level = level def __repr__(self): return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions) class SkeletonHierarchy(GridIndex): grid = SkeletonGrid def __init__(self, ds, dataset_type="skeleton"): self.dataset_type = dataset_type self.dataset = weakref.proxy(ds) # for now, the index file is the dataset! self.index_filename = self.dataset.parameter_filename self.directory = os.path.dirname(self.index_filename) # float type for the simulation edges and must be float64 now self.float_type = np.float64 super().__init__(ds, dataset_type) def _detect_output_fields(self): # This needs to set a self.field_list that contains all the available, # on-disk fields. No derived fields should be defined here. # NOTE: Each should be a tuple, where the first element is the on-disk # fluid type or particle type. Convention suggests that the on-disk # fluid type is usually the dataset_type and the on-disk particle type # (for a single population of particles) is "io". pass def _count_grids(self): # This needs to set self.num_grids (int) pass def _parse_index(self): # This needs to fill the following arrays, where N is self.num_grids: # self.grid_left_edge (N, 3) <= float64 # self.grid_right_edge (N, 3) <= float64 # self.grid_dimensions (N, 3) <= int # self.grid_particle_count (N, 1) <= int # self.grid_levels (N, 1) <= int # self.grids (N, 1) <= grid objects # self.max_level = self.grid_levels.max() pass def _populate_grid_objects(self): # the minimal form of this method is # # for g in self.grids: # g._prepare_grid() # g._setup_dx() # # This must also set: # g.Children <= list of child grids # g.Parent <= parent grid # This is handled by the frontend because often the children must be identified. pass class SkeletonDataset(Dataset): _index_class = SkeletonHierarchy _field_info_class = SkeletonFieldInfo def __init__( self, filename, dataset_type="skeleton", storage_filename=None, units_override=None, unit_system="cgs", default_species_fields=None, ): self.fluid_types += ("skeleton",) super().__init__( filename, dataset_type, units_override=units_override, unit_system=unit_system, default_species_fields=default_species_fields, ) self.storage_filename = storage_filename # refinement factor between a grid and its subgrid # self.refine_by = 2 def _set_code_unit_attributes(self): # This is where quantities are created that represent the various # on-disk units. These are the currently available quantities which # should be set, along with examples of how to set them to standard # values. # # self.length_unit = self.quan(1.0, "cm") # self.mass_unit = self.quan(1.0, "g") # self.time_unit = self.quan(1.0, "s") # self.time_unit = self.quan(1.0, "s") # # These can also be set: # self.velocity_unit = self.quan(1.0, "cm/s") # self.magnetic_unit = self.quan(1.0, "gauss") # this minimalistic implementation fills the requirements for # this frontend to run, change it to make it run _correctly_ ! 
for key, unit in self.__class__.default_units.items(): setdefaultattr(self, key, self.quan(1, unit)) def _parse_parameter_file(self): # This needs to set up the following items. Note that these are all # assumed to be in code units; domain_left_edge and domain_right_edge # will be converted to YTArray automatically at a later time. # This includes the cosmological parameters. # # self.unique_identifier <= unique identifier for the dataset # being read (e.g., UUID or ST_CTIME) # self.parameters <= dict full of code-specific items of use # self.domain_left_edge <= three-element array of float64 # self.domain_right_edge <= three-element array of float64 # self.dimensionality <= int # self.domain_dimensions <= three-element array of int64 # self.periodicity <= three-element tuple of booleans # self.current_time <= simulation time in code units (float) # # We also set up cosmological information. Set these to zero if # non-cosmological. # # self.cosmological_simulation <= int, 0 or 1 # self.current_redshift <= float # self.omega_lambda <= float # self.omega_matter <= float # self.hubble_constant <= float # optional (the following have default implementations) # self.unique_identifier <= unique identifier for the dataset # being read (e.g., UUID or ST_CTIME) (int) # # self.geometry <= a lower case string # ("cartesian", "polar", "cylindrical"...) # (defaults to 'cartesian') # this attribute is required. # Change this value to a constant 0 if time is not relevant to your dataset. # Otherwise, parse its value in any appropriate fashion. self.current_time = -1 # required. Change this if need be. self.cosmological_simulation = 0 @classmethod def _is_valid(cls, filename, *args, **kwargs): # This accepts a filename or a set of arguments and returns True or # False depending on if the file is of the type requested. # # The functionality in this method should be unique enough that it can # differentiate the frontend from others. Sometimes this means looking # for specific fields or attributes in the dataset in addition to # looking at the file name or extension. return False
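
# Illustrative sketch (not part of the original frontend): for a hypothetical
# format with a single root grid covering the whole domain, the stub methods in
# SkeletonHierarchy above could be filled in roughly like this (a real frontend
# would read these values from the on-disk index instead of hard-coding them):
#
#     def _count_grids(self):
#         self.num_grids = 1
#
#     def _parse_index(self):
#         self.grid_left_edge[0][:] = self.dataset.domain_left_edge
#         self.grid_right_edge[0][:] = self.dataset.domain_right_edge
#         self.grid_dimensions[0][:] = self.dataset.domain_dimensions
#         self.grid_particle_count[0][0] = 0
#         self.grid_levels[0][0] = 0
#         self.grids = np.array([self.grid(0, self, level=0)], dtype="object")
#         self.max_level = 0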
py
b414898edad82e78fcfc6e5ec0f77f4d038c0002
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from nova import db from nova.objects import service from nova.openstack.common import timeutils from nova.tests.objects import test_compute_node from nova.tests.objects import test_objects NOW = timeutils.utcnow().replace(microsecond=0) fake_service = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'host': 'fake-host', 'binary': 'fake-service', 'topic': 'fake-service-topic', 'report_count': 1, 'disabled': False, 'disabled_reason': None, } def compare(obj, db_obj): allow_missing = ('availability_zone', 'compute_node') for key in obj.fields: if key in allow_missing and not obj.obj_attr_is_set(key): continue obj_val = obj[key] if isinstance(obj_val, datetime.datetime): obj_val = obj_val.replace(tzinfo=None) db_val = db_obj[key] assert db_val == obj_val, '%s != %s' % (db_val, obj_val) class _TestServiceObject(object): def _test_query(self, db_method, obj_method, *args, **kwargs): self.mox.StubOutWithMock(db, db_method) getattr(db, db_method)(self.context, *args, **kwargs).AndReturn( fake_service) self.mox.ReplayAll() obj = getattr(service.Service, obj_method)(self.context, *args, **kwargs) compare(obj, fake_service) def test_get_by_id(self): self._test_query('service_get', 'get_by_id', 123) def test_get_by_host_and_topic(self): self._test_query('service_get_by_host_and_topic', 'get_by_host_and_topic', 'fake-host', 'fake-topic') def test_get_by_compute_host(self): self._test_query('service_get_by_compute_host', 'get_by_compute_host', 'fake-host') def test_get_by_args(self): self._test_query('service_get_by_args', 'get_by_args', 'fake-host', 'fake-service') def test_with_compute_node(self): self.mox.StubOutWithMock(db, 'service_get') self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id') _fake_service = dict( fake_service, compute_node=[test_compute_node.fake_compute_node]) db.service_get(self.context, 123).AndReturn(_fake_service) self.mox.ReplayAll() service_obj = service.Service.get_by_id(self.context, 123) self.assertTrue(service_obj.obj_attr_is_set('compute_node')) compare(service_obj.compute_node, test_compute_node.fake_compute_node) def test_create(self): self.mox.StubOutWithMock(db, 'service_create') db.service_create(self.context, {'host': 'fake-host'}).AndReturn( fake_service) self.mox.ReplayAll() service_obj = service.Service() service_obj.host = 'fake-host' service_obj.create(self.context) self.assertEqual(fake_service['id'], service_obj.id) def test_save(self): self.mox.StubOutWithMock(db, 'service_update') db.service_update(self.context, 123, {'host': 'fake-host'}).AndReturn( fake_service) self.mox.ReplayAll() service_obj = service.Service() service_obj.id = 123 service_obj.host = 'fake-host' service_obj.save(self.context) def _test_destroy(self): self.mox.StubOutWithMock(db, 'service_destroy') db.service_destroy(self.context, 123) self.mox.ReplayAll() service_obj = service.Service() service_obj.id = 123 service_obj.destroy(self.context) def 
test_destroy(self): # The test harness needs db.service_destroy to work, # so avoid leaving it broken here after we're done orig_service_destroy = db.service_destroy try: self._test_destroy() finally: db.service_destroy = orig_service_destroy def test_get_by_topic(self): self.mox.StubOutWithMock(db, 'service_get_all_by_topic') db.service_get_all_by_topic(self.context, 'fake-topic').AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_by_topic(self.context, 'fake-topic') self.assertEqual(1, len(services)) compare(services[0], fake_service) def test_get_by_host(self): self.mox.StubOutWithMock(db, 'service_get_all_by_host') db.service_get_all_by_host(self.context, 'fake-host').AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_by_host(self.context, 'fake-host') self.assertEqual(1, len(services)) compare(services[0], fake_service) def test_get_all(self): self.mox.StubOutWithMock(db, 'service_get_all') db.service_get_all(self.context, disabled=False).AndReturn( [fake_service]) self.mox.ReplayAll() services = service.ServiceList.get_all(self.context, disabled=False) self.assertEqual(1, len(services)) compare(services[0], fake_service) def test_get_all_with_az(self): self.mox.StubOutWithMock(db, 'service_get_all') self.mox.StubOutWithMock(db, 'aggregate_host_get_by_metadata_key') db.service_get_all(self.context, disabled=None).AndReturn( [dict(fake_service, topic='compute')]) db.aggregate_host_get_by_metadata_key( self.context, key='availability_zone').AndReturn( {fake_service['host']: ['test-az']}) self.mox.ReplayAll() services = service.ServiceList.get_all(self.context, set_zones=True) self.assertEqual(1, len(services)) self.assertEqual('test-az', services[0].availability_zone) def test_compute_node(self): self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id') db.compute_node_get_by_service_id(self.context, 123).AndReturn( test_compute_node.fake_compute_node) self.mox.ReplayAll() service_obj = service.Service() service_obj._context = self.context service_obj.id = 123 compare(service_obj.compute_node, test_compute_node.fake_compute_node) # Make sure it doesn't re-fetch this service_obj.compute_node class TestServiceObject(test_objects._LocalTest, _TestServiceObject): pass class TestRemoteServiceObject(test_objects._RemoteTest, _TestServiceObject): pass
py
b41489d9651ef0bad67bf3edc8b885bacd49006c
import unittest from collections import defaultdict from pathlib import Path from typing import Sequence import numpy as np import pytest from pydicom import dcmread from pydicom.data import get_testdata_files from pydicom.sr.codedict import codes from pydicom.sr.coding import Code from pydicom.uid import ( JPEG2000Lossless, JPEGLSLossless, ) from highdicom.content import ( PlanePositionSequence, PixelMeasuresSequence, PlaneOrientationSequence, ) from highdicom.enum import ContentQualificationValues, CoordinateSystemNames from highdicom.pm.content import RealWorldValueMapping from highdicom.pm.enum import ( DerivedPixelContrastValues, ImageFlavorValues, ) from highdicom.pm.sop import ParametricMap from highdicom.uid import UID class TestRealWorldValueMapping(unittest.TestCase): def setUp(self): super().setUp() def test_failed_construction_missing_or_unnecessary_parameters(self): lut_label = '1' lut_explanation = 'Feature 1' unit = codes.UCUM.NoUnits value_range = [0, 255] lut_data = [v**2 for v in range(256)] intercept = 0 slope = 1 with pytest.raises(TypeError): RealWorldValueMapping( lut_label=lut_label, lut_explanation=lut_explanation, unit=unit, value_range=value_range, ) with pytest.raises(TypeError): RealWorldValueMapping( lut_label=lut_label, lut_explanation=lut_explanation, unit=unit, value_range=value_range, slope=slope ) with pytest.raises(TypeError): RealWorldValueMapping( lut_label=lut_label, lut_explanation=lut_explanation, unit=unit, value_range=value_range, slope=slope, intercept=intercept, lut_data=lut_data ) with pytest.raises(TypeError): RealWorldValueMapping( lut_label=lut_label, lut_explanation=lut_explanation, unit=unit, value_range=value_range, slope=slope, lut_data=lut_data ) with pytest.raises(TypeError): RealWorldValueMapping( lut_label=lut_label, lut_explanation=lut_explanation, unit=unit, value_range=value_range, intercept=intercept, lut_data=lut_data ) def test_construction_integer_linear_relationship(self): lut_label = '1' lut_explanation = 'Feature 1' unit = codes.UCUM.NoUnits value_range = [0, 255] intercept = 0 slope = 1 quantity_definition = Code('130402', 'DCM', 'Class activation') m = RealWorldValueMapping( lut_label=lut_label, lut_explanation=lut_explanation, unit=unit, value_range=value_range, intercept=intercept, slope=slope, quantity_definition=quantity_definition ) assert m.LUTLabel == lut_label assert m.LUTExplanation == lut_explanation assert isinstance(m.RealWorldValueSlope, float) assert m.RealWorldValueSlope == float(slope) assert isinstance(m.RealWorldValueIntercept, float) assert m.RealWorldValueIntercept == float(intercept) assert m.MeasurementUnitsCodeSequence[0] == unit assert isinstance(m.RealWorldValueFirstValueMapped, int) assert m.RealWorldValueFirstValueMapped == value_range[0] assert isinstance(m.RealWorldValueLastValueMapped, int) assert m.RealWorldValueLastValueMapped == value_range[1] with pytest.raises(AttributeError): m.DoubleFloatRealWorldValueFirstValueMapped with pytest.raises(AttributeError): m.DoubleFloatRealWorldValueLastValueMapped with pytest.raises(AttributeError): m.RealWorldValueLUTData assert len(m.QuantityDefinitionSequence) == 1 quantity_item = m.QuantityDefinitionSequence[0] assert quantity_item.name == codes.SCT.Quantity assert quantity_item.value == quantity_definition def test_construction_integer_nonlinear_relationship(self): lut_label = '1' lut_explanation = 'Feature 1' unit = codes.UCUM.NoUnits value_range = [0, 255] lut_data = [v**2 for v in range(256)] m = RealWorldValueMapping( lut_label=lut_label, 
lut_explanation=lut_explanation, unit=unit, value_range=value_range, lut_data=lut_data ) assert m.LUTLabel == lut_label assert m.LUTExplanation == lut_explanation assert len(m.RealWorldValueLUTData) == len(lut_data) assert isinstance(m.RealWorldValueLUTData[0], float) assert m.MeasurementUnitsCodeSequence[0] == unit assert isinstance(m.RealWorldValueFirstValueMapped, int) assert m.RealWorldValueFirstValueMapped == value_range[0] assert isinstance(m.RealWorldValueLastValueMapped, int) assert m.RealWorldValueLastValueMapped == value_range[1] with pytest.raises(AttributeError): m.DoubleFloatRealWorldValueFirstValueMapped with pytest.raises(AttributeError): m.DoubleFloatRealWorldValueLastValueMapped with pytest.raises(AttributeError): m.RealWorldValueSlope with pytest.raises(AttributeError): m.RealWorldValueIntercept def test_construction_floating_point_linear_relationship(self): lut_label = '1' lut_explanation = 'Feature 1' unit = codes.UCUM.NoUnits value_range = [0.0, 1.0] intercept = 0 slope = 1 m = RealWorldValueMapping( lut_label=lut_label, lut_explanation=lut_explanation, unit=unit, value_range=value_range, intercept=intercept, slope=slope ) assert m.LUTLabel == lut_label assert m.LUTExplanation == lut_explanation assert isinstance(m.RealWorldValueSlope, float) assert m.RealWorldValueSlope == float(slope) assert isinstance(m.RealWorldValueIntercept, float) assert m.RealWorldValueIntercept == float(intercept) assert m.MeasurementUnitsCodeSequence[0] == unit assert isinstance(m.DoubleFloatRealWorldValueFirstValueMapped, float) assert m.DoubleFloatRealWorldValueFirstValueMapped == value_range[0] assert isinstance(m.DoubleFloatRealWorldValueLastValueMapped, float) assert m.DoubleFloatRealWorldValueLastValueMapped == value_range[1] with pytest.raises(AttributeError): m.RealWorldValueFirstValueMapped with pytest.raises(AttributeError): m.RealWorldValueLastValueMapped with pytest.raises(AttributeError): m.RealWorldValueLUTData def test_failed_construction_floating_point_nonlinear_relationship(self): lut_label = '1' lut_explanation = 'Feature 1' unit = codes.UCUM.NoUnits value_range = [0.0, 1.0] lut_data = [ v**2 for v in np.arange(value_range[0], value_range[1], 0.1) ] with pytest.raises(ValueError): RealWorldValueMapping( lut_label=lut_label, lut_explanation=lut_explanation, unit=unit, value_range=value_range, lut_data=lut_data ) class TestParametricMap(unittest.TestCase): def setUp(self): super().setUp() file_path = Path(__file__) data_dir = file_path.parent.parent.joinpath('data') self._series_instance_uid = UID() self._series_number = 1 self._sop_instance_uid = UID() self._instance_number = 1 self._manufacturer = 'MyManufacturer' self._manufacturer_model_name = 'MyModel' self._software_versions = 'v1.0' self._device_serial_number = '1-2-3' self._content_description = 'Test Parametric Map' self._content_creator_name = 'Will^I^Am' self._ct_image = dcmread( str(data_dir.joinpath('test_files', 'ct_image.dcm')) ) self._sm_image = dcmread( str(data_dir.joinpath('test_files', 'sm_image.dcm')) ) ct_series = [ dcmread(f) for f in get_testdata_files('dicomdirtests/77654033/CT2/*') ] self._ct_series = sorted( ct_series, key=lambda x: x.ImagePositionPatient[2] ) @staticmethod def check_dimension_index_vals(seg): # Function to apply some checks (necessary but not sufficient for # correctness) to ensure that the dimension indices are correct is_patient_coord_system = hasattr( seg.PerFrameFunctionalGroupsSequence[0], 'PlanePositionSequence' ) if is_patient_coord_system: # Build up the mapping from index 
to value index_mapping = defaultdict(list) for f in seg.PerFrameFunctionalGroupsSequence: posn_index = f.FrameContentSequence[0].DimensionIndexValues[1] # This is not general, but all the tests run here use axial # images so just check the z coordinate posn_val = f.PlanePositionSequence[0].ImagePositionPatient[2] index_mapping[posn_index].append(posn_val) # Check that each index value found references a unique value for values in index_mapping.values(): assert [v == values[0] for v in values] # Check that the indices are monotonically increasing from 1 expected_keys = range(1, len(index_mapping) + 1) assert set(index_mapping.keys()) == set(expected_keys) # Check that values are sorted old_v = float('-inf') for k in expected_keys: assert index_mapping[k][0] > old_v old_v = index_mapping[k][0] else: # Build up the mapping from index to value for dim_kw, dim_ind in zip([ 'ColumnPositionInTotalImagePixelMatrix', 'RowPositionInTotalImagePixelMatrix' ], [1, 2]): index_mapping = defaultdict(list) for f in seg.PerFrameFunctionalGroupsSequence: content_item = f.FrameContentSequence[0] posn_index = content_item.DimensionIndexValues[dim_ind] # This is not general, but all the tests run here use axial # images so just check the z coordinate posn_item = f.PlanePositionSlideSequence[0] posn_val = getattr(posn_item, dim_kw) index_mapping[posn_index].append(posn_val) # Check that each index value found references a unique value for values in index_mapping.values(): assert [v == values[0] for v in values] # Check that the indices are monotonically increasing from 1 expected_keys = range(1, len(index_mapping) + 1) assert set(index_mapping.keys()) == set(expected_keys) # Check that values are sorted old_v = float('-inf') for k in expected_keys: assert index_mapping[k][0] > old_v old_v = index_mapping[k][0] def test_multi_frame_sm_image_single_native(self): pixel_array = np.random.random(self._sm_image.pixel_array.shape[:3]) pixel_array = pixel_array.astype(np.float32) window_center = 0.5 window_width = 1.0 real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[0.0, 1.0], intercept=0, slope=1 ) content_label = 'MY_MAP' pmap = ParametricMap( [self._sm_image], pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, content_label=content_label ) assert pmap.SOPClassUID == '1.2.840.10008.5.1.4.1.1.30' assert pmap.SOPInstanceUID == self._sop_instance_uid assert pmap.SeriesInstanceUID == self._series_instance_uid assert pmap.SeriesNumber == self._series_number assert pmap.Manufacturer == self._manufacturer assert pmap.ManufacturerModelName == self._manufacturer_model_name assert pmap.SoftwareVersions == self._software_versions assert pmap.DeviceSerialNumber == self._device_serial_number assert pmap.StudyInstanceUID == self._sm_image.StudyInstanceUID assert pmap.PatientID == self._sm_image.PatientID assert pmap.ContentLabel == content_label assert pmap.TotalPixelMatrixRows == \ self._sm_image.TotalPixelMatrixRows assert pmap.TotalPixelMatrixColumns == \ self._sm_image.TotalPixelMatrixColumns assert pmap.TotalPixelMatrixOriginSequence == \ self._sm_image.TotalPixelMatrixOriginSequence assert np.array_equal(pmap.pixel_array, 
pixel_array) assert isinstance(pmap.AcquisitionContextSequence, Sequence) assert pmap.ContentQualification == 'RESEARCH' assert pmap.ImageType[0] == 'DERIVED' assert pmap.ImageType[1] == 'PRIMARY' assert pmap.ImageType[2] == 'VOLUME' assert pmap.ImageType[3] == 'QUANTITY' sffg_item = pmap.SharedFunctionalGroupsSequence[0] voi_lut_item = sffg_item.FrameVOILUTSequence[0] assert voi_lut_item.WindowCenter == str(window_center) assert voi_lut_item.WindowWidth == str(window_width) def test_multi_frame_sm_image_ushort_native(self): pixel_array = np.random.randint( low=0, high=2**8, size=self._sm_image.pixel_array.shape[:3], dtype=np.uint8 ) window_center = 128 window_width = 256 real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[0, 255], intercept=0, slope=1 ) instance = ParametricMap( [self._sm_image], pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, content_qualification=ContentQualificationValues.SERVICE, image_flavor=ImageFlavorValues.WHOLE_BODY, derived_pixel_contrast=DerivedPixelContrastValues.NONE ) sffg_item = instance.SharedFunctionalGroupsSequence[0] assert hasattr(sffg_item, 'RealWorldValueMappingSequence') assert len(sffg_item.RealWorldValueMappingSequence) == 1 pffg_item = instance.PerFrameFunctionalGroupsSequence[0] assert not hasattr(pffg_item, 'RealWorldValueMappingSequence') assert instance.BitsAllocated == 8 assert instance.pixel_array.dtype == np.uint8 assert np.array_equal(instance.pixel_array, pixel_array) assert instance.ContentQualification == 'SERVICE' assert instance.ImageType[0] == 'DERIVED' assert instance.ImageType[1] == 'PRIMARY' assert instance.ImageType[2] == 'WHOLE_BODY' assert instance.ImageType[3] == 'NONE' def test_multi_frame_sm_image_ushort_encapsulated_jpeg2000(self): pixel_array = np.random.randint( low=0, high=2**8, size=self._sm_image.pixel_array.shape[:3], dtype=np.uint8 ) window_center = 128 window_width = 256 real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[0, 255], intercept=0, slope=1 ) pmap = ParametricMap( [self._sm_image], pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, transfer_syntax_uid=JPEG2000Lossless ) assert pmap.BitsAllocated == 8 assert np.array_equal(pmap.pixel_array, pixel_array) def test_multi_frame_sm_image_ushort_encapsulated_jpegls(self): pixel_array = np.random.randint( low=0, high=2**8, size=self._sm_image.pixel_array.shape[:3], dtype=np.uint16 ) window_center = 128 window_width = 256 real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[0, 255], intercept=0, slope=1 ) pmap = ParametricMap( [self._sm_image], pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, 
self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, transfer_syntax_uid=JPEGLSLossless ) assert pmap.BitsAllocated == 16 assert np.array_equal(pmap.pixel_array, pixel_array) def test_single_frame_ct_image_double(self): pixel_array = np.random.uniform(-1, 1, self._ct_image.pixel_array.shape) window_center = 0.0 window_width = 2.0 real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[-1, 1], intercept=0, slope=1 ) pmap = ParametricMap( [self._ct_image], pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, ) assert pmap.BitsAllocated == 64 assert np.array_equal(pmap.pixel_array, pixel_array) def test_single_frame_ct_image_ushort_native(self): pixel_array = np.random.randint( low=0, high=2**12, size=self._ct_image.pixel_array.shape, dtype=np.uint16 ) window_center = 2**12 / 2.0 window_width = 2**12 real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[0, 4095], intercept=0, slope=1 ) pmap = ParametricMap( [self._ct_image], pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, ) assert pmap.BitsAllocated == 16 assert np.array_equal(pmap.pixel_array, pixel_array) def test_single_frame_ct_image_ushort(self): pixel_array = np.random.randint( low=120, high=24000, size=self._ct_image.pixel_array.shape, dtype=np.uint16 ) window_center = 2**16 / 2 window_width = 2**16 real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[0, 2**16], intercept=-1, slope=2. 
/ (2**16 - 1) ) pmap = ParametricMap( [self._ct_image], pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, ) retrieved_pixel_array = pmap.pixel_array shared_fg = pmap.SharedFunctionalGroupsSequence[0] transformation = shared_fg.PixelValueTransformationSequence[0] slope = transformation.RescaleSlope intercept = transformation.RescaleIntercept rescaled_pixel_array = ( retrieved_pixel_array.astype(float) * float(slope) + float(intercept) ).astype(np.int16) assert np.array_equal(rescaled_pixel_array, pixel_array) def test_series_single_frame_ct_image_single(self): size = (len(self._ct_series), ) + self._ct_series[0].pixel_array.shape pixel_array = np.random.uniform(-1, 1, size) pixel_array = pixel_array.astype(np.float32) window_center = 0.0 window_width = 2.0 real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[-1, 1], intercept=0, slope=1 ) pmap = ParametricMap( self._ct_series, pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, ) assert np.array_equal(pmap.pixel_array, pixel_array) def test_multi_frame_sm_image_with_spatial_positions_not_preserved(self): pixel_array = np.random.randint( low=0, high=2**8, size=self._sm_image.pixel_array.shape[:3], dtype=np.uint8 ) window_center = 128 window_width = 256 pixel_spacing = (0.5, 0.5) slice_thickness = 0.3 pixel_measures = PixelMeasuresSequence( pixel_spacing=pixel_spacing, slice_thickness=slice_thickness ) image_orientation = (-1.0, 0.0, 0.0, 0.0, -1.0, 0.0) plane_orientation = PlaneOrientationSequence( coordinate_system=CoordinateSystemNames.SLIDE, image_orientation=image_orientation ) plane_positions = [ PlanePositionSequence( coordinate_system=CoordinateSystemNames.SLIDE, image_position=(i * 1.0, i * 1.0, 1.0), pixel_matrix_position=(i * 1, i * 1) ) for i in range(self._sm_image.pixel_array.shape[0]) ] real_world_value_mapping = RealWorldValueMapping( lut_label='1', lut_explanation='feature_001', unit=codes.UCUM.NoUnits, value_range=[0, 255], intercept=0, slope=1 ) instance = ParametricMap( [self._sm_image], pixel_array, self._series_instance_uid, self._series_number, self._sop_instance_uid, self._instance_number, self._manufacturer, self._manufacturer_model_name, self._software_versions, self._device_serial_number, contains_recognizable_visual_features=False, real_world_value_mappings=[real_world_value_mapping], window_center=window_center, window_width=window_width, pixel_measures=pixel_measures, plane_orientation=plane_orientation, plane_positions=plane_positions ) assert instance.pixel_array.dtype == np.uint8 assert instance.BitsAllocated == 8 shared_item = instance.SharedFunctionalGroupsSequence[0] assert len(shared_item.PixelMeasuresSequence) == 1 pm_item = shared_item.PixelMeasuresSequence[0] assert pm_item.PixelSpacing == list(pixel_spacing) assert len(shared_item.PlaneOrientationSequence) == 1 po_item = 
shared_item.PlaneOrientationSequence[0] assert po_item.ImageOrientationSlide == list(image_orientation) self.check_dimension_index_vals(instance)
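
# Illustrative note (not part of the original tests): for a linear mapping the
# real-world value of a stored pixel value p is p * slope + intercept. With the
# 16-bit parameters used in test_single_frame_ct_image_ushort above
# (intercept=-1, slope=2/(2**16 - 1)) the stored range [0, 65535] maps onto
# [-1.0, +1.0]:
#     p = 0      ->  0 * 2 / 65535 - 1 = -1.0
#     p = 65535  ->  65535 * 2 / 65535 - 1 = +1.0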
py
b4148b4a846c64ed9c965a147b7592d25acce754
from imageai.Detection import ObjectDetection
import os

# Resolve paths relative to the directory the script is run from.
execution_path = os.getcwd()

# Load a RetinaNet detector with pretrained COCO weights.
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.1.0.h5"))
detector.loadModel()

# Run detection on image.jpg and write an annotated copy to imagenew.jpg.
detections = detector.detectObjectsFromImage(
    input_image=os.path.join(execution_path, "image.jpg"),
    output_image_path=os.path.join(execution_path, "imagenew.jpg"))

# Each detection is a dict with the object name and its confidence (in percent).
for eachObject in detections:
    print(eachObject["name"], " : ", eachObject["percentage_probability"])
py
b4148b82caffcb3d401203b514031ef55ddaf4b5
# INPUT DATA
ANIMAL = int(input("Which animal do you want to know the characteristics of? 1.Leon 2.Ballena 3.Tucan? "))


class Animal:
    def __init__(self, ANIMAL):
        self.ANIMAL = ANIMAL

    def acciones_comun(self):
        comun = "Eat"
        return comun

    def sentido_vista(self):
        vista = "Can see"
        return vista


class Animal_Tierra:
    def acciones_Tierra(self):
        Tierra = "Walks on four legs"
        return Tierra


class Animal_Agua:
    def acciones_Agua(self):
        return "Swims under water"


class Animal_Aire(Animal):
    def acciones_Aire(self):
        return "Flies"


class Leon(Animal, Animal_Tierra):
    def llamar(self):
        caracteristicas = ()
        return caracteristicas


class Ballena(Animal, Animal_Agua):
    def llamar(self):
        caracteristicas = ()
        return caracteristicas


# Animal_Aire already derives from Animal, so it must come first in the base list
# to give a consistent method resolution order.
class Tucan(Animal_Aire, Animal):
    def llamar(self):
        caracteristicas = ()
        return caracteristicas


if ANIMAL == 1:
    print("it should print the characteristics of the lion; Leon is a child class of Animal and should also add Animal_Tierra")
elif ANIMAL == 2:
    print("same as the lion, but with the whale (Ballena)")
elif ANIMAL == 3:
    print("Same, but with the toucan (Tucan)")
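
# Illustrative sketch (not part of the original exercise): one way the branches
# above could actually report the characteristics instead of printing the TODO
# messages, e.g. for option 1:
#
#     leon = Leon(ANIMAL)
#     print(leon.acciones_comun(), leon.sentido_vista(), leon.acciones_Tierra())
#
# and analogously Ballena(...).acciones_Agua() and Tucan(...).acciones_Aire().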
py
b4148c5a86bc3a0cb9aa5fcb037eaadc0213aaa1
import os from time import time as timestamp from multiprocessing import current_process from .abstract_writer import AbstractWriter from .helpers import gen_filename, time, add_value_wrapper from ..helpers import concurrent class FileWriter(AbstractWriter): def __init__(self, **kwargs): AbstractWriter.__init__(self, **kwargs) self.filename = gen_filename() self.init_time = timestamp() @property def wall_time(self): return timestamp() - self.init_time def _run(self): AbstractWriter._run(self) self.write_to_disc() @concurrent def write_to_disc(self): if 'text' in self.data: try: with open(os.path.join(self.output_dir, self.filename + '.log'), 'a') as f: for line in self.data['text']: f.write(line + '\n') except Exception as e: self.logger.error(f'[{self}] > Error while writing to {os.path.join(self.output_dir, self.filename + ".log")}') if self.debug: print(str(e)) self.data.clear() def fixed_prefix(self, key): return f'[{key}] {time()} > ' @concurrent @add_value_wrapper def add_text(self, key, value): if 'text' not in self.data: self.data['text'] = self.manager.list() message = self.fixed_prefix(key) + value self.data['text'].append(message) return message @concurrent @add_value_wrapper def add_scalar(self, key, value, step): raise NotImplementedError(f'{self} only supports add_text') @concurrent @add_value_wrapper def add_histogram(self, key, value, step): raise NotImplementedError(f'{self} only supports add_text') @concurrent @add_value_wrapper def add_image(self, key, value, step): raise NotImplementedError(f'{self} only supports add_text') @concurrent @add_value_wrapper def add_scalars(self, key, value, step): raise NotImplementedError(f'{self} only supports add_text') @concurrent @add_value_wrapper def add_array(self, key, value, step): raise NotImplementedError(f'{self} only supports add_text') def __repr__(self): return 'FileWriter' def close(self): AbstractWriter.close(self) if current_process().name == 'MainProcess' or self.scope != 'root': self.write_to_disc() self.logger.info(f'{self} closed')
py
b4148d45edc68a445c8d9ceb3d68d9172a2f9d2d
""" Django settings for covid project. Generated by 'django-admin startproject' using Django 3.2. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path from decouple import config import os import django_heroku # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-9p)*17!k(m*0p)e1_=cn_a=kn))r&wmx0ca8l-ru1bijpd3116' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'corsheaders', 'rest_framework', 'rest_framework.authtoken', 'rest_auth', 'django.contrib.sites', 'allauth', 'allauth.account', 'rest_auth.registration', 'drf_yasg', 'import_export', 'core', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'corsheaders.middleware.CorsMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'covid.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'covid.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = "Asia/Kolkata" USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = "/static/" STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")] MEDIA_URL = "/media/" MEDIA_ROOT = os.path.join(BASE_DIR, "media") STATICFILES_FINDERS = [ "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", ] PROJECT_DIR = os.path.dirname(os.path.abspath(__file__)) STATIC_ROOT = os.path.join(PROJECT_DIR, "static") # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field 
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' CORS_ORIGIN_ALLOW_ALL = True SITE_ID = 1 ACCOUNT_UNIQUE_EMAIL = False ACCOUNT_EMAIL_REQUIRED = False ACCOUNT_AUTHENTICATION_METHOD = "username" ACCOUNT_EMAIL_VERIFICATION = "none" ACCOUNT_USERNAME_REQUIRED = True REST_FRAMEWORK = { "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.AllowAny",), "DEFAULT_AUTHENTICATION_CLASSES": ( "rest_framework.authentication.TokenAuthentication", "rest_framework.authentication.SessionAuthentication", ), "DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"], "DATETIME_FORMAT": "%b %d %Y %H:%M:%S", } CSRF_COOKIE_NAME = "csrftoken" EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend" EMAIL_URL = os.environ.get("EMAIL_URL") SENDGRID_USERNAME = os.environ.get("SENDGRID_USERNAME") SENDGRID_PASSWORD = os.environ.get("SENDGRID_PASSWORD") if not EMAIL_URL and SENDGRID_USERNAME and SENDGRID_PASSWORD: EMAIL_URL = "smtp://%s:%[email protected]:587/?tls=True" % ( SENDGRID_USERNAME, SENDGRID_PASSWORD, ) CORS_ALLOW_ALL_ORIGINS = True django_heroku.settings(locals())
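
# Illustrative sketch (not part of the original settings): the `config` helper
# imported from python-decouple above is typically used to pull secrets from the
# environment or a .env file instead of hard-coding them, e.g.:
#
#     SECRET_KEY = config("SECRET_KEY", default="unsafe-dev-key")
#     DEBUG = config("DEBUG", default=False, cast=bool)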
py
b4148d46a4e84486c3329df76b2c3daf1d6b75c3
"""Value and Q-Functions parametrized with Neural Networks.""" import torch import torch.jit from rllib.util.neural_networks import DeterministicNN, one_hot_encode from .abstract_value_function import AbstractQFunction, AbstractValueFunction class NNValueFunction(AbstractValueFunction): """Implementation of a Value Function implemented with a Neural Network. Parameters ---------- layers: list, optional (default=No layers). width of layers, each layer is connected with a non-linearity. biased_head: bool, optional (default=True). flag that indicates if head of NN has a bias term or not. non_linearity: string, optional (default=Tanh). Neural Network non-linearity. input_transform: nn.Module, optional (default=None). Module with which to transform inputs. """ def __init__( self, layers=(200, 200), biased_head=True, non_linearity="Tanh", input_transform=None, *args, **kwargs, ): super().__init__(*args, **kwargs) if self.discrete_state: num_inputs = (self.num_states,) else: num_inputs = self.dim_state self.input_transform = input_transform if hasattr(input_transform, "extra_dim"): assert len(num_inputs) == 1, "Only implemented N x 1 inputs." num_inputs = (num_inputs[0] + getattr(input_transform, "extra_dim"),) self.nn = DeterministicNN( num_inputs, (1,), layers=layers, squashed_output=False, non_linearity=non_linearity, biased_head=biased_head, ) self.dimension = self.nn.embedding_dim @classmethod def default(cls, environment, *args, **kwargs): """See AbstractValueFunction.default.""" return super().default(environment, *args, **kwargs) @classmethod def from_other(cls, other, copy=True): """Create new Value Function from another Value Function.""" new = cls( dim_state=other.dim_state, num_states=other.num_states, tau=other.tau, input_transform=other.input_transform, ) new.nn = other.nn.__class__.from_other(other.nn, copy=copy) return new @classmethod def from_nn(cls, module, dim_state, num_states=-1, tau=0.0, input_transform=None): """Create new Value Function from a Neural Network Implementation.""" new = cls( dim_state=dim_state, num_states=num_states, tau=tau, input_transform=input_transform, ) new.nn = module return new def forward(self, state, action=torch.tensor(float("nan"))): """Get value of the value-function at a given state.""" if self.input_transform is not None: state = self.input_transform(state) if self.discrete_state: state = one_hot_encode(state, self.num_states) return self.nn(state).squeeze(-1) @torch.jit.export def embeddings(self, state): """Get embeddings of the value-function at a given state.""" if self.discrete_state: state = one_hot_encode(state, self.num_states) return self.nn.last_layer_embeddings(state).squeeze(-1) class NNQFunction(AbstractQFunction): """Implementation of a Q-Function implemented with a Neural Network. Parameters ---------- layers: list, optional (default=No layers). width of layers, each layer is connected with a non-linearity. biased_head: bool, optional (default=True). flag that indicates if head of NN has a bias term or not. non_linearity: string, optional (default=Tanh). Neural Network non-linearity. input_transform: nn.Module, optional (default=None). Module with which to transform inputs. Other Parameters ---------------- See AbstractQFunction. 
""" def __init__( self, layers=(200, 200), biased_head=True, non_linearity="Tanh", input_transform=None, *args, **kwargs, ): super().__init__(*args, **kwargs) if not self.discrete_state and not self.discrete_action: num_inputs = (self.dim_state[0] + self.dim_action[0],) num_outputs = (1,) elif self.discrete_state and self.discrete_action: num_inputs = (self.num_states,) num_outputs = (self.num_actions,) elif not self.discrete_state and self.discrete_action: num_inputs = self.dim_state num_outputs = (self.num_actions,) else: raise NotImplementedError("If states are discrete, so should be actions.") self.input_transform = input_transform if hasattr(input_transform, "extra_dim"): assert len(num_inputs) == 1, "Only implemented N x 1 inputs." num_inputs = (num_inputs[0] + getattr(input_transform, "extra_dim"),) self.nn = DeterministicNN( num_inputs, num_outputs, layers=layers, non_linearity=non_linearity, biased_head=biased_head, squashed_output=False, ) @classmethod def default(cls, environment, *args, **kwargs): """See AbstractQFunction.default.""" return super().default(environment, *args, **kwargs) @classmethod def from_other(cls, other, copy=True): """Create new Value Function from another Value Function.""" new = cls( dim_state=other.dim_state, dim_action=other.dim_action, num_states=other.num_states, num_actions=other.num_actions, tau=other.tau, input_transform=other.input_transform, ) new.nn = other.nn.__class__.from_other(other.nn, copy=copy) return new @classmethod def from_nn( cls, module, dim_state, dim_action, num_states=-1, num_actions=-1, tau=0.0, input_transform=None, ): """Create new Value Function from a Neural Network Implementation.""" new = cls( dim_state=dim_state, dim_action=dim_action, num_states=num_states, num_actions=num_actions, tau=tau, input_transform=input_transform, ) new.nn = module return new def forward(self, state, action=torch.tensor(float("nan"))): """Get value of the value-function at a given state. Parameters ---------- state: torch.Tensor action: torch.Tensor Returns ------- value: torch.Tensor """ if self.discrete_state: state = one_hot_encode(state, self.num_states) if self.input_transform is not None: state = self.input_transform(state) if torch.isnan(action).all(): if not self.discrete_action: raise NotImplementedError action_value = self.nn(state) return action_value if self.discrete_action: action = action.unsqueeze(-1).long() if action.dim() < state.dim(): resqueeze = True action = action.unsqueeze(0) else: resqueeze = False if not self.discrete_action: state_action = torch.cat((state, action), dim=-1) return self.nn(state_action).squeeze(-1) else: out = self.nn(state).gather(-1, action).squeeze(-1) if resqueeze: return out.squeeze(0) else: return out class DuelingQFunction(NNQFunction): """Dueling Q Function Network. Parameters ---------- average_or_max: str, optional (default="average"). Whether to take the average or the max of the advantage when computing q. Other Parameters ---------------- See NNQFunction. References ---------- Wang, Z., Schaul, T., Hessel, M., Hasselt, H., Lanctot, M., & Freitas, N. (2016). Dueling network architectures for deep reinforcement learning. ICML. 
""" def __init__(self, average_or_max="average", *args, **kwargs): super().__init__(*args, **kwargs) if not self.discrete_action: raise NotImplementedError("Only Discrete Actions Allowed.") self.average_or_max = average_or_max nn_kwargs = self.nn.kwargs nn_kwargs["out_dim"] = (nn_kwargs["out_dim"][0] + 1,) self.nn = DeterministicNN(**nn_kwargs) def forward(self, state, action=torch.tensor(float("nan"))): """See `NNQFunction.forward()'.""" q_values = super().forward(state) if torch.isnan(action).all(): return q_values[..., 1:] else: value, advantage = q_values[..., 1], q_values[..., 1:] if self.discrete_action: action = action.unsqueeze(-1).long() if action.dim() < state.dim(): resqueeze = True action = action.unsqueeze(0) else: resqueeze = False advantage_action = advantage.gather(-1, action).squeeze(-1) if resqueeze: advantage_action = advantage_action.squeeze(0) if self.average_or_max == "average": advantage_offset = advantage.mean(dim=-1) elif self.average_or_max == "max": advantage_offset = advantage.max(dim=-1)[0] else: raise NotImplementedError("Only average or mean are implemented.") return value + advantage_action - advantage_offset
py
b4148d553f74119c94d89c16a10b36552663d3c9
# -*- coding: utf-8 -*- """ Compute Largest Difference ========================== """ # sphinx_gallery_thumbnail_number = 1 import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from gpdre.benchmarks import CortesDensityRatio from gpdre.metrics import normalized_mean_squared_error from sklearn.svm import SVR from sklearn.datasets import load_diabetes, load_boston from sklearn.preprocessing import MinMaxScaler # %% # constants num_splits = 20 num_seeds = 10 scale = -2.0 model = SVR(kernel="rbf", gamma="scale", C=1.0, epsilon=0.1) feature_scaler = MinMaxScaler() # %% def metric(model, X_train, y_train, X_test, y_test, sample_weight=None): model.fit(X_train, y_train, sample_weight=sample_weight) y_pred = model.predict(X_test) return normalized_mean_squared_error(y_test, y_pred) def make_data(rows): return pd.DataFrame(rows).set_index(["split", "seed"]) def get_metrics_pair(X, y, num_splits=20, num_seeds=10, scale=-2.0): input_dim = X.shape[-1] rows_uniform = [] rows_exact = [] for split in range(num_splits): r = CortesDensityRatio(input_dim=input_dim, scale=scale, seed=split) for seed in range(num_seeds): (X_train, y_train), (X_test, y_test) = r.train_test_split(X, y, seed=seed) nmse = metric(model, X_train, y_train, X_test, y_test) rows_uniform.append(dict(split=split, seed=seed, nmse=nmse)) nmse = metric(model, X_train, y_train, X_test, y_test, sample_weight=r.ratio(X_train) + 1e-6) rows_exact.append(dict(split=split, seed=seed, nmse=nmse)) data_uniform = make_data(rows_uniform) data_exact = make_data(rows_exact) return data_uniform, data_exact def get_metrics_diff(data_uniform, data_exact): return data_uniform.sub(data_exact) def get_split_largest_diff(data_diff): data_diff_mean = data_diff.groupby(level="split").mean() return data_diff_mean["nmse"].argmax() # %% dataset = load_boston() X = feature_scaler.fit_transform(dataset.data) y = dataset.target # %% data_uniform, data_exact = get_metrics_pair(X, y, num_splits=num_splits, num_seeds=num_seeds, scale=scale) data = pd.concat([data_uniform.assign(importance="uniform"), data_exact.assign(importance="exact")], axis="index", sort=True).reset_index() # %% g = sns.catplot(x="importance", y="nmse", hue="split", data=data, kind="point", alpha=0.8, dodge=True, join=True, markers="d", scale=0.8, palette="tab20", facet_kws=dict(sharex=False, sharey=False)) # %% data_diff = get_metrics_diff(data_uniform, data_exact) # %% fig, ax = plt.subplots() sns.stripplot(x="split", y="nmse", data=data_diff.reset_index(), palette="tab20", alpha=0.4, zorder=1, ax=ax) sns.pointplot(x="split", y="nmse", data=data_diff.reset_index(), palette="tab20", join=False, ci=None, markers='d', scale=0.75, ax=ax) plt.show() # %% get_split_largest_diff(data_diff)
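# %%
# Editorial sketch, not from the original example: the exact metric lives in
# ``gpdre.metrics.normalized_mean_squared_error``. A common convention --
# assumed here, not verified against that implementation -- is the test MSE
# divided by the variance of the test targets, so a value of 1.0 corresponds
# to predicting the mean target.
import numpy as np


def nmse_sketch(y_true, y_pred):
    """Mean squared error normalized by the variance of the true targets."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return np.mean((y_true - y_pred) ** 2) / np.var(y_true)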
py
b4148df94268dd02a652081a0183263c583382bf
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class RoutesOperations(object): """RoutesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_04_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def _delete_initial( self, resource_group_name, # type: str route_table_name, # type: str route_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), 'routeName': self._serialize.url("route_name", route_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore def begin_delete( self, resource_group_name, # type: str route_table_name, # type: str route_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Deletes the specified route from a route table. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_table_name: The name of the route table. :type route_table_name: str :param route_name: The name of the route. :type route_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the ARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._delete_initial( resource_group_name=resource_group_name, route_table_name=route_table_name, route_name=route_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), 'routeName': self._serialize.url("route_name", route_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore def get( self, resource_group_name, # type: str route_table_name, # type: str route_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.Route" """Gets the specified route from a route table. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_table_name: The name of the route table. :type route_table_name: str :param route_name: The name of the route. 
:type route_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Route, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_04_01.models.Route :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), 'routeName': self._serialize.url("route_name", route_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Route', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore def _create_or_update_initial( self, resource_group_name, # type: str route_table_name, # type: str route_name, # type: str route_parameters, # type: "_models.Route" **kwargs # type: Any ): # type: (...) 
-> "_models.Route" cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), 'routeName': self._serialize.url("route_name", route_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(route_parameters, 'Route') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('Route', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('Route', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore def begin_create_or_update( self, resource_group_name, # type: str route_table_name, # type: str route_name, # type: str route_parameters, # type: "_models.Route" **kwargs # type: Any ): # type: (...) -> LROPoller["_models.Route"] """Creates or updates a route in the specified route table. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_table_name: The name of the route table. :type route_table_name: str :param route_name: The name of the route. :type route_name: str :param route_parameters: Parameters supplied to the create or update route operation. :type route_parameters: ~azure.mgmt.network.v2020_04_01.models.Route :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the ARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either Route or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.Route] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, route_table_name=route_table_name, route_name=route_name, route_parameters=route_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Route', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), 'routeName': self._serialize.url("route_name", route_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore def list( self, resource_group_name, # type: str route_table_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.RouteListResult"] """Gets all routes in a route table. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_table_name: The name of the route table. 
:type route_table_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either RouteListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.RouteListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-04-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('RouteListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
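# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition): generated operation classes such as
# RoutesOperations are normally reached through NetworkManagementClient rather
# than instantiated directly. The attribute name (``client.routes``) and the
# dict form of the Route parameters follow the usual azure-mgmt-network
# conventions; treat the resource names below as placeholders.
# ---------------------------------------------------------------------------
def _example_route_round_trip(subscription_id):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)

    # Long-running create: begin_* returns an LROPoller; .result() blocks until done.
    poller = client.routes.begin_create_or_update(
        "example-rg",
        "example-route-table",
        "example-route",
        {"address_prefix": "10.1.0.0/16", "next_hop_type": "VnetLocal"},
    )
    route = poller.result()

    # Paged listing: ItemPaged transparently follows next_link.
    for item in client.routes.list("example-rg", "example-route-table"):
        print(item.name, item.address_prefix)

    return route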
py
b4148e67be6824e01ba97baea225f230e740a0ba
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'main.ui' # # Created by: PyQt5 UI code generator 5.15.6 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(486, 228) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.layoutWidget = QtWidgets.QWidget(self.centralwidget) self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 446, 184)) self.layoutWidget.setObjectName("layoutWidget") self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.layoutWidget) self.verticalLayout_6.setContentsMargins(0, 0, 0, 0) self.verticalLayout_6.setObjectName("verticalLayout_6") self.verticalLayout_5 = QtWidgets.QVBoxLayout() self.verticalLayout_5.setObjectName("verticalLayout_5") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.verticalLayout = QtWidgets.QVBoxLayout() self.verticalLayout.setObjectName("verticalLayout") self.label = QtWidgets.QLabel(self.layoutWidget) self.label.setMaximumSize(QtCore.QSize(16777215, 47)) font = QtGui.QFont() font.setKerning(False) self.label.setFont(font) self.label.setObjectName("label") self.verticalLayout.addWidget(self.label) self.comboBox = QtWidgets.QComboBox(self.layoutWidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth()) self.comboBox.setSizePolicy(sizePolicy) self.comboBox.setBaseSize(QtCore.QSize(20, 60)) self.comboBox.setObjectName("comboBox") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.verticalLayout.addWidget(self.comboBox) self.comboBox_2 = QtWidgets.QComboBox(self.layoutWidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.comboBox_2.sizePolicy().hasHeightForWidth()) self.comboBox_2.setSizePolicy(sizePolicy) self.comboBox_2.setObjectName("comboBox_2") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") 
self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.comboBox_2.addItem("") self.verticalLayout.addWidget(self.comboBox_2) self.comboBox_3 = QtWidgets.QComboBox(self.layoutWidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.comboBox_3.sizePolicy().hasHeightForWidth()) self.comboBox_3.setSizePolicy(sizePolicy) self.comboBox_3.setObjectName("comboBox_3") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.comboBox_3.addItem("") self.verticalLayout.addWidget(self.comboBox_3) self.comboBox_4 = QtWidgets.QComboBox(self.layoutWidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.comboBox_4.sizePolicy().hasHeightForWidth()) self.comboBox_4.setSizePolicy(sizePolicy) self.comboBox_4.setObjectName("comboBox_4") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.comboBox_4.addItem("") 
self.comboBox_4.addItem("") self.comboBox_4.addItem("") self.verticalLayout.addWidget(self.comboBox_4) self.horizontalLayout.addLayout(self.verticalLayout) self.verticalLayout_2 = QtWidgets.QVBoxLayout() self.verticalLayout_2.setObjectName("verticalLayout_2") self.label_2 = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setKerning(False) self.label_2.setFont(font) self.label_2.setObjectName("label_2") self.verticalLayout_2.addWidget(self.label_2) self.lineEdit_a1 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_a1.setEnabled(True) self.lineEdit_a1.setObjectName("lineEdit_a1") self.verticalLayout_2.addWidget(self.lineEdit_a1) self.lineEdit_a2 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_a2.setObjectName("lineEdit_a2") self.verticalLayout_2.addWidget(self.lineEdit_a2) self.lineEdit_a3 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_a3.setObjectName("lineEdit_a3") self.verticalLayout_2.addWidget(self.lineEdit_a3) self.lineEdit_a4 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_a4.setObjectName("lineEdit_a4") self.verticalLayout_2.addWidget(self.lineEdit_a4) self.horizontalLayout.addLayout(self.verticalLayout_2) self.verticalLayout_3 = QtWidgets.QVBoxLayout() self.verticalLayout_3.setObjectName("verticalLayout_3") self.label_7 = QtWidgets.QLabel(self.layoutWidget) self.label_7.setText("") self.label_7.setObjectName("label_7") self.verticalLayout_3.addWidget(self.label_7) self.label_4 = QtWidgets.QLabel(self.layoutWidget) self.label_4.setObjectName("label_4") self.verticalLayout_3.addWidget(self.label_4) self.label_3 = QtWidgets.QLabel(self.layoutWidget) self.label_3.setObjectName("label_3") self.verticalLayout_3.addWidget(self.label_3) self.label_6 = QtWidgets.QLabel(self.layoutWidget) self.label_6.setObjectName("label_6") self.verticalLayout_3.addWidget(self.label_6) self.label_5 = QtWidgets.QLabel(self.layoutWidget) self.label_5.setObjectName("label_5") self.verticalLayout_3.addWidget(self.label_5) self.horizontalLayout.addLayout(self.verticalLayout_3) self.verticalLayout_4 = QtWidgets.QVBoxLayout() self.verticalLayout_4.setObjectName("verticalLayout_4") self.label_8 = QtWidgets.QLabel(self.layoutWidget) self.label_8.setObjectName("label_8") self.verticalLayout_4.addWidget(self.label_8) self.lineEdit_b1 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_b1.setObjectName("lineEdit_b1") self.verticalLayout_4.addWidget(self.lineEdit_b1) self.lineEdit_b2 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_b2.setObjectName("lineEdit_b2") self.verticalLayout_4.addWidget(self.lineEdit_b2) self.lineEdit_b3 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_b3.setObjectName("lineEdit_b3") self.verticalLayout_4.addWidget(self.lineEdit_b3) self.lineEdit_b4 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_b4.setObjectName("lineEdit_b4") self.verticalLayout_4.addWidget(self.lineEdit_b4) self.horizontalLayout.addLayout(self.verticalLayout_4) self.verticalLayout_5.addLayout(self.horizontalLayout) self.verticalLayout_6.addLayout(self.verticalLayout_5) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.pushButton = QtWidgets.QPushButton(self.layoutWidget) self.pushButton.setObjectName("pushButton") self.horizontalLayout_2.addWidget(self.pushButton) self.pushButton_2 = QtWidgets.QPushButton(self.layoutWidget) self.pushButton_2.setObjectName("pushButton_2") self.horizontalLayout_2.addWidget(self.pushButton_2) self.verticalLayout_6.addLayout(self.horizontalLayout_2) 
MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName("statusbar") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "WizardButton")) self.label.setText(_translate("MainWindow", "按键")) self.comboBox.setCurrentText(_translate("MainWindow", "A")) self.comboBox.setItemText(0, _translate("MainWindow", "A")) self.comboBox.setItemText(1, _translate("MainWindow", "B")) self.comboBox.setItemText(2, _translate("MainWindow", "C")) self.comboBox.setItemText(3, _translate("MainWindow", "D")) self.comboBox.setItemText(4, _translate("MainWindow", "E")) self.comboBox.setItemText(5, _translate("MainWindow", "F")) self.comboBox.setItemText(6, _translate("MainWindow", "G")) self.comboBox.setItemText(7, _translate("MainWindow", "H")) self.comboBox.setItemText(8, _translate("MainWindow", "I")) self.comboBox.setItemText(9, _translate("MainWindow", "J")) self.comboBox.setItemText(10, _translate("MainWindow", "K")) self.comboBox.setItemText(11, _translate("MainWindow", "L")) self.comboBox.setItemText(12, _translate("MainWindow", "M")) self.comboBox.setItemText(13, _translate("MainWindow", "N")) self.comboBox.setItemText(14, _translate("MainWindow", "O")) self.comboBox.setItemText(15, _translate("MainWindow", "P")) self.comboBox.setItemText(16, _translate("MainWindow", "Q")) self.comboBox.setItemText(17, _translate("MainWindow", "R")) self.comboBox.setItemText(18, _translate("MainWindow", "S")) self.comboBox.setItemText(19, _translate("MainWindow", "T")) self.comboBox.setItemText(20, _translate("MainWindow", "U")) self.comboBox.setItemText(21, _translate("MainWindow", "V")) self.comboBox.setItemText(22, _translate("MainWindow", "W")) self.comboBox.setItemText(23, _translate("MainWindow", "X")) self.comboBox.setItemText(24, _translate("MainWindow", "Y")) self.comboBox.setItemText(25, _translate("MainWindow", "Z")) self.comboBox.setItemText(26, _translate("MainWindow", "1")) self.comboBox.setItemText(27, _translate("MainWindow", "2")) self.comboBox.setItemText(28, _translate("MainWindow", "3")) self.comboBox.setItemText(29, _translate("MainWindow", "4")) self.comboBox.setItemText(30, _translate("MainWindow", "5")) self.comboBox.setItemText(31, _translate("MainWindow", "6")) self.comboBox.setItemText(32, _translate("MainWindow", "7")) self.comboBox.setItemText(33, _translate("MainWindow", "8")) self.comboBox.setItemText(34, _translate("MainWindow", "9")) self.comboBox.setItemText(35, _translate("MainWindow", "0")) self.comboBox_2.setCurrentText(_translate("MainWindow", "A")) self.comboBox_2.setItemText(0, _translate("MainWindow", "A")) self.comboBox_2.setItemText(1, _translate("MainWindow", "B")) self.comboBox_2.setItemText(2, _translate("MainWindow", "C")) self.comboBox_2.setItemText(3, _translate("MainWindow", "D")) self.comboBox_2.setItemText(4, _translate("MainWindow", "E")) self.comboBox_2.setItemText(5, _translate("MainWindow", "F")) self.comboBox_2.setItemText(6, _translate("MainWindow", "G")) self.comboBox_2.setItemText(7, _translate("MainWindow", "H")) self.comboBox_2.setItemText(8, _translate("MainWindow", "I")) self.comboBox_2.setItemText(9, _translate("MainWindow", "J")) self.comboBox_2.setItemText(10, _translate("MainWindow", "K")) self.comboBox_2.setItemText(11, _translate("MainWindow", "L")) self.comboBox_2.setItemText(12, 
_translate("MainWindow", "M")) self.comboBox_2.setItemText(13, _translate("MainWindow", "N")) self.comboBox_2.setItemText(14, _translate("MainWindow", "O")) self.comboBox_2.setItemText(15, _translate("MainWindow", "P")) self.comboBox_2.setItemText(16, _translate("MainWindow", "Q")) self.comboBox_2.setItemText(17, _translate("MainWindow", "R")) self.comboBox_2.setItemText(18, _translate("MainWindow", "S")) self.comboBox_2.setItemText(19, _translate("MainWindow", "T")) self.comboBox_2.setItemText(20, _translate("MainWindow", "U")) self.comboBox_2.setItemText(21, _translate("MainWindow", "V")) self.comboBox_2.setItemText(22, _translate("MainWindow", "W")) self.comboBox_2.setItemText(23, _translate("MainWindow", "X")) self.comboBox_2.setItemText(24, _translate("MainWindow", "Y")) self.comboBox_2.setItemText(25, _translate("MainWindow", "Z")) self.comboBox_2.setItemText(26, _translate("MainWindow", "1")) self.comboBox_2.setItemText(27, _translate("MainWindow", "2")) self.comboBox_2.setItemText(28, _translate("MainWindow", "3")) self.comboBox_2.setItemText(29, _translate("MainWindow", "4")) self.comboBox_2.setItemText(30, _translate("MainWindow", "5")) self.comboBox_2.setItemText(31, _translate("MainWindow", "6")) self.comboBox_2.setItemText(32, _translate("MainWindow", "7")) self.comboBox_2.setItemText(33, _translate("MainWindow", "8")) self.comboBox_2.setItemText(34, _translate("MainWindow", "9")) self.comboBox_2.setItemText(35, _translate("MainWindow", "0")) self.comboBox_3.setCurrentText(_translate("MainWindow", "A")) self.comboBox_3.setItemText(0, _translate("MainWindow", "A")) self.comboBox_3.setItemText(1, _translate("MainWindow", "B")) self.comboBox_3.setItemText(2, _translate("MainWindow", "C")) self.comboBox_3.setItemText(3, _translate("MainWindow", "D")) self.comboBox_3.setItemText(4, _translate("MainWindow", "E")) self.comboBox_3.setItemText(5, _translate("MainWindow", "F")) self.comboBox_3.setItemText(6, _translate("MainWindow", "G")) self.comboBox_3.setItemText(7, _translate("MainWindow", "H")) self.comboBox_3.setItemText(8, _translate("MainWindow", "I")) self.comboBox_3.setItemText(9, _translate("MainWindow", "J")) self.comboBox_3.setItemText(10, _translate("MainWindow", "K")) self.comboBox_3.setItemText(11, _translate("MainWindow", "L")) self.comboBox_3.setItemText(12, _translate("MainWindow", "M")) self.comboBox_3.setItemText(13, _translate("MainWindow", "N")) self.comboBox_3.setItemText(14, _translate("MainWindow", "O")) self.comboBox_3.setItemText(15, _translate("MainWindow", "P")) self.comboBox_3.setItemText(16, _translate("MainWindow", "Q")) self.comboBox_3.setItemText(17, _translate("MainWindow", "R")) self.comboBox_3.setItemText(18, _translate("MainWindow", "S")) self.comboBox_3.setItemText(19, _translate("MainWindow", "T")) self.comboBox_3.setItemText(20, _translate("MainWindow", "U")) self.comboBox_3.setItemText(21, _translate("MainWindow", "V")) self.comboBox_3.setItemText(22, _translate("MainWindow", "W")) self.comboBox_3.setItemText(23, _translate("MainWindow", "X")) self.comboBox_3.setItemText(24, _translate("MainWindow", "Y")) self.comboBox_3.setItemText(25, _translate("MainWindow", "Z")) self.comboBox_3.setItemText(26, _translate("MainWindow", "1")) self.comboBox_3.setItemText(27, _translate("MainWindow", "2")) self.comboBox_3.setItemText(28, _translate("MainWindow", "3")) self.comboBox_3.setItemText(29, _translate("MainWindow", "4")) self.comboBox_3.setItemText(30, _translate("MainWindow", "5")) self.comboBox_3.setItemText(31, _translate("MainWindow", "6")) 
self.comboBox_3.setItemText(32, _translate("MainWindow", "7")) self.comboBox_3.setItemText(33, _translate("MainWindow", "8")) self.comboBox_3.setItemText(34, _translate("MainWindow", "9")) self.comboBox_3.setItemText(35, _translate("MainWindow", "0")) self.comboBox_4.setCurrentText(_translate("MainWindow", "A")) self.comboBox_4.setItemText(0, _translate("MainWindow", "A")) self.comboBox_4.setItemText(1, _translate("MainWindow", "B")) self.comboBox_4.setItemText(2, _translate("MainWindow", "C")) self.comboBox_4.setItemText(3, _translate("MainWindow", "D")) self.comboBox_4.setItemText(4, _translate("MainWindow", "E")) self.comboBox_4.setItemText(5, _translate("MainWindow", "F")) self.comboBox_4.setItemText(6, _translate("MainWindow", "G")) self.comboBox_4.setItemText(7, _translate("MainWindow", "H")) self.comboBox_4.setItemText(8, _translate("MainWindow", "I")) self.comboBox_4.setItemText(9, _translate("MainWindow", "J")) self.comboBox_4.setItemText(10, _translate("MainWindow", "K")) self.comboBox_4.setItemText(11, _translate("MainWindow", "L")) self.comboBox_4.setItemText(12, _translate("MainWindow", "M")) self.comboBox_4.setItemText(13, _translate("MainWindow", "N")) self.comboBox_4.setItemText(14, _translate("MainWindow", "O")) self.comboBox_4.setItemText(15, _translate("MainWindow", "P")) self.comboBox_4.setItemText(16, _translate("MainWindow", "Q")) self.comboBox_4.setItemText(17, _translate("MainWindow", "R")) self.comboBox_4.setItemText(18, _translate("MainWindow", "S")) self.comboBox_4.setItemText(19, _translate("MainWindow", "T")) self.comboBox_4.setItemText(20, _translate("MainWindow", "U")) self.comboBox_4.setItemText(21, _translate("MainWindow", "V")) self.comboBox_4.setItemText(22, _translate("MainWindow", "W")) self.comboBox_4.setItemText(23, _translate("MainWindow", "X")) self.comboBox_4.setItemText(24, _translate("MainWindow", "Y")) self.comboBox_4.setItemText(25, _translate("MainWindow", "Z")) self.comboBox_4.setItemText(26, _translate("MainWindow", "1")) self.comboBox_4.setItemText(27, _translate("MainWindow", "2")) self.comboBox_4.setItemText(28, _translate("MainWindow", "3")) self.comboBox_4.setItemText(29, _translate("MainWindow", "4")) self.comboBox_4.setItemText(30, _translate("MainWindow", "5")) self.comboBox_4.setItemText(31, _translate("MainWindow", "6")) self.comboBox_4.setItemText(32, _translate("MainWindow", "7")) self.comboBox_4.setItemText(33, _translate("MainWindow", "8")) self.comboBox_4.setItemText(34, _translate("MainWindow", "9")) self.comboBox_4.setItemText(35, _translate("MainWindow", "0")) self.label_2.setText(_translate("MainWindow", "延迟")) self.lineEdit_a1.setText(_translate("MainWindow", "100")) self.lineEdit_a2.setText(_translate("MainWindow", "100")) self.lineEdit_a3.setText(_translate("MainWindow", "100")) self.lineEdit_a4.setText(_translate("MainWindow", "100")) self.label_4.setText(_translate("MainWindow", "毫秒")) self.label_3.setText(_translate("MainWindow", "毫秒")) self.label_6.setText(_translate("MainWindow", "毫秒")) self.label_5.setText(_translate("MainWindow", "毫秒")) self.label_8.setText(_translate("MainWindow", "热键")) self.lineEdit_b1.setText(_translate("MainWindow", "1")) self.lineEdit_b2.setText(_translate("MainWindow", "2")) self.lineEdit_b3.setText(_translate("MainWindow", "3")) self.lineEdit_b4.setText(_translate("MainWindow", "4")) self.pushButton.setText(_translate("MainWindow", "准备完毕")) self.pushButton_2.setText(_translate("MainWindow", "停止"))
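# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition): pyuic5 output such as Ui_MainWindow
# is not meant to be edited by hand; it is attached to a plain QMainWindow at
# runtime. The widget names below (pushButton = "准备完毕"/ready,
# pushButton_2 = "停止"/stop) come straight from setupUi() above; the connected
# slots are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)  # builds the key combo boxes, delay fields and hotkey fields
    ui.pushButton.clicked.connect(lambda: print("ready"))
    ui.pushButton_2.clicked.connect(lambda: print("stop"))
    window.show()
    sys.exit(app.exec_())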
py
b4148f2eb111bb5d3f21f49aab1249f983f5d675
import os
import datetime

# Date suffix for the output file name, e.g. "_2021_03_05".
time = str(datetime.datetime.now())
time = '_' + time[:10].replace('-', '_')
outname = 'AR_regen' + time + '.txt'
confirmation = 'Your AR regen file "%s" has been created in "%s"' % (outname, os.getcwd())

##Get brokerage file #'s and derive department #'s from their first two digits
with open('ARfile.txt') as arfile:
    with open('department.txt', 'w') as dep:
        bfiles = arfile.readlines()
        for bfile in bfiles:
            dep.write('0' + bfile[:2] + '\n')

##Create file to generate output
with open('ARfile.txt', 'r') as finfiles:
    with open('department.txt', 'r') as departmentfile:
        with open(outname, 'w') as script:
            ffiles = finfiles.readlines()
            dfiles = departmentfile.readlines()

            ##Fixed parts of the SQL statement - AR Process
            functionstart = "INSERT INTO [ITSMain2].[dbo].[ITSWEB_SCHEDULE_TASK] ([USER_NAME],[DB_NAME],[TASK_TYPE],[PROCESS_TYPE],[DEPTNO],[INVNO],[IN_PROCESS]) values( 'ITS-AlKoh', 'bdp', 'XML-OUTBOUND', 'AR_INV', '"
            functionend = "' ,0 )"

            ##Concatenate department, brokerage file number and the fixed SQL parts
            for department, brokerage_file in zip(dfiles, ffiles):
                script.write(functionstart + department.strip() + "', '" + brokerage_file.strip() + functionend + '\n')

##Clean up the intermediate department file
os.remove('department.txt')
print(confirmation)
py
b4149108d89ab8682a77d7fb42adc5ba4953c4b1
import os import numpy as np import matplotlib.pyplot as plt import astropy.units as u from astropy.io import fits from astropy.wcs import WCS from astropy.coordinates import SkyCoord from astropy.table import Table import matplotlib.pyplot as plt from reproject import reproject_interp import matplotlib.gridspec as gs from matplotlib.colors import LogNorm from astropy.visualization import simple_norm COLOR = 'k' plt.rcParams['font.size'] = 16 plt.rcParams['text.color'] = COLOR plt.rcParams['axes.labelcolor'] = COLOR plt.rcParams['xtick.color'] = COLOR plt.rcParams['ytick.color'] = COLOR plt.rcParams['xtick.major.width'] = 3 plt.rcParams['ytick.major.width'] = 3 plt.rcParams['xtick.major.size'] = 8 plt.rcParams['ytick.major.size'] = 8 plt.rcParams['xtick.minor.width'] = 1 plt.rcParams['ytick.minor.width'] = 1 plt.rcParams['xtick.minor.size'] = 6 plt.rcParams['ytick.minor.size'] = 6 plt.rcParams['axes.linewidth'] = 3 lw = 5 plt.rcParams['text.color'] = COLOR plt.rcParams['xtick.color'] = COLOR plt.rcParams['ytick.color'] = COLOR plt.rcParams['axes.labelcolor'] = COLOR plt.rcParams['axes.labelcolor'] = COLOR plt.rcParams['axes.edgecolor'] = COLOR plt.rcParams['figure.facecolor'] = 'none' plt.rcParams['legend.facecolor'] = 'none' pixelsize = 11 # select a target and cutout size in pixels ra, dec = 61.331654, 20.157032 target_x = 217 target_y = 1940 target = '{0}, {1}'.format(ra, dec) x = y = pixelsize ## pixel size # set local file path to current working directory path = '../../data' ffi = 'hlsp_tica_tess_ffi_s0043-o1-00180084-cam3-ccd2_tess_v01_img.fits' sdss = 'frame-i-004334-6-0131.fits' # grab the first file in the list hdu = fits.open(os.path.join(path, ffi)) #tpf = k2flix.TargetPixelFile(filename) n_pix = hdu[0].data.shape[0] res = 21.0 * (u.arcsec/u.pixel) ## I think it's actually 20.98 or something. This is fine. 
area = res * (n_pix * u.pixel) d = area.to(u.degree) fov = d.value # compute the wcs of the image wcs = WCS(hdu[0].header)#tpf.hdulist['APERTURE'].header) ## Grab nearby star data # I'm worried this will cause issues if MAST is down, # so right now I'm importing a saved table #catalogData = Catalogs.query_region(target, radius=1, catalog="Tic") #catalogData[np.where(catalogData['Tmag']<14)] catalogData = Table.read(os.path.join(path, 'catalogData.tab'), format='ascii') # get RA and Dec coords of catalog data catalogData = catalogData[np.where(catalogData['Tmag']<14)] tic_ra = catalogData['ra'] tic_dec = catalogData['dec'] # get pixel coordinates of RA and Dec coords = wcs.all_world2pix(list(zip(tic_ra, tic_dec)),0) xc = np.array([c[0] for c in coords]) yc = np.array([c[1] for c in coords]) inds = np.where( (np.abs(np.diff(xc-target_x))<5) & (np.abs(np.diff(yc-target_y))<5) )[0] xc = xc[inds] yc = yc[inds] # compute the pixel area in arcmin data = hdu[0].data + 0.0 norm = simple_norm(data, 'log') arcmin = area.to(u.arcmin).value # retrieve the DSS file dss = fits.open(os.path.join(path, sdss)) dss_data = dss[0].data dss_wcs = WCS(dss[0].header) q = ((tic_ra<61.4) & (tic_ra>61.26) * (tic_dec>20.13) & (tic_dec<20.2)) reproj_tesscut, footprint = reproject_interp((data, wcs,), dss_wcs, shape_out=dss_data.shape, order='nearest-neighbor') plt.rcParams.update({'font.size': 15}) alpha = 0.6 fig = plt.figure(figsize=(10,10)) fig.set_facecolor('w') ax = plt.subplot(projection=dss_wcs) #ax.figure.set_size_inches((10,10)) # show image 1 ax.imshow(dss_data, origin='lower', cmap='Greys', vmax=2) # overlay image 2 ax.imshow(dss_data, origin='lower', cmap='Greys', vmin=0, vmax=2)#, alpha=0.5) img = ax.imshow(reproj_tesscut, origin='lower', alpha=0.5, norm=LogNorm(vmin=1e5, vmax=1e8)) plt.colorbar(img, ax=ax,label=r'Flux (e$^{-1}$ s$^{-1}$)') ax.scatter(tic_ra[q][1:],#-0.0007, tic_dec[q][1:],#+0.00015, marker='x', transform=ax.get_transform('world'), s=100, #facecolors='none', color='darkorange', linewidths=2) ax.scatter(tic_ra[q][0],#-0.0007, tic_dec[q][0],#+0.00015, marker='o', transform=ax.get_transform('world'), facecolors='none', edgecolors='w', s=300, linewidths=2) # add axis labels ax.set_xlabel('Right Ascension',fontsize=20) ax.set_ylabel('Declination',fontsize=20) xlim = ax.get_xlim() ylim = ax.get_ylim() xax = ax.coords[0] yax = ax.coords[1] xax.set_ticks(spacing=0.5*u.arcmin) yax.set_ticks(spacing=1. * u.arcmin) ax.set_xlim(0,750) ax.set_ylim(0,800) ax.set_rasterized(True) plt.savefig('TESSaperture.pdf',dpi=300, bbox_inches='tight', rasterize=True)
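# ---------------------------------------------------------------------------
# Editorial sketch, not part of the original script: the world-to-pixel step
# used above, shown in isolation. ``wcs`` is the astropy WCS built from the
# TICA FFI header; the ``0`` origin matches the all_world2pix(..., 0) call in
# the script. The helper name is ours, not from the original analysis.
# ---------------------------------------------------------------------------
def radec_to_pixels(wcs, ra_deg, dec_deg):
    """Convert matched RA/Dec arrays (degrees) to zero-indexed pixel coordinates."""
    coords = wcs.all_world2pix(np.column_stack([ra_deg, dec_deg]), 0)
    return coords[:, 0], coords[:, 1]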
py
b41491c97775d3a9bd4d386278a318e42b5f8dd7
# Generated by Django 2.2 on 2020-10-15 16:11 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('content', models.TextField(blank=True, null=True)), ('image', models.FileField(blank=True, null=True, upload_to='images/')), ], ), ]
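# Hedged sketch (editorial addition): the model definition that a migration
# like the one above is typically generated from. The app's actual models.py
# may differ; the field options here are read directly off the CreateModel
# operation.
#
# from django.db import models
#
# class Tweet(models.Model):
#     content = models.TextField(blank=True, null=True)
#     image = models.FileField(upload_to="images/", blank=True, null=True)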
py
b414928bcc92153be94f2534be539f9b60e4c178
import sys import socket import selectors import traceback import libclient import libserver sel = selectors.DefaultSelector() def create_request(action): return dict( type="binary/custom-client-binary-type", encoding="binary", content=bytes(action , encoding="utf-8") ) def start_connection(host, port, request): addr = (host, port) print("starting connection to", addr) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setblocking(False) sock.connect_ex(addr) events = selectors.EVENT_READ | selectors.EVENT_WRITE message = libclient.Message(sel, sock, addr, request) sel.register(sock, events, data=message) def listening(host , port): lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Avoid bind() exception: OSError: [Errno 48] Address already in use lsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) lsock.bind((host, port)) lsock.listen() print("listening on", (host, port)) lsock.setblocking(False) sel.register(lsock, selectors.EVENT_READ, data=None) def accept_wrapper(sock): conn, addr = sock.accept() # Should be ready to read print("accepted connection from", addr) conn.setblocking(False) message = libserver.Message(sel, conn, addr) sel.register(conn, selectors.EVENT_READ, data=message) action = 'client2' request = create_request(action) start_connection('127.0.0.1', 1234, request) listening('127.0.0.1', 9002) try: while True: events = sel.select(timeout=1) for key, mask in events: message = key.data if message!=None: try: message.process_events(mask) except Exception: message.close() # Check for a socket being monitored to continue. else: print("received Message from gateway") accept_wrapper(key.fileobj) if not sel.get_map(): break except KeyboardInterrupt: print("caught keyboard interrupt, exiting") finally: sel.close()
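# ---------------------------------------------------------------------------
# Editorial sketch, not part of the original script: the dispatch rule used in
# the main loop above, factored into a helper. A registration with data=None
# marks the listening socket (readiness means "accept a new peer"); any other
# data field is a libclient/libserver Message that processes its own events.
# ---------------------------------------------------------------------------
def dispatch_once(selector, timeout=1):
    for key, mask in selector.select(timeout=timeout):
        if key.data is None:
            accept_wrapper(key.fileobj)        # new inbound connection
        else:
            try:
                key.data.process_events(mask)  # established client/server Message
            except Exception:
                key.data.close()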
py
b41492c365f3b0cc6af24f7c0e53c4b88c4ae079
""" Example (in ipython -pylab): Plot average whisker angle for whisker 2 from the first session. >>> run qc_session.py >>> a = annotate_trials(r"F:\CuratedSessions", filename=r"F:\CuratedSessions\all.trialtypes.pickle") >>> out = a.by_type_session() >>> f = lambda i: gen_trial_matrix(list(seq2measurements(out[str(i)].values()[0])),iwhisker=2,ifeature=5) >>> for i in range(1,6): plot(f(i).mean(0)) >>> legend(out.keys()) """ from __future__ import print_function from __future__ import absolute_import from __future__ import division from future import standard_library standard_library.install_aliases() from builtins import zip from builtins import next from builtins import map from builtins import str from builtins import range from past.utils import old_div from builtins import object from . import traj from numpy import * from pylab import * def trial_matrix(root,iwhisker=0,ifeature=3): """ image plot of a feature for a whisker where each row is a trial """ import fnmatch import os def gen_names(root): for r,dirnames,filenames in os.walk(root): for filename in fnmatch.filter(filenames,'*.measurements'): yield os.path.join(r,filename) return gen_trial_matrix(list(gen_names),iwhisker,ifeature) def seq2measurements(seq_generator): for n in seq_generator: yield os.path.splitext(n)[0]+'.measurements' def select_by_trialid(annotation,trialid): for k,v in annotation.data.items(): if v == trialid: yield os.path.splitext(k)[0]+'.measurements' def gen_trial_matrix(generator,iwhisker=0,ifeature=5): filecount = len(list(generator)) timecount = 0; for n in generator: table = traj.MeasurementsTable(n) data = table.asarray() timecount = data[:,1].max()+1 + 10 #adding a fudge factor break im = nan*zeros((filecount,timecount)); for i,n in enumerate(generator): try: print("[%5d of %5d] %s"%(i,filecount,n)) table = traj.MeasurementsTable(n) data = table.asarray() mask = data[:,0]==iwhisker; # select whisker t = data[mask,1].astype(uint); # get times v = data[mask,ifeature]; # get feature im[i,t] = v; # fill row except: pass return im def gen_trial_matrix2(generator,iwhisker=0,ifeatures=[5,6]): nfeat = len(ifeatures) filecount = len(list(generator)) timecount = 0; for n in generator: table = traj.MeasurementsTable(n) data = table.asarray() timecount = data[:,1].max()+1 + 10 #adding a fudge factor break im = nan*zeros((nfeat,filecount,timecount)); for i,n in enumerate(generator): try: print("[%5d of %5d] %s"%(i,filecount,n)) table = traj.MeasurementsTable(n) data = table.asarray() mask = data[:,0]==iwhisker; # select whisker t = data[mask,1].astype(uint); # get times for j,ifeat in enumerate(ifeatures): im[j,i,t] = data[mask,ifeat];# get feature except: pass return im def robustmean(im): out = im.mean(0) for icol in find(isnan(out)): mask = ~isnan(im[:,icol]) out[icol] = im[mask,icol].mean() return out import fnmatch import os from reader import Reader import pickle as pickle class annotate_trials(object): """ This class is a little utility that is launched when the class is instanced. It will look for data in the file specified by <filename> and match that against .seq files (recursively) found under <root>. For any unmatched files, a window will pop showing a frame. The user has to click on the image to give it keyboard focus and then press a key on the keyboard to annotate it. After each keypress, an image for the next unmatched file will be shown, and the process repeated until every .seq file under <root> is annotated. As annotation is happening, data is saved to <filename>. 
If the annotation process is interupted, it can be picked up later and completed by instancing the class with the same arguments. Example: >>> a = annotate_trials(r"F:\CuratedSessions",r"F:\all.trialypes.pickle") >>> for e in a.data.keys(): print e F:\CuratedSessions\LTPANM25668_111609\LTPANM25668_111609_C1_A_0078.seq F:\CuratedSessions\LTPANM41663_012310\LTPANM41663_012310_C3_B_0066.seq F:\CuratedSessions\LTPANM25668_111609\LTPANM25668_111609_C1_A_0084.seq ... F:\CuratedSessions\LTPANM41663_012310\LTPANM41663_012310_C2_D_0012.seq """ def __init__(self,root,iframe=1000,filename="default.trialtypes.pickle"): try: self.load(filename) except IOError: self.data = {} self.default_filename = filename def gen_names(root): for r,dirnames,filenames in os.walk(root): for filename in fnmatch.filter(filenames,'*.seq'): name = os.path.join(r,filename) if name not in self.data: yield name self.iframe = iframe self.g = gen_names(root) try: self.n = next(self.g) except StopIteration: return # start annotating if there is any work to do movie = Reader(self.n,adjuststipple=True) self.f = figure() def totitle(name): return os.path.split(name)[-1] def onkeypress(event): print(totitle(self.n), event.key) self.data[self.n] = event.key if(self.default_filename): self.save(self.default_filename) self.n = next(self.g) print("Loading: "+totitle(self.n)) movie = Reader(self.n,adjuststipple=True) print("Showing") clf() imshow(movie[self.iframe],interpolation='nearest') self.drawrefs() grid(True) axis('image') title(totitle(self.n)) draw() self.f.canvas.mpl_connect('key_press_event',onkeypress) imshow(movie[self.iframe],interpolation='nearest') self.drawrefs() grid(True) title(totitle(self.n)) axis('image') gray() show() def drawrefs(self): #bar positions - specific to Leo's curated data xs = [129,179,228,275,426] ys = [88,88,88,88,88] for i,(x,y) in enumerate(zip(xs,ys)): text(x,y,str(i+1),color='r') plot(xs,ys,'r.') def save(self,filename): pickle.dump(self.data,open(filename,"w")); self.default_filename = filename def load(self,filename): self.data = pickle.load(open(filename,"r")); self.default_filename = filename def by_type_session(self): out = {} for k,v in self.data.items(): t = out.get(v,{}) head = lambda x: os.path.split(x)[0] tail = lambda x: os.path.split(x)[-1] s = t.get( tail(head(k)), [] ) s.append(k) t[tail(head(k))] = s out[v] = t; return out def plot_angle_v_curvature(isession,saveto=''): a = annotate_trials(r"F:\CuratedSessions", filename=r"F:\CuratedSessions\all.trialtypes.pickle") out = a.by_type_session() #do 3 whiskers for i in range(3): f = lambda k: gen_trial_matrix2( list(seq2measurements(list(out[str(k)].values())[isession])),i,[5,6]) figure() # do first 3 trial types, last two are less interesting (more noisy) for j in range(1,4): im = f(j) plot(im[0].ravel(),im[1].ravel(),'.',alpha = 0.1) axis([-130,-10,-0.015,0.015]) legend(list(map(str,list(range(1,4))))) title('Whisker %d'%i) if saveto: session = list(out.values())[0].keys()[isession] savefig(os.path.join(saveto,session+'__angle_v_curvature__whisker_%d.png'%i)) def plot_specgrams(isession): a = annotate_trials(r"F:\CuratedSessions", filename=r"F:\CuratedSessions\all.trialtypes.pickle") out = a.by_type_session() #do 3 whiskers for i in range(3): f = lambda k: gen_trial_matrix( list(seq2measurements(list(out[str(k)].values())[isession])),i,5) figure() for j in range(1,6): subplot(5,1,j) trials = f(j) trials[isnan(trials)] = trials[~isnan(trials)].mean() acc = None for row in trials: 
pxx,freqs,bins,im=specgram(row,Fs=1000,interpolation='nearest',NFFT=128,noverlap= 64,scale_by_freq=1,hold=0) if acc is None: acc = log(pxx) else: acc += log(pxx) acc /= float(len(trials)) cla() imshow(acc,interpolation='nearest') axis("tight") def gen_trial_matrix_summary(trialtypes=None,sessions=None): a = annotate_trials(r"F:\CuratedSessions", filename=r"F:\CuratedSessions\all.trialtypes.pickle") out = a.by_type_session() if trialtypes is None: trialtypes = list(out.keys()) if sessions is None: sessions = list(out.values())[0].keys() def gen_trials(): for t in trialtypes: for s in sessions: for e in out.get(str(t),{}).get(s,[]): yield e def count_trials(): return len(list(gen_trials())) def get_n_timepoints(): table = traj.MeasurementsTable( list(seq2measurements([list(a.data.keys())[0]]))[0] ) # -_-; data = table.asarray() return data[:,1].max() + 10 # add a fudge factor ntrials = count_trials() ntime = get_n_timepoints() nfeat = 4 # average angle, angle spread, projected mean follicle position, # projected follicle position spread index = zeros((ntrials,2),dtype=uint8) im = nan*zeros((nfeat,ntrials,ntime)) session_index = {} for i,k in enumerate(list(out.values())[0].iterkeys()): session_index[k] = i def gen_trial_index(): for t in trialtypes: for s in sessions: for e in out.get(str(t),{}).get(s,[]): yield int(t),session_index[s] row = 0; for i,(filename,code) in enumerate(zip(seq2measurements(gen_trials()),gen_trial_index())): print("[%5d of %5d] %s"%(i,ntrials,filename)) try: data = traj.MeasurementsTable(filename).asarray(); index[row,:] = code nwhiskers = max(data[:,0].astype(int))+1 count = zeros((nwhiskers,ntime)) working = zeros((nwhiskers,ntime)) mask = data[:,0]>=0 count[data[mask,0].astype(int),data[mask,1].astype(int)] = 1 # [whisker,time] = 1 #angles for iwhisker in range(nwhiskers): mask = data[:,0]==iwhisker working[iwhisker,data[mask,1].astype(int)] = data[mask,5] #mean angle im[0,row,:] = old_div(working.sum(0),count.sum(0)) #angle spread im[1,row,:] = working.ptp(0) #follicle position #For leo's data, just use the xposition (column 7) for iwhisker in range(nwhiskers): mask = data[:,0]==iwhisker working[iwhisker,data[mask,1].astype(int)] = data[mask,7] #mean follicle im[2,row,:] = old_div(working.sum(0),count.sum(0)) #angle follcile im[3,row,:] = working.ptp(0) row += 1 except IOError: pass return index,im def render_angle_spread_over_time(outdir,index,im,isession=0,itrials=[1,2,3,4,5],dt=50,every=10): """index,im should be returned from gen_trial_matrix_summary""" figure() colors = ['r','c','g','b','k'] for i,itime in enumerate(range(0,im.shape[2]-dt,every)): clf() for t in reversed(itrials): mask = (index[:,0]==t)*(index[:,1]==isession) plot(im[0,mask,itime:(itime+dt)].ravel(), im[1,mask,itime:(itime+dt)].ravel(), '.',markersize=2,color=colors[t-1],alpha=0.5) title("%5d"%itime) axis([-150,-20,0,90]) grid("on") show() savefig("%s/%04d.png"%(outdir,i),dpi=96,facecolor=(1,1,1)) def gen_trial_matrix_all_whiskers(n_whiskers=3,ifeatures=[5,6]): a = annotate_trials(r"F:\CuratedSessions", filename=r"F:\CuratedSessions\all.trialtypes.pickle") out = a.by_type_session() n_trials = len(a.data) n_trial_type = len(list(out.keys())) n_sessions = len(list(out.values())[0].keys()) def get_n_timepoints(): table = traj.MeasurementsTable( list(seq2measurements([list(a.data.keys())[0]]))[0] ) # -_-; data = table.asarray() return data[:,1].max() + 10 # add a fudge factor # each row: trial type id, session id, whisker id index = zeros((n_trials*n_whiskers,3),dtype=uint8) im = 
nan*zeros((len(ifeatures),n_trials*n_whiskers,get_n_timepoints())) session_index = {} for i,k in enumerate(list(out.values())[0].iterkeys()): session_index[k] = i row = 0 for kTrialType,vTrialType in out.items(): for kSession,vSession in vTrialType.items(): for filename in seq2measurements(vSession): print("[%5d of %5d] %s"%(row,im.shape[1],filename)) try: data = traj.MeasurementsTable(filename).asarray(); for iWhisker in range(n_whiskers): index[row,:] = [int(kTrialType), session_index[kSession], iWhisker] mask = data[:,0] == iWhisker; for i,ifeat in enumerate(ifeatures): im[i,row,data[mask,1].astype(uint32)] = data[mask,ifeat] row += 1 except IOError: pass return index,im
py
b41493982f0701b8d595a8112ad414b8aff22944
# -*- coding: utf-8 -*-
from flask_assets import Bundle, Environment

less = Bundle(
    "less/main.less",
    filters="less",
    output="public/css/common.css",
    depends=('less/*.less', 'less/**/*.less')
)

js = Bundle(
    "libs/jQuery/dist/jquery.js",
    "libs/bootstrap/dist/js/bootstrap.min.js",
    "js/plugins.js",
    "js/script.js",
    output="public/js/common.js"
)

assets = Environment()

assets.register("js_all", js)
assets.register("css_all", less)
py
b4149403ba6db52f253e3f84c43a686baf5c4051
#!/usr/bin/python3
"""Square module.

This module contains a class that defines a square by its size, validates
the given value, and provides a getter and a setter to read or change it.
There's also an area method that returns the area of the square and another
one that prints the square.
"""


class Square():
    """Defines a square."""

    def __init__(self, size=0):
        """Sets the necessary attributes for the Square object.

        Args:
            size (int): the size of one edge of the square.
        """
        self.size = size

    @property
    def size(self):
        """Get or set the size of the square."""
        return self.__size

    @size.setter
    def size(self, value):
        if type(value) is int:
            if value >= 0:
                self.__size = value
            else:
                raise ValueError("size must be >= 0")
        else:
            raise TypeError("size must be an integer")

    def area(self):
        """Returns the current square area."""
        return self.__size ** 2

    def my_print(self):
        """Prints the square with the # character on stdout."""
        if self.__size > 0:
            for x in range(self.__size):
                print('#' * self.__size)
        else:
            print()
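
# --- Illustrative usage (added example; not part of the original module) ---
# A minimal sketch showing how the Square class above behaves; the size value
# is an arbitrary example.
if __name__ == "__main__":
    sq = Square(3)
    print(sq.size)    # 3
    print(sq.area())  # 9
    sq.my_print()     # prints three lines of "###"
    try:
        sq.size = -1
    except ValueError as err:
        print(err)    # size must be >= 0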
py
b41495082ba1dd339ad1a3f9cf9700c363a4723c
# region IMPORTS from pathlib import Path import signal import psutil from whaaaaat import style_from_dict, Token # endregion # region Whatsapp WEBSITES websites = {'whatsapp': 'https://web.whatsapp.com/', 'wpp_unknown': 'https://web.whatsapp.com/send?phone='} # endregion # region SELECTORS whatsapp_selectors_dict = { 'login_area': '#app > div > div > div.landing-header', 'new_chat_button': '#side > header div[role="button"] span[data-icon="chat"]', 'search_contact_input_new_chat': '#app > div > div > div > div > span > div > span > div > div > div > label > div > div', 'contact_list_elements_filtered_new_chat': '#app > div > div > div > div > span > div > span > div > div > div > div > div > div > div > div > div > div > div > span > span[title][dir]', 'group_list_elements_filtered_new_chat': '#app > div > div > div > div > span > div > span > div > div > div > div > div > div > div > div > div > div > div > div > span[title][dir]', 'search_contact_input': '#side > div > div > label > div > div', 'chat_list_elements_filtered': '#pane-side > div > div > div > div > div > div > div > div > div > span > span[title][dir]', 'target_focused_title': '#main > header div > div > span[title]', 'message_area': '#main > footer div.selectable-text[contenteditable]', 'last_seen': '#main > header > div > div > span[title]', 'target_chat_header': '#main > header', 'contact_info_page_elements': '#app > div > div > div:nth-child(2) > div:last-of-type > span > div > span > div > div > div:first-child', 'contact_info_page_group_element_heading': '#app > div > div > div:nth-child(2) > div:last-of-type > ' 'span > div > span > div > div:nth-child(5)>div>div>div>div:first-child>span', 'contact_info_page_group_elements': '#app > div > div > div:nth-child(2) > div:last-of-type > ' 'span > div > span > div > div:nth-child(5)>div:nth-child(2)>div>div', 'contact_info_page_close_button': '#app > div > div > div > div > span > div > span > div > header > div > div > button', 'chat_or_message_search': '#side > div:nth-child(3) > div > label > div > div:last-child', 'chats_groups_messages_elements': '#side > div:last-child > div > div > div > div', 'contact_element': 'span > span > span[class^="matched-text"]', 'group_element': 'div:last-child > div:first-child > div:first-child > div > span > span[class^="matched-text"]', 'attach_file': '#main > header > div > div > div:nth-child(2) > div', 'choose_file': '#main > header > div > div > div > span > div > div > ul > li:nth-child(3) > button', 'send_file': '#app > div > div > div > div > span > div > span > div > div > div > span > div > div > span', 'profile_photo_element': '#side > header > div > div > img', 'about_edit_button_element': '#app > div > div > div > div > span > div > div > div > div:nth-child(4) > div > div > span > div > span', 'about_text_area': '#app > div > div > div > div > span > div > div > div > div:nth-child(4) > div > div > div > div', 'contact_info_page_target_group_name_element': 'div:nth-child(2)>div>div> div:last-of-type', 'contact_info_page_target_group_creation_info_element': ':scope > div:last-child > span', 'contact_info_page_target_group_description_element': ':scope > div:last-child span:first-of-type', 'contact_info_page_target_group_member_elements': ':scope > div:nth-child(4) > div > div', 'invalid_number_ok_button': '#app > div > span> div > span > div > div > div > div > div > div > div', 'target_name_selector': "#main > header > div > div > div > span", 'media_text': "#app > div > div > div > div > span > div > span > div > div > 
div > div > div > div > div > div > span", 'media_images': "#app > div > div > div > div > span > div > span > div > div > span > div > div > div > div > div > div", 'left_arrow_button': "#app > div > span > div > div > div > div > div > span", 'media_url_img': "#app > div > span:nth-child(3) > div > div > div > div > div > div > div > div > img", 'media_url_vid': "#app > div > span:nth-child(3) > div > div > div > div > div > div > div > div > video", } # endregion # region PATHS data_folder_path = Path.home() / 'wplay' logs_path = data_folder_path / 'logs' log_file_path = logs_path / 'wplay.log' test_log_file_path = logs_path / 'testwplay.log' user_data_folder_path = data_folder_path / '.userData' profile_photos_path = data_folder_path / 'media' / 'profilePhotos' tracking_folder_path = data_folder_path / 'trackingData' messages_json_folder_path = data_folder_path / 'messagesJSON' / 'system' messages_json_path = data_folder_path / 'messagesJSON' / 'messages.json' open_messages_json_path = data_folder_path / 'messagesJSON' / 'system' / 'openMessages.json' media_path = data_folder_path / 'media' / 'media' save_chat_folder_path = data_folder_path / 'savedChats' audio_file_folder_path = data_folder_path / 'audioFiles' chatbot_image_folder_path = data_folder_path / 'ChatbotImage' # endregion # region MENU STYLES menu_style = style_from_dict({ Token.Separator: '#6C6C6C', Token.QuestionMark: '#FF9D00 bold', Token.Selected: '#5F819D', Token.Pointer: '#FF9D00 bold', Token.Instruction: '', # default Token.Answer: '#5F819D bold', Token.Question: '', }) # endregion # region FUNCTIONS def create_dirs(): logs_path.mkdir(parents=True, exist_ok=True) user_data_folder_path.mkdir(parents=True, exist_ok=True) profile_photos_path.mkdir(parents=True, exist_ok=True) tracking_folder_path.mkdir(parents=True, exist_ok=True) messages_json_folder_path.mkdir(parents=True, exist_ok=True) media_path.mkdir(parents=True, exist_ok=True) save_chat_folder_path.mkdir(parents = True, exist_ok = True) audio_file_folder_path.mkdir(parents = True, exist_ok = True) tracking_folder_path.mkdir(parents = True, exist_ok = True) messages_json_folder_path.mkdir(parents = True, exist_ok = True) chatbot_image_folder_path.mkdir(parents= True, exist_ok=True) def kill_child_processes(parent_pid, sig=signal.SIGTERM): try: parent = psutil.Process(parent_pid) except psutil.NoSuchProcess: return children = parent.children(recursive=True) print('Process Killed!') for process in children: process.send_signal(sig) # endregion
py
b414951031eee5717d27d70ba20149015b3a8ee2
from gevent.local import local from werkzeug.local import LocalProxy from werkzeug.wrappers import Request from contextlib import contextmanager from gevent.wsgi import WSGIServer _requests = local() request = LocalProxy(lambda: _requests.request) @contextmanager def sessionmanager(environ): _requests.request = Request(environ) yield _requests.request = None def logic(): return "Hello " + request.remote_addr def application(environ, start_response): status = '200 OK' with sessionmanager(environ): body = logic() headers = [ ('Content-Type', 'text/html') ] start_response(status, headers) return [body] if __name__ == '__main__': print 'Serving on port 8000' WSGIServer(('', 8000), application).serve_forever()
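
# --- Illustrative check (added sketch; not part of the original snippet) ---
# Drives the WSGI callable above directly, without starting the gevent server:
# a hand-built environ dict and a stub start_response. Only the keys that
# Request construction and remote_addr actually need are filled in.
def _demo_call():
    environ = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/',
               'SERVER_NAME': 'localhost', 'SERVER_PORT': '8000',
               'wsgi.url_scheme': 'http', 'REMOTE_ADDR': '127.0.0.1'}
    return application(environ, lambda status, headers: None)  # ['Hello 127.0.0.1']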
py
b41495a89586fc02dfe32e8759fd7d4f36d87383
# coding: utf-8 """ App Center Client Microsoft Visual Studio App Center API # noqa: E501 OpenAPI spec version: preview Contact: [email protected] Project Repository: https://github.com/b3nab/appcenter-sdks """ import pprint import re # noqa: F401 import six class XcodeSchemeContainer(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'path': 'string', 'shared_schemes': 'array', 'podfile_path': 'string', 'cartfile_path': 'string', 'xcode_project_sha': 'string', 'workspace_project_paths': 'string', 'app_extension_targets': 'array' } attribute_map = { 'path': 'path', 'shared_schemes': 'shared_schemes', 'podfile_path': 'podfile_path', 'cartfile_path': 'cartfile_path', 'xcode_project_sha': 'xcode_project_sha', 'workspace_project_paths': 'workspace_project_paths', 'app_extension_targets': 'app_extension_targets' } def __init__(self, path=None, shared_schemes=None, podfile_path=None, cartfile_path=None, xcode_project_sha=None, workspace_project_paths=None, app_extension_targets=None): # noqa: E501 """XcodeSchemeContainer - a model defined in Swagger""" # noqa: E501 self._path = None self._shared_schemes = None self._podfile_path = None self._cartfile_path = None self._xcode_project_sha = None self._workspace_project_paths = None self._app_extension_targets = None self.discriminator = None self.path = path if shared_schemes is not None: self.shared_schemes = shared_schemes if podfile_path is not None: self.podfile_path = podfile_path if cartfile_path is not None: self.cartfile_path = cartfile_path if xcode_project_sha is not None: self.xcode_project_sha = xcode_project_sha if workspace_project_paths is not None: self.workspace_project_paths = workspace_project_paths if app_extension_targets is not None: self.app_extension_targets = app_extension_targets @property def path(self): """Gets the path of this XcodeSchemeContainer. # noqa: E501 Path to project # noqa: E501 :return: The path of this XcodeSchemeContainer. # noqa: E501 :rtype: string """ return self._path @path.setter def path(self, path): """Sets the path of this XcodeSchemeContainer. Path to project # noqa: E501 :param path: The path of this XcodeSchemeContainer. # noqa: E501 :type: string """ if path is None: raise ValueError("Invalid value for `path`, must not be `None`") # noqa: E501 self._path = path @property def shared_schemes(self): """Gets the shared_schemes of this XcodeSchemeContainer. # noqa: E501 Project schemes # noqa: E501 :return: The shared_schemes of this XcodeSchemeContainer. # noqa: E501 :rtype: array """ return self._shared_schemes @shared_schemes.setter def shared_schemes(self, shared_schemes): """Sets the shared_schemes of this XcodeSchemeContainer. Project schemes # noqa: E501 :param shared_schemes: The shared_schemes of this XcodeSchemeContainer. # noqa: E501 :type: array """ self._shared_schemes = shared_schemes @property def podfile_path(self): """Gets the podfile_path of this XcodeSchemeContainer. # noqa: E501 Path to CocoaPods file, if present # noqa: E501 :return: The podfile_path of this XcodeSchemeContainer. # noqa: E501 :rtype: string """ return self._podfile_path @podfile_path.setter def podfile_path(self, podfile_path): """Sets the podfile_path of this XcodeSchemeContainer. 
Path to CocoaPods file, if present # noqa: E501 :param podfile_path: The podfile_path of this XcodeSchemeContainer. # noqa: E501 :type: string """ self._podfile_path = podfile_path @property def cartfile_path(self): """Gets the cartfile_path of this XcodeSchemeContainer. # noqa: E501 Path to Carthage file, if present # noqa: E501 :return: The cartfile_path of this XcodeSchemeContainer. # noqa: E501 :rtype: string """ return self._cartfile_path @cartfile_path.setter def cartfile_path(self, cartfile_path): """Sets the cartfile_path of this XcodeSchemeContainer. Path to Carthage file, if present # noqa: E501 :param cartfile_path: The cartfile_path of this XcodeSchemeContainer. # noqa: E501 :type: string """ self._cartfile_path = cartfile_path @property def xcode_project_sha(self): """Gets the xcode_project_sha of this XcodeSchemeContainer. # noqa: E501 repo object Id of the pbxproject # noqa: E501 :return: The xcode_project_sha of this XcodeSchemeContainer. # noqa: E501 :rtype: string """ return self._xcode_project_sha @xcode_project_sha.setter def xcode_project_sha(self, xcode_project_sha): """Sets the xcode_project_sha of this XcodeSchemeContainer. repo object Id of the pbxproject # noqa: E501 :param xcode_project_sha: The xcode_project_sha of this XcodeSchemeContainer. # noqa: E501 :type: string """ self._xcode_project_sha = xcode_project_sha @property def workspace_project_paths(self): """Gets the workspace_project_paths of this XcodeSchemeContainer. # noqa: E501 Related projects paths for xcworkspace # noqa: E501 :return: The workspace_project_paths of this XcodeSchemeContainer. # noqa: E501 :rtype: string """ return self._workspace_project_paths @workspace_project_paths.setter def workspace_project_paths(self, workspace_project_paths): """Sets the workspace_project_paths of this XcodeSchemeContainer. Related projects paths for xcworkspace # noqa: E501 :param workspace_project_paths: The workspace_project_paths of this XcodeSchemeContainer. # noqa: E501 :type: string """ self._workspace_project_paths = workspace_project_paths @property def app_extension_targets(self): """Gets the app_extension_targets of this XcodeSchemeContainer. # noqa: E501 Information regarding project app extensions, if present # noqa: E501 :return: The app_extension_targets of this XcodeSchemeContainer. # noqa: E501 :rtype: array """ return self._app_extension_targets @app_extension_targets.setter def app_extension_targets(self, app_extension_targets): """Sets the app_extension_targets of this XcodeSchemeContainer. Information regarding project app extensions, if present # noqa: E501 :param app_extension_targets: The app_extension_targets of this XcodeSchemeContainer. 
# noqa: E501 :type: array """ self._app_extension_targets = app_extension_targets def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, XcodeSchemeContainer): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
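
# --- Illustrative usage (added example; not part of the generated model) ---
# A minimal sketch of constructing the auto-generated model above and
# round-tripping it through to_dict(); the field values are made-up examples.
if __name__ == "__main__":
    scheme = XcodeSchemeContainer(path="App/App.xcodeproj",
                                  shared_schemes=["App"],
                                  podfile_path="Podfile")
    print(scheme.to_dict())  # {'path': 'App/App.xcodeproj', 'shared_schemes': ['App'], ...}
    print(scheme == XcodeSchemeContainer(path="App/App.xcodeproj",
                                         shared_schemes=["App"],
                                         podfile_path="Podfile"))  # True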
py
b41495d26c0a1036925fd3ac2f8e4f387e73d675
# -*- coding: utf-8 -*-
"""
Created on Sun May 23 22:37:00 2021

@author: laura
"""

import pandas as pd
import numpy as np
import seaborn as sns
import plotly.express as px
import chart_studio.plotly as py
import cufflinks as cf
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
from plotly.offline import plot

#%% example array
arr1 = np.random.randn(50, 4)
df1 = pd.DataFrame(arr1, columns=['A', 'B', 'C', 'D'])

import plotly.graph_objects as go

df_stocks = px.data.stocks()
figura = px.line(df_stocks, x='date', y='GOOG',
                 labels={'x': 'Date', 'y': 'stock of google'})
# to actually see this figure I need some kind of HTML output
from plotly.offline import plot
plot(figura)

figura2 = px.line(df_stocks, x='date', y=['GOOG', 'AAPL'],
                  labels={'x': 'Fecha', 'y': 'Stocks value'})
# figura2.write_image('ejemplo.png', engine='kaleido')
plot(figura2)

#%% LINEGRAPHS
df3 = sums_year[0]
df3.index
years = ['2019', '2020', '2021']
print(top3_name)

for i in range(3):
    df3 = sums_year[i]
    fig = px.line(df3, x=df3.index, y=principales_name, title=years[i])
    fig.write_html(years[i] + '.html')
    plot(fig)

#%% PIECHARTS
dfk = anuales[0]
dfk = dfk[principales_name]
dfk = dfk.transpose()
dfk.columns = ['frec']

for i in range(len(years)):
    dfk = anuales[i]
    dfk = dfk[principales_name]
    dfk = dfk.transpose()
    dfk.columns = ['frec']
    fig = px.pie(dfk, values='frec', names=dfk.index, title=years[i])
    fig.write_html('piechart' + years[i] + '.html')
    plot(fig)

#%% BARCHART, SECOND INSTANCE
# uses my gb5 table, which is indexed by the main rights and holds percentages of true and false
df5.columns = ['False', "True"]
fig = px.bar(df5, x=df5.index, y=["True", "False"], title="Wide-Form Input")
plot(fig)
py
b41495eb08469016578d258d45f4daff7c5a3efe
from .command_registry_interface import CommandRegistryInterface from .models import CommandConfiguration, GroupConfiguration, SubgroupConfiguration from holobot.sdk.configs import ConfiguratorInterface from holobot.sdk.ioc.decorators import injectable from holobot.sdk.logging import LogInterface from holobot.sdk.utils import assert_not_none from typing import Dict, Optional, Tuple DEFAULT_GROUP_NAME = "" @injectable(CommandRegistryInterface) class CommandRegistry(CommandRegistryInterface): def __init__(self, configurator: ConfiguratorInterface, log: LogInterface) -> None: super().__init__() self.__log: LogInterface = log.with_name("Admin", "CommandRegistry") self.__registry: Dict[str, GroupConfiguration] = self.__parse_command_configs(configurator) def command_exists(self, command_name: str, group_name: Optional[str] = None, subgroup_name: Optional[str] = None) -> bool: assert_not_none(command_name, "command_name") group_name = group_name or DEFAULT_GROUP_NAME if (group := self.__registry.get(group_name, None)) is None: return False if not subgroup_name: return command_name in group.commands.keys() if (subgroup := group.subgroups.get(subgroup_name, None)) is None: return False return command_name in subgroup.commands.keys() def group_exists(self, group_name: str) -> bool: assert_not_none(group_name, "group_name") return group_name in self.__registry.keys() def get_commands(self) -> Dict[str, Dict[str, Tuple[str, ...]]]: result: Dict[str, Dict[str, Tuple[str, ...]]] = {} for group_name, group_config in self.__registry.items(): result[group_name] = group = {} group[DEFAULT_GROUP_NAME] = tuple([command_name for command_name in group_config.commands.keys()]) for subgroup_name, subgroup_config in group_config.subgroups.items(): group[subgroup_name] = tuple([command_name for command_name in subgroup_config.commands.keys()]) return result def get_group(self, group_name: str) -> Optional[GroupConfiguration]: return self.__registry.get(group_name, None) def get_subgroup(self, group_name: str, subgroup_name: str) -> Optional[SubgroupConfiguration]: if not (group := self.__registry.get(group_name, None)): return None return group.subgroups.get(subgroup_name, None) def get_command(self, command_name: str, group_name: Optional[str] = None, subgroup_name: Optional[str] = None) -> Optional[CommandConfiguration]: group_name = group_name or DEFAULT_GROUP_NAME if not (group := self.__registry.get(group_name, None)): return None if not subgroup_name: return group.commands.get(command_name, None) if not (subgroup := group.subgroups.get(subgroup_name, None)): return None return subgroup.commands.get(command_name, None) def __parse_command_configs(self, configurator: ConfiguratorInterface) -> Dict[str, GroupConfiguration]: configs: Dict[str, GroupConfiguration] = {} self.__log.debug("Parsing command group configurations...") for name, group_json in configurator.get("Admin", "CommandGroups", {}).items(): group = GroupConfiguration.from_json(name, group_json) configs[group.name] = group self.__log.debug(f"Registered command group configuration. {{ Group = {group.name} }}") self.__log.debug("Command group parsed.") return configs
py
b414977937209a0c798e5bee02aed14cfd115cdd
def busca_binaria(seq,procurado ): """ O(log n) em tempo de execução já que o while vai percorrer a sequencia O(1) em memória pois possuirá apenas a sequencia e variaveis auxiliares """ if len(seq)==0: return 0 else: i=0 f=len(seq)-1 while(i<=f): m=(i+f)//2 if procurado>seq[m]: i=m+1 else: f=m-1 if procurado>seq[m]: return m+1 else: return m import unittest class BuscaBinariaTestes(unittest.TestCase): def teste_lista_vazia(self): self.assertEqual(0, busca_binaria([], 1)) self.assertEqual(0, busca_binaria([], 2)) self.assertEqual(0, busca_binaria([], 3)) def teste_lista_unitaria(self): self.assertEqual(0, busca_binaria([1], 0)) self.assertEqual(0, busca_binaria([1], 1)) self.assertEqual(1, busca_binaria([1], 2)) self.assertEqual(1, busca_binaria([1], 3)) self.assertEqual(1, busca_binaria([1], 4)) def teste_lista_nao_unitaria(self): lista = list(range(10)) self.assertEqual(0, busca_binaria(lista, -2)) self.assertEqual(0, busca_binaria(lista, -1)) for i in lista: self.assertEqual(i, busca_binaria(lista, i)) self.assertEqual(10, busca_binaria(lista, 10)) self.assertEqual(10, busca_binaria(lista, 11)) self.assertEqual(10, busca_binaria(lista, 12)) def teste_lista_elementos_repetidos(self): lista = [1, 1, 1, 2, 2, 2] self.assertEqual(0, busca_binaria(lista, 1)) self.assertEqual(3, busca_binaria(lista, 2)) if __name__ == '__main__': unittest.main()
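
# --- Worked example (added for illustration; not part of the original exercise) ---
# busca_binaria returns the insertion index (lower bound) of `procurado` in the
# sorted sequence rather than a found/not-found flag, matching the standard
# library's bisect_left:
#
#     busca_binaria([1, 3, 5, 7], 4)  -> 2   (4 would be inserted before the 5)
#     busca_binaria([1, 3, 5, 7], 5)  -> 2   (index of the first element >= 5)
#
#     from bisect import bisect_left
#     bisect_left([1, 3, 5, 7], 4)    -> 2   (same answer)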
py
b41497cd5fcd5461edb4ecc11f272308a9837862
# coding: utf-8 import pprint import re import six from huaweicloudsdkcore.sdk_response import SdkResponse class ListIndirectPartnersResponse(SdkResponse): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ sensitive_list = [] openapi_types = { 'count': 'int', 'indirect_partners': 'list[IndirectPartnerInfo]' } attribute_map = { 'count': 'count', 'indirect_partners': 'indirect_partners' } def __init__(self, count=None, indirect_partners=None): """ListIndirectPartnersResponse - a model defined in huaweicloud sdk""" super().__init__() self._count = None self._indirect_partners = None self.discriminator = None if count is not None: self.count = count if indirect_partners is not None: self.indirect_partners = indirect_partners @property def count(self): """Gets the count of this ListIndirectPartnersResponse. 符合条件的记录个数,只有成功的时候出现。 :return: The count of this ListIndirectPartnersResponse. :rtype: int """ return self._count @count.setter def count(self, count): """Sets the count of this ListIndirectPartnersResponse. 符合条件的记录个数,只有成功的时候出现。 :param count: The count of this ListIndirectPartnersResponse. :type: int """ self._count = count @property def indirect_partners(self): """Gets the indirect_partners of this ListIndirectPartnersResponse. 精英服务商列表,具体参见表1。 :return: The indirect_partners of this ListIndirectPartnersResponse. :rtype: list[IndirectPartnerInfo] """ return self._indirect_partners @indirect_partners.setter def indirect_partners(self, indirect_partners): """Sets the indirect_partners of this ListIndirectPartnersResponse. 精英服务商列表,具体参见表1。 :param indirect_partners: The indirect_partners of this ListIndirectPartnersResponse. :type: list[IndirectPartnerInfo] """ self._indirect_partners = indirect_partners def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ListIndirectPartnersResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
py
b41497dd652a24b36e53e38cc67f4572479d965c
""" Entrypoint for the application """ import argparse from hpilo_exporter.exporter import ILOExporterServer def main(): parser = argparse.ArgumentParser(description='Exports ilo heath_at_a_glance state to Prometheus') parser.add_argument('--address', type=str, dest='address', default='0.0.0.0', help='address to serve on') parser.add_argument('--port', type=int, dest='port', default='9416', help='port to bind') parser.add_argument('--endpoint', type=str, dest='endpoint', default='/metrics', help='endpoint where metrics will be published') args = parser.parse_args() exporter = ILOExporterServer(**vars(args)) exporter.run() if __name__ == '__main__': main()
py
b4149877d634ac67c370c02874a11adba306ca20
# Generated by Django 3.0.3 on 2020-03-13 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('attendance_management', '0002_auto_20200313_1411'), ] operations = [ migrations.AddField( model_name='dailyattendance', name='status', field=models.CharField(default=None, max_length=4, null=True), ), ]
py
b41498ce0af663f056e4f3da246741b4aef516d7
paired = False procs = 1 alt_dir = False initial_qc = True all_qc = False walkaway = True front_trim = 6 mismatch = 1 R1_bases_ls = ['TCC', 'TCT'] R2_bases_ls = ['TCC', 'TCT'] non_genomic = 1 end_score = 30 window = 10 min_len = 100 adapter_match = 12 q_min = 30 q_percent = 95 rm_transit = True
py
b4149a5f220f1fac45a46b67188bdb418d4cc99c
# (C) Copyright IBM Corporation 2005 # All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # on the rights to use, copy, modify, merge, publish, distribute, sub # license, and/or sell copies of the Software, and to permit persons to whom # the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the next # paragraph) shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL # IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # # Authors: # Ian Romanick <[email protected]> from __future__ import print_function import argparse import copy import license import gl_XML, glX_XML def should_use_push(registers): for [reg, offset] in registers: if reg[1:4] == "xmm": return 0 N = len(registers) return (N & 1) != 0 def local_size(registers): # The x86-64 ABI says "the value (%rsp - 8) is always a multiple of # 16 when control is transfered to the function entry point." This # means that the local stack usage must be (16*N)+8 for some value # of N. (16*N)+8 = (8*(2N))+8 = 8*(2N+1). As long as N is odd, we # meet this requirement. N = (len(registers) | 1) return 8*N def save_all_regs(registers): adjust_stack = 0 if not should_use_push(registers): adjust_stack = local_size(registers) print('\tsubq\t$%u, %%rsp' % (adjust_stack)) for [reg, stack_offset] in registers: save_reg( reg, stack_offset, adjust_stack ) return def restore_all_regs(registers): adjust_stack = 0 if not should_use_push(registers): adjust_stack = local_size(registers) temp = copy.deepcopy(registers) while len(temp): [reg, stack_offset] = temp.pop() restore_reg(reg, stack_offset, adjust_stack) if adjust_stack: print('\taddq\t$%u, %%rsp' % (adjust_stack)) return def save_reg(reg, offset, use_move): if use_move: if offset == 0: print('\tmovq\t%s, (%%rsp)' % (reg)) else: print('\tmovq\t%s, %u(%%rsp)' % (reg, offset)) else: print('\tpushq\t%s' % (reg)) return def restore_reg(reg, offset, use_move): if use_move: if offset == 0: print('\tmovq\t(%%rsp), %s' % (reg)) else: print('\tmovq\t%u(%%rsp), %s' % (offset, reg)) else: print('\tpopq\t%s' % (reg)) return class PrintGenericStubs(gl_XML.gl_print_base): def __init__(self): gl_XML.gl_print_base.__init__(self) self.name = "gl_x86-64_asm.py (from Mesa)" self.license = license.bsd_license_template % ("(C) Copyright IBM Corporation 2005", "IBM") return def get_stack_size(self, f): size = 0 for p in f.parameterIterator(): size += p.get_stack_size() return size def printRealHeader(self): print("/* If we build with gcc's -fvisibility=hidden flag, we'll need to change") print(" * the symbol visibility mode to 'default'.") print(' */') print('') print('#include "x86/assyntax.h"') print('') print('#ifdef __GNUC__') print('# pragma GCC visibility push(default)') print('# define HIDDEN(x) .hidden x') print('#else') print('# define HIDDEN(x)') print('#endif') print('') print('# define GL_PREFIX(n) 
GLNAME(CONCAT(gl,n))') print('') print('\t.text') print('') print('#ifdef USE_ELF_TLS') print('') print('_x86_64_get_dispatch:') print('\tmovq\t_glapi_tls_Dispatch@GOTTPOFF(%rip), %rax') print('\tmovq\t%fs:(%rax), %rax') print('\tret') print('\t.size\t_x86_64_get_dispatch, .-_x86_64_get_dispatch') print('') print('#elif defined(HAVE_PTHREAD)') print('') print('\t.extern\t_glapi_Dispatch') print('\t.extern\t_gl_DispatchTSD') print('\t.extern\tpthread_getspecific') print('') print('\t.p2align\t4,,15') print('_x86_64_get_dispatch:') print('\tmovq\t_gl_DispatchTSD@GOTPCREL(%rip), %rax') print('\tmovl\t(%rax), %edi') print('\tjmp\tpthread_getspecific@PLT') print('') print('#else') print('') print('\t.extern\t_glapi_get_dispatch') print('') print('#endif') print('') return def printRealFooter(self): print('') print('#if defined (__ELF__) && defined (__linux__)') print(' .section .note.GNU-stack,"",%progbits') print('#endif') return def printFunction(self, f): # The x86-64 ABI divides function parameters into a couple # classes. For the OpenGL interface, the only ones that are # relevant are INTEGER and SSE. Basically, the first 8 # GLfloat or GLdouble parameters are placed in %xmm0 - %xmm7, # the first 6 non-GLfloat / non-GLdouble parameters are placed # in registers listed in int_parameters. # # If more parameters than that are required, they are passed # on the stack. Therefore, we just have to make sure that # %esp hasn't changed when we jump to the actual function. # Since we're jumping to the function (and not calling it), we # have to make sure of that anyway! int_parameters = ["%rdi", "%rsi", "%rdx", "%rcx", "%r8", "%r9"] int_class = 0 sse_class = 0 stack_offset = 0 registers = [] for p in f.parameterIterator(): type_name = p.get_base_type_string() if p.is_pointer() or (type_name != "GLfloat" and type_name != "GLdouble"): if int_class < 6: registers.append( [int_parameters[int_class], stack_offset] ) int_class += 1 stack_offset += 8 else: if sse_class < 8: registers.append( ["%%xmm%u" % (sse_class), stack_offset] ) sse_class += 1 stack_offset += 8 if ((int_class & 1) == 0) and (sse_class == 0): registers.append( ["%rbp", 0] ) name = f.dispatch_name() print('\t.p2align\t4,,15') print('\t.globl\tGL_PREFIX(%s)' % (name)) print('\t.type\tGL_PREFIX(%s), @function' % (name)) if not f.is_static_entry_point(f.name): print('\tHIDDEN(GL_PREFIX(%s))' % (name)) print('GL_PREFIX(%s):' % (name)) print('#if defined(USE_ELF_TLS)') print('\tcall\t_x86_64_get_dispatch@PLT') print('\tmovq\t%u(%%rax), %%r11' % (f.offset * 8)) print('\tjmp\t*%r11') print('#elif defined(HAVE_PTHREAD)') save_all_regs(registers) print('\tcall\t_x86_64_get_dispatch@PLT') restore_all_regs(registers) if f.offset == 0: print('\tmovq\t(%rax), %r11') else: print('\tmovq\t%u(%%rax), %%r11' % (f.offset * 8)) print('\tjmp\t*%r11') print('#else') print('\tmovq\t_glapi_Dispatch(%rip), %rax') print('\ttestq\t%rax, %rax') print('\tje\t1f') print('\tmovq\t%u(%%rax), %%r11' % (f.offset * 8)) print('\tjmp\t*%r11') print('1:') save_all_regs(registers) print('\tcall\t_glapi_get_dispatch') restore_all_regs(registers) print('\tmovq\t%u(%%rax), %%r11' % (f.offset * 8)) print('\tjmp\t*%r11') print('#endif /* defined(USE_ELF_TLS) */') print('\t.size\tGL_PREFIX(%s), .-GL_PREFIX(%s)' % (name, name)) print('') return def printBody(self, api): for f in api.functionIterateByOffset(): self.printFunction(f) for f in api.functionIterateByOffset(): dispatch = f.dispatch_name() for n in f.entry_points: if n != f.name: if f.is_static_entry_point(n): text = 
'\t.globl GL_PREFIX(%s) ; .set GL_PREFIX(%s), GL_PREFIX(%s)' % (n, n, dispatch) if f.has_different_protocol(n): print('#ifndef GLX_INDIRECT_RENDERING') print(text) print('#endif') else: print(text) return def _parser(): """Parse arguments and return a namespace.""" parser = argparse.ArgumentParser() parser.add_argument('-f', default='gl_API.xml', dest='filename', help='An XML file describing an API') return parser.parse_args() def main(): """Main file.""" args = _parser() printer = PrintGenericStubs() api = gl_XML.parse_GL_API(args.filename, glX_XML.glx_item_factory()) printer.Print(api) if __name__ == '__main__': main()
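
# --- Worked example (added for illustration; not part of the generator) ---
# The alignment argument in the comments above boils down to keeping the local
# stack usage equal to 8*(odd number). local_size() achieves that by OR-ing the
# register count with 1; a few concrete counts, using dummy [reg, offset] pairs:
#
#     local_size([['%rdi', 0]])                              -> 8    (1 | 1 = 1)
#     local_size([['%rdi', 0], ['%rsi', 8]])                 -> 24   (2 | 1 = 3)
#     local_size([['%rdi', 0], ['%rsi', 8], ['%rdx', 16]])   -> 24   (3 | 1 = 3)
#
# 8, 24, 40, ... are all congruent to 8 mod 16, which is exactly the (16*N)+8
# form that the x86-64 ABI note in local_size()/printFunction() requires.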
py
b4149a736916a7cd5f5acb7bcf78042875e1c374
from exabgp.bgp.message.update.nlri.bgpls.tlvs.ipreach import IpReach from exabgp.bgp.message.update.attribute.bgpls.prefix.igptags import IgpTags from exabgp.bgp.message.update.attribute.bgpls.prefix.prefixmetric import PrefixMetric from exabgp.bgp.message.update.nlri.bgpls.tlvs.ospfroute import OspfRoute from exabgp.bgp.message.update.nlri.bgpls.tlvs.node import NodeDescriptor import unittest class TestTlvs(unittest.TestCase): def test_ip_reach_ipv4(self,): data = b'\n\n\x00' tlv = IpReach.unpack(data, 3) self.assertEqual(tlv.json(), '"ip-reachability-tlv": "10.0.0.0", "ip-reach-prefix": "10.0.0.0/10"') def test_ip_reach_ipv6(self,): data = b'\x7f \x01\x07\x00\x00\x00\x80' tlv = IpReach.unpack(data, 4) self.assertEqual( tlv.json(), '"ip-reachability-tlv": "2001:700:0:8000::", "ip-reach-prefix": "2001:700:0:8000::/127"' ) def test_igp_tags(self,): data = b'\x00\x00\xff\xfe' tlv = IgpTags.unpack(data, len(data)) self.assertEqual(tlv.json(), '"igp-route-tags": [65534]') def test_prefix_metric(self,): data = b'\x00\x00\x00\x14' tlv = PrefixMetric.unpack(data, len(data)) self.assertEqual(tlv.json(), '"prefix-metric": 20') def test_ospf_route_type(self,): data = b'\x04' tlv = OspfRoute.unpack(data) self.assertEqual(tlv.json(), '"ospf-route-type": 4') class TestDescriptors(unittest.TestCase): def test_node_descriptor(self,): data = b'\x02\x00\x00\x04\x00\x00\xff\xfd\x02\x01\x00\x04\x00\x00\x00\x00\x02\x03\x00\x04\nq?\xf0' igp_type = 3 descriptor, remain = NodeDescriptor.unpack(data, igp_type) self.assertEqual(descriptor.json(), '"autonomous-system": 65533') descriptor, remain = NodeDescriptor.unpack(remain, igp_type) self.assertEqual(descriptor.json(), '"bgp-ls-identifier": "0"') descriptor, remain = NodeDescriptor.unpack(remain, igp_type) self.assertEqual(descriptor.json(), '"router-id": "10.113.63.240"') if __name__ == '__main__': unittest.main()
py
b4149ab4d6fd2d0c0a0a976667f9d2d58ad6ab60
import autograd.numpy as np from autograd import grad from autograd.test_util import check_grads from sklearn.base import BaseEstimator, RegressorMixin from sklearn.utils import check_X_y from sklearn.utils.validation import check_is_fitted, check_array, FLOAT_DTYPES class DeadZoneRegressor(BaseEstimator, RegressorMixin): def __init__(self, threshold=0.3, relative=False, effect="linear", n_iter=2000, stepsize=0.01, check_grad=False): self.threshold = threshold self.relative = relative self.effect = effect self.n_iter = n_iter self.stepsize = stepsize self.check_grad = check_grad self.allowed_effects = ("linear", "quadratic", "constant") self.loss_log_ = None self.wts_log_ = None self.deriv_log_ = None self.coefs_ = None def fit(self, X, y): X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES) if self.effect not in self.allowed_effects: raise ValueError(f"effect {self.effect} must be in {self.allowed_effects}") def deadzone(errors): if self.effect == "linear": return np.where(errors > self.threshold, errors, np.zeros(errors.shape)) if self.effect == "quadratic": return np.where(errors > self.threshold, errors**2, np.zeros(errors.shape)) def training_loss(weights): diff = np.abs(np.dot(X, weights) - y) if self.relative: diff = diff / y return np.mean(deadzone(diff)) n, k = X.shape # Build a function that returns gradients of training loss using autograd. training_gradient_fun = grad(training_loss) # Check the gradients numerically, just to be safe. weights = np.random.normal(0, 1, k) if self.check_grad: check_grads(training_loss, modes=['rev'])(weights) # Optimize weights using gradient descent. self.loss_log_ = np.zeros(self.n_iter) self.wts_log_ = np.zeros((self.n_iter, k)) self.deriv_log_ = np.zeros((self.n_iter, k)) for i in range(self.n_iter): weights -= training_gradient_fun(weights) * self.stepsize self.wts_log_[i, :] = weights.ravel() self.loss_log_[i] = training_loss(weights) self.deriv_log_[i, :] = training_gradient_fun(weights).ravel() self.coefs_ = weights return self def predict(self, X): X = check_array(X, estimator=self, dtype=FLOAT_DTYPES) check_is_fitted(self, ['coefs_']) return np.dot(X, self.coefs_)
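
# --- Illustrative usage (added sketch; not part of the original module) ---
# Fits the estimator above on synthetic data; the shapes, noise level, and
# hyperparameters are arbitrary examples, and plain numpy is used for the data.
if __name__ == "__main__":
    import numpy as onp
    rng = onp.random.RandomState(0)
    X = rng.normal(size=(200, 3))
    y = onp.dot(X, onp.array([1.0, -2.0, 0.5])) + rng.normal(scale=0.05, size=200)
    model = DeadZoneRegressor(threshold=0.1, effect="linear", n_iter=500, stepsize=0.05)
    model.fit(X, y)
    print(model.coefs_)          # should land near [1.0, -2.0, 0.5]
    print(model.predict(X[:5]))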
py
b4149ab8bd24e46ceaf857cbfe24b94e2d10e730
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Qt application that drives the solar eclipse rendering. This application receives the arguments defining a solar eclipse rendering (at a location, time range, and various camera parameters). It requires an OpenGL-capable X server, but renders offline (no window) and exits when complete. """ import argparse import piexif import math import ephem import json import sys import os import pickle from eclipse_gis import eclipse_gis import signal from PyQt5 import QtGui, QtCore, QtWidgets from convert_location import convert_location from eclipse_renderer import EclipseRenderer, RES_X, RES_Y from util import get_phase, TOTALITY, NO_ECLIPSE, PARTIAL def write_stat(eclipse_renderer, stats, suffix, pt): """Write a single line of data describing the sun center and size for this time point.""" sun_center, moon_center = eclipse_renderer.getSunMoonCenter(pt) c = eclipse_renderer.getSunSize(pt) sun_radius = c[1] # sun_radius_correct = eclipse_renderer.getSunSizeProj(pt) # print sun_radius, sun_radius_correct # TODO(dek): replace this hack with code a getMoonSize function that works like getSunSize moon_radius = sun_radius l = "(%d, %d, %d)" % (RES_X - sun_center[0], sun_center[1], sun_radius) lune = pt[7] if lune == 0.: t = 'NO_ECLIPSE' elif lune == 100.: t = 'TOTALITY' else: t = 'PARTIAL' if t == 'NO_ECLIPSE' or t == 'TOTAL': l2 = "None" else: l2 = "(%d, %d, %d)" % (RES_X - moon_center[0], moon_center[1], moon_radius) stats.write("%s|%s|%s|%s\n" % (suffix, t, l, l2)) def write_image(image, fname, pt, lat, lon, fov, pan): """Write the image corresponding to a time and space point to fname.""" dt = pt[0] s = pt[7] image.save(fname) datestamp, time = dt.split(" ") h, m, s = time.split(":") timestamp = ((int(h),1), (int(m),1), (int(s),1)) lat_pretty = ephem.degrees(math.radians(lat)) lat_h, lat_m, lat_s = str(lat_pretty).split(":") lat_fmt = ((int(lat_h), 1), (int(lat_m), 1), (int(float(lat_s)), 1)) lon_pretty = ephem.degrees(math.radians(lon)) lon_h, lon_m, lon_s = str(lon_pretty).split(":") lon_fmt = ((-int(lon_h), 1), (int(lon_m), 1), (int(float(lon_s)), 1)) exif = piexif.load(fname) # Update the EXIF GPS time and date exif['GPS'][piexif.GPSIFD.GPSLatitude ] = lat_fmt exif['GPS'][piexif.GPSIFD.GPSLatitudeRef ] = 'N' exif['GPS'][piexif.GPSIFD.GPSLongitude ] = lon_fmt exif['GPS'][piexif.GPSIFD.GPSLongitudeRef ] = 'W' exif['GPS'][piexif.GPSIFD.GPSDateStamp ] = datestamp exif['GPS'][piexif.GPSIFD.GPSTimeStamp ] = timestamp t = get_phase(s) # Write custom data into the MakerNote exif['Exif'][piexif.ExifIFD.MakerNote] = "%s|%d, %d|%d" % (t, fov, pan[0], pan[1]) b = piexif.dump(exif) piexif.insert(b, fname) class MainWindow(QtWidgets.QWidget): def __init__(self, fov, pan_x, pan_y, subset, index, outdir, inclusion_threshold, generate, load_file, save_file): super(MainWindow, self).__init__() self.fov = fov self.pan_x = pan_x self.pan_y = pan_y self.subset = subset self.index = index self.outdir = outdir self.inclusion_threshold = inclusion_threshold 
self.eclipse_renderer = EclipseRenderer(self.fov, (self.pan_x, self.pan_y), generate=generate, load_file=load_file, save_file=save_file) def run(self): """Driver function for iterating over and rendering time-space point images to files.""" min_dts = "2017/08/21 16:00:00" max_dts = "2017/08/21 20:00:00" if self.subset == 'inside': posl = pickle.load(open("locations_inside.pkl", "rb")) elif self.subset == 'outside': posl = pickle.load(open("locations_outside.pkl", "rb")) else: raise RuntimeError, "Unrecognized subset: '%s'" % self.subset pos = posl[self.index] lat, lon = pos # Generate points to render, filtering by inclusion threshold pts = convert_location(lat, lon, min_dts, max_dts, self.inclusion_threshold, dt=30) # Only create dirs and write stats if there are any time points if len(pts): if not os.path.exists(self.outdir): os.mkdir(self.outdir) dir_ = os.path.join(self.outdir, "%.4f,%.4f,%d,%d,%d,%s" % ( lat, lon,self.fov,self.pan_x,self.pan_y,self.subset)) if not os.path.exists(dir_): os.mkdir(dir_) stats = open(os.path.join(dir_, "stats.txt"), "w") for i, pt in enumerate(pts): suffix = "%05d.jpg" % i fname = os.path.join(dir_, suffix) write_stat(self.eclipse_renderer, stats, suffix, pt) image = self.eclipse_renderer.paint(pt) write_image(image, fname, pt, lat, lon, self.fov, (self.pan_x, self.pan_y)) stats.close() sys.exit(0) def get_arguments(): parser = argparse.ArgumentParser(description='') parser.add_argument('--fov', type=int, default=2) parser.add_argument('--pan_x', type=int, default=0) parser.add_argument('--pan_y', type=int, default=0) parser.add_argument('--subset', type=str, default='inside') parser.add_argument('--index', type=int, default=0) parser.add_argument('--save-file', type=str, default="random.txt") parser.add_argument('--load-file', type=str, default="random.txt") parser.add_argument('--generate', action='store_true') parser.add_argument('--output_dir', type=str, default="/mnt/dek/images/generated-images-7") parser.add_argument('--inclusion_threshold', type=float, default=95) return parser.parse_args() def main(): args = get_arguments() signal.signal(signal.SIGINT, signal.SIG_DFL) app = QtWidgets.QApplication(['Eclipse']) window = MainWindow(args.fov, args.pan_x, args.pan_y, args.subset, args.index, args.output_dir, args.inclusion_threshold, args.generate, args.load_file, args.save_file) QtCore.QTimer.singleShot(0, window.run) app.exec_() if __name__ == '__main__': main()
py
b4149b02b0ca7b908eed9fde421d6b050f55d1d9
import argparse import h5py from torchvision import transforms #from utilities import CXRDataset import os import pydicom from pydicom import dcmread import numpy as np ######################### # GENERATE pydicom DATASET # # ######################## parser = argparse.ArgumentParser() parser.add_argument('--img_size', type=int, default=64) parser.add_argument('--crop_size', type=int, default=64) # parser.add_argument('--CXR_dataset', type=str, default='CheXpert') args = parser.parse_args() # IMG_DIR = f'/home/aisinai/data/{args.CXR_dataset}' # DATA_DIR = f'/home/aisinai/data/{args.CXR_dataset}' IMG_DIR = 'C:/Users/User/Desktop/philips/vqvae2/dataset_MY/train_init' DATA_DIR = 'C:/Users/User/Desktop/philips/vqvae2/dataset_MY/train_init' # HDF5_DIR = '/home/aisinai/work/HDF5_datasets' PYDICOM_DIR = 'C:/Users/User/Desktop/philips/vqvae2/dataset_MY/train_np/' os.makedirs(PYDICOM_DIR, exist_ok=True) num_label = 14 nc = 1 # number of channels; 3 for RGB, 1 for grayscale # mean = [0.485, 0.456, 0.406] # ImageNet mean # std = [0.229, 0.224, 0.225] # ImageNet std # for info view jupyter notebook Projections_pydicom.ipynb mean = 15398 std = 11227 normalization = transforms.Normalize(mean=mean, std=std) transform_array = [transforms.ToPILImage(), transforms.Resize(args.img_size), # transforms.CenterCrop(args.crop_size), transforms.RandomCrop(args.crop_size), transforms.ToTensor(), normalization] file_list = os.listdir(DATA_DIR) for f in file_list: dcm = DATA_DIR + '/' + f dcm_1 = dcmread(dcm) im_1 = dcm_1.pixel_array.astype(np.uint8) img = transforms.Compose(transform_array)(im_1) np.save(PYDICOM_DIR+f, img)
py
b4149b042f4b328aacb84aef535575a2410a93d0
from pytorch_transformers import GPT2Config, GPT2Model from onmt.modules.embeddings import Embeddings from onmt.encoders.transformer import EncoderBase import os from programmingalpha.models import expandEmbeddingByN class OnmtGPT2Encoder(EncoderBase): def __init__(self, model_path): super(OnmtGPT2Encoder, self).__init__() config=GPT2Config.from_json_file(os.path.join( model_path, "config.json") ) pretrained_dict=os.path.join( model_path, "pytorch_model.bin") if os.path.exists(pretrained_dict): model=GPT2Model.from_pretrained(pretrained_model_name_or_path=pretrained_dict, config=config) print("init GPT2 model with {} weights".format(len(model.state_dict()))) else: model=GPT2Model(config) model.wte=expandEmbeddingByN(model.wte, 4) self.encoder=model #print(model) print("***"*20) def forward(self, src, lengths=None): """ Args: src (LongTensor): padded sequences of sparse indices ``(src_len, batch, nfeat)`` lengths (LongTensor): length of each sequence ``(batch,)`` """ inputids=src.squeeze(2).transpose(0,1).contiguous() outputs=self.encoder(input_ids=inputids) #print(len(outputs)) #print(outputs) emb=outputs[2][-1] memory_bank=outputs[0] emb=emb.transpose(0,1).contiguous() memory_bank=memory_bank.transpose(0,1).contiguous() #print("src--> outs", src.size(), emb.size(), memory_bank.size()) return emb, memory_bank, lengths def getWordEmbeddingFromGPT2Encoder(model:OnmtGPT2Encoder): return model.encoder.wte def buildGPT2(**kwargs): if "model_path" not in kwargs: import programmingalpha kwargs["model_path"] = programmingalpha.GPT2Base encoder=OnmtGPT2Encoder(kwargs["model_path"]) return encoder
py
b4149d00285c467fd919160ea4c941c394752e98
from pyperp.contracts.accountBalance import * from pyperp.contracts.clearingHouse import * from pyperp.contracts.marketRegistry import * from pyperp.contracts.orderBook import * from pyperp.contracts.vault import * from pyperp.contracts.types import *
py
b4149d2f524709842583f67554979fee571dfd40
import threading import time from . import article_checker from . import tools from loguru import logger class CheckerThread(threading.Thread): def __init__(self, parser, queue, database, database_table_name, timeout): super(CheckerThread, self).__init__() self.setDaemon(True) self.database = database self.database_table_name = database_table_name self.parser = parser self.timeout = timeout self.queue = queue logger.info(f'{self.parser.__class__.__name__} thread initialized') def run(self): logger.info('Checker thread started') while True: try: while True: last_info = tools.delete_duplicates( self.parser.get_latest()) for uri in last_info: self.database.check_table(self.database_table_name) if not self.database.is_exist(self.database_table_name, uri): this_article = self.parser.get_article(uri) self.database.insert_uri( self.database_table_name, uri) if this_article: this_article = article_checker.Article( this_article) self.queue.append(this_article) time.sleep(1) time.sleep(self.timeout) except Exception as error: logger.exception( f'Error... Reload worker thread for {self.parser.__class__.__name__} after 15 seconds..') time.sleep(15) class ArticleQueueThread(threading.Thread): def __init__(self, telegram_bot, parsers_settings): super(ArticleQueueThread, self).__init__() self.setDaemon(True) self.queue = [] self.telegram_bot = telegram_bot self.parsers_settings = parsers_settings logger.debug('ArticleQueue thread: Initialized') def append(self, article): self.queue.append(article) def get(self): if len(self.queue) > 0: article = self.queue[0] del self.queue[0] return article def run(self): logger.info('ArticleQueue thread: Started') while True: if len(self.queue) > 0: last_article = self.get() last_article.send_key_words = self.parsers_settings[ last_article.source_name]['send_key_words'] if last_article.check_for_match(self.parsers_settings[last_article.source_name]['key_words']): try: self.telegram_bot.send_article(last_article) except Exception as error: logger.exception(error) time.sleep(1) class BotPollingThread(threading.Thread): def __init__(self, article_bot, database, password): super(BotPollingThread, self).__init__() self.setDaemon(True) self.bot = article_bot.bot self.database = database self.password = password def run(self): @self.bot.message_handler(commands=['my_id']) def send_id(message): logger.info('Bot: Message from {0}: {1}'.format( message.chat.id, message.text)) self.bot.send_message(message.chat.id, str(message.chat.id)) @self.bot.message_handler(commands=['start']) def send_start(message): logger.info('Bot: Message from {0}: {1}'.format( message.chat.id, message.text)) text = 'Hello!' self.bot.reply_to(message, text) logger.info('Bot: Send text to {0}: {1}'.format( message.chat.id, text)) @self.bot.message_handler(commands=['subscribe']) def add_user(message): logger.info('Bot: Message from {0}: {1}'.format( message.chat.id, message.text)) user_id = message.chat.id is_password = self.password if not is_password: if not self.database.is_user_exist(user_id): text = 'Теперь вы получатель. Вы будете получать новые статьи в этом чате.' self.database.insert_user_id(user_id) logger.info('Bot: Insert user {0}'.format(user_id)) logger.info( 'Bot: Send text to {0}: {1}'.format(user_id, text)) self.bot.reply_to(message, text) else: text = 'Вы уже получатель.' 
logger.info( 'Bot: Send text to {0}: {1}'.format(user_id, text)) self.bot.reply_to(message, text) elif self.database.is_user_exist(user_id): self.bot.reply_to(message, 'Вы уже получатель.') else: self.bot.reply_to(message, 'Упс! Пришлите пароль.') @self.bot.message_handler(commands=['stop']) def delete_user(message): logger.info('Bot: Message from {0}: {1}'.format( message.chat.id, message.text)) user_id = message.chat.id if self.database.is_user_exist(user_id): text = 'Теперь вы не будете получать новые статьи. Чтобы снова получать их, снова подпишитесь через команду /subscribe.' self.database.delete_user_id(user_id) logger.info('Bot: Delete user {0}'.format(user_id)) logger.info('Bot: Send text to {0}: {1}'.format(user_id, text)) self.bot.reply_to(message, text) else: text = 'Вы не являетесь получателем.' logger.info('Bot: Send text to {0}: {1}'.format(user_id, text)) self.bot.reply_to(message, text) @self.bot.message_handler(func=lambda message: True) def echo_all(message): logger.info('Bot: (message_handler) Message from {0}: {1}'.format( message.chat.id, message.text)) if message.text == self.password: if not self.database.is_user_exist(message.chat.id): user_id = message.chat.id self.database.insert_user_id(user_id) self.bot.reply_to( message, 'Теперь вы получатель. Вы будете получать новые статьи в этом чате.') else: self.bot.reply_to(message, 'Вы уже получатель.') self.bot.polling(timeout=0.2)
py
b4149d3ef007e807c3dbc3a00a513d0a0d4cceda
import asyncore import base64 import email.mime.text from email.message import EmailMessage from email.base64mime import body_encode as encode_base64 import email.utils import hmac import socket import smtpd import smtplib import io import re import sys import time import select import errno import textwrap import threading import unittest from test import support, mock_socket from test.support import HOST, HOSTv4, HOSTv6 from unittest.mock import Mock if sys.platform == 'darwin': # select.poll returns a select.POLLHUP at the end of the tests # on darwin, so just ignore it def handle_expt(self): pass smtpd.SMTPChannel.handle_expt = handle_expt def server(evt, buf, serv): serv.listen() evt.set() try: conn, addr = serv.accept() except socket.timeout: pass else: n = 500 while buf and n > 0: r, w, e = select.select([], [conn], []) if w: sent = conn.send(buf) buf = buf[sent:] n -= 1 conn.close() finally: serv.close() evt.set() class GeneralTests(unittest.TestCase): def setUp(self): smtplib.socket = mock_socket self.port = 25 def tearDown(self): smtplib.socket = socket # This method is no longer used but is retained for backward compatibility, # so test to make sure it still works. def testQuoteData(self): teststr = "abc\n.jkl\rfoo\r\n..blue" expected = "abc\r\n..jkl\r\nfoo\r\n...blue" self.assertEqual(expected, smtplib.quotedata(teststr)) def testBasic1(self): mock_socket.reply_with(b"220 Hola mundo") # connects smtp = smtplib.SMTP(HOST, self.port) smtp.close() def testSourceAddress(self): mock_socket.reply_with(b"220 Hola mundo") # connects smtp = smtplib.SMTP(HOST, self.port, source_address=('127.0.0.1',19876)) self.assertEqual(smtp.source_address, ('127.0.0.1', 19876)) smtp.close() def testBasic2(self): mock_socket.reply_with(b"220 Hola mundo") # connects, include port in host name smtp = smtplib.SMTP("%s:%s" % (HOST, self.port)) smtp.close() def testLocalHostName(self): mock_socket.reply_with(b"220 Hola mundo") # check that supplied local_hostname is used smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost") self.assertEqual(smtp.local_hostname, "testhost") smtp.close() def testTimeoutDefault(self): mock_socket.reply_with(b"220 Hola mundo") self.assertIsNone(mock_socket.getdefaulttimeout()) mock_socket.setdefaulttimeout(30) self.assertEqual(mock_socket.getdefaulttimeout(), 30) try: smtp = smtplib.SMTP(HOST, self.port) finally: mock_socket.setdefaulttimeout(None) self.assertEqual(smtp.sock.gettimeout(), 30) smtp.close() def testTimeoutNone(self): mock_socket.reply_with(b"220 Hola mundo") self.assertIsNone(socket.getdefaulttimeout()) socket.setdefaulttimeout(30) try: smtp = smtplib.SMTP(HOST, self.port, timeout=None) finally: socket.setdefaulttimeout(None) self.assertIsNone(smtp.sock.gettimeout()) smtp.close() def testTimeoutValue(self): mock_socket.reply_with(b"220 Hola mundo") smtp = smtplib.SMTP(HOST, self.port, timeout=30) self.assertEqual(smtp.sock.gettimeout(), 30) smtp.close() def test_debuglevel(self): mock_socket.reply_with(b"220 Hello world") smtp = smtplib.SMTP() smtp.set_debuglevel(1) with support.captured_stderr() as stderr: smtp.connect(HOST, self.port) smtp.close() expected = re.compile(r"^connect:", re.MULTILINE) self.assertRegex(stderr.getvalue(), expected) def test_debuglevel_2(self): mock_socket.reply_with(b"220 Hello world") smtp = smtplib.SMTP() smtp.set_debuglevel(2) with support.captured_stderr() as stderr: smtp.connect(HOST, self.port) smtp.close() expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ", re.MULTILINE) self.assertRegex(stderr.getvalue(), 
expected) # Test server thread using the specified SMTP server class def debugging_server(serv, serv_evt, client_evt): serv_evt.set() try: if hasattr(select, 'poll'): poll_fun = asyncore.poll2 else: poll_fun = asyncore.poll n = 1000 while asyncore.socket_map and n > 0: poll_fun(0.01, asyncore.socket_map) # when the client conversation is finished, it will # set client_evt, and it's then ok to kill the server if client_evt.is_set(): serv.close() break n -= 1 except socket.timeout: pass finally: if not client_evt.is_set(): # allow some time for the client to read the result time.sleep(0.5) serv.close() asyncore.close_all() serv_evt.set() MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n' MSG_END = '------------ END MESSAGE ------------\n' # NOTE: Some SMTP objects in the tests below are created with a non-default # local_hostname argument to the constructor, since (on some systems) the FQDN # lookup caused by the default local_hostname sometimes takes so long that the # test server times out, causing the test to fail. # Test behavior of smtpd.DebuggingServer class DebuggingServerTests(unittest.TestCase): maxDiff = None def setUp(self): self.real_getfqdn = socket.getfqdn socket.getfqdn = mock_socket.getfqdn # temporarily replace sys.stdout to capture DebuggingServer output self.old_stdout = sys.stdout self.output = io.StringIO() sys.stdout = self.output self.serv_evt = threading.Event() self.client_evt = threading.Event() # Capture SMTPChannel debug output self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM smtpd.DEBUGSTREAM = io.StringIO() # Pick a random unused port by passing 0 for the port number self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1), decode_data=True) # Keep a note of what server host and port were assigned self.host, self.port = self.serv.socket.getsockname()[:2] serv_args = (self.serv, self.serv_evt, self.client_evt) self.thread = threading.Thread(target=debugging_server, args=serv_args) self.thread.start() # wait until server thread has assigned a port number self.serv_evt.wait() self.serv_evt.clear() def tearDown(self): socket.getfqdn = self.real_getfqdn # indicate that the client is finished self.client_evt.set() # wait for the server thread to terminate self.serv_evt.wait() self.thread.join() # restore sys.stdout sys.stdout = self.old_stdout # restore DEBUGSTREAM smtpd.DEBUGSTREAM.close() smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM def get_output_without_xpeer(self): test_output = self.output.getvalue() return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2', test_output, flags=re.MULTILINE|re.DOTALL) def testBasic(self): # connect smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.quit() def testSourceAddress(self): # connect src_port = support.find_unused_port() try: smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost', timeout=3, source_address=(self.host, src_port)) self.assertEqual(smtp.source_address, (self.host, src_port)) self.assertEqual(smtp.local_hostname, 'localhost') smtp.quit() except OSError as e: if e.errno == errno.EADDRINUSE: self.skipTest("couldn't bind to source port %d" % src_port) raise def testNOOP(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) expected = (250, b'OK') self.assertEqual(smtp.noop(), expected) smtp.quit() def testRSET(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) expected = (250, b'OK') self.assertEqual(smtp.rset(), expected) smtp.quit() def testELHO(self): # EHLO isn't implemented in DebuggingServer smtp = 
smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) expected = (250, b'\nSIZE 33554432\nHELP') self.assertEqual(smtp.ehlo(), expected) smtp.quit() def testEXPNNotImplemented(self): # EXPN isn't implemented in DebuggingServer smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) expected = (502, b'EXPN not implemented') smtp.putcmd('EXPN') self.assertEqual(smtp.getreply(), expected) smtp.quit() def testVRFY(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) expected = (252, b'Cannot VRFY user, but will accept message ' + \ b'and attempt delivery') self.assertEqual(smtp.vrfy('[email protected]'), expected) self.assertEqual(smtp.verify('[email protected]'), expected) smtp.quit() def testSecondHELO(self): # check that a second HELO returns a message that it's a duplicate # (this behavior is specific to smtpd.SMTPChannel) smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.helo() expected = (503, b'Duplicate HELO/EHLO') self.assertEqual(smtp.helo(), expected) smtp.quit() def testHELP(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \ b'RCPT DATA RSET NOOP QUIT VRFY') smtp.quit() def testSend(self): # connect and send mail m = 'A test message' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.sendmail('John', 'Sally', m) # XXX(nnorwitz): this test is flaky and dies with a bad file descriptor # in asyncore. This sleep might help, but should really be fixed # properly by using an Event variable. time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END) self.assertEqual(self.output.getvalue(), mexpect) def testSendBinary(self): m = b'A test message' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.sendmail('John', 'Sally', m) # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END) self.assertEqual(self.output.getvalue(), mexpect) def testSendNeedingDotQuote(self): # Issue 12283 m = '.A test\n.mes.sage.' 
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.sendmail('John', 'Sally', m) # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END) self.assertEqual(self.output.getvalue(), mexpect) def testSendNullSender(self): m = 'A test message' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.sendmail('<>', 'Sally', m) # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END) self.assertEqual(self.output.getvalue(), mexpect) debugout = smtpd.DEBUGSTREAM.getvalue() sender = re.compile("^sender: <>$", re.MULTILINE) self.assertRegex(debugout, sender) def testSendMessage(self): m = email.mime.text.MIMEText('A test message') smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.send_message(m, from_addr='John', to_addrs='Sally') # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() # Remove the X-Peer header that DebuggingServer adds as figuring out # exactly what IP address format is put there is not easy (and # irrelevant to our test). Typically 127.0.0.1 or ::1, but it is # not always the same as socket.gethostbyname(HOST). :( test_output = self.get_output_without_xpeer() del m['X-Peer'] mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END) self.assertEqual(test_output, mexpect) def testSendMessageWithAddresses(self): m = email.mime.text.MIMEText('A test message') m['From'] = '[email protected]' m['To'] = 'John' m['CC'] = 'Sally, Fred' m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.send_message(m) # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() # make sure the Bcc header is still in the message. self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" ' '<[email protected]>') self.client_evt.set() self.serv_evt.wait() self.output.flush() # Remove the X-Peer header that DebuggingServer adds. test_output = self.get_output_without_xpeer() del m['X-Peer'] # The Bcc header should not be transmitted. del m['Bcc'] mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END) self.assertEqual(test_output, mexpect) debugout = smtpd.DEBUGSTREAM.getvalue() sender = re.compile("^sender: [email protected]$", re.MULTILINE) self.assertRegex(debugout, sender) for addr in ('John', 'Sally', 'Fred', 'root@localhost', '[email protected]'): to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE) self.assertRegex(debugout, to_addr) def testSendMessageWithSomeAddresses(self): # Make sure nothing breaks if not all of the three 'to' headers exist m = email.mime.text.MIMEText('A test message') m['From'] = '[email protected]' m['To'] = 'John, Dinsdale' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.send_message(m) # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() # Remove the X-Peer header that DebuggingServer adds. 
test_output = self.get_output_without_xpeer() del m['X-Peer'] mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END) self.assertEqual(test_output, mexpect) debugout = smtpd.DEBUGSTREAM.getvalue() sender = re.compile("^sender: [email protected]$", re.MULTILINE) self.assertRegex(debugout, sender) for addr in ('John', 'Dinsdale'): to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE) self.assertRegex(debugout, to_addr) def testSendMessageWithSpecifiedAddresses(self): # Make sure addresses specified in call override those in message. m = email.mime.text.MIMEText('A test message') m['From'] = '[email protected]' m['To'] = 'John, Dinsdale' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.send_message(m, from_addr='[email protected]', to_addrs='[email protected]') # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() # Remove the X-Peer header that DebuggingServer adds. test_output = self.get_output_without_xpeer() del m['X-Peer'] mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END) self.assertEqual(test_output, mexpect) debugout = smtpd.DEBUGSTREAM.getvalue() sender = re.compile("^sender: [email protected]$", re.MULTILINE) self.assertRegex(debugout, sender) for addr in ('John', 'Dinsdale'): to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE) self.assertNotRegex(debugout, to_addr) recip = re.compile(r"^recips: .*'[email protected]'.*$", re.MULTILINE) self.assertRegex(debugout, recip) def testSendMessageWithMultipleFrom(self): # Sender overrides To m = email.mime.text.MIMEText('A test message') m['From'] = 'Bernard, Bianca' m['Sender'] = '[email protected]' m['To'] = 'John, Dinsdale' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.send_message(m) # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() # Remove the X-Peer header that DebuggingServer adds. test_output = self.get_output_without_xpeer() del m['X-Peer'] mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END) self.assertEqual(test_output, mexpect) debugout = smtpd.DEBUGSTREAM.getvalue() sender = re.compile("^sender: [email protected]$", re.MULTILINE) self.assertRegex(debugout, sender) for addr in ('John', 'Dinsdale'): to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE) self.assertRegex(debugout, to_addr) def testSendMessageResent(self): m = email.mime.text.MIMEText('A test message') m['From'] = '[email protected]' m['To'] = 'John' m['CC'] = 'Sally, Fred' m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>' m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000' m['Resent-From'] = '[email protected]' m['Resent-To'] = 'Martha <[email protected]>, Jeff' m['Resent-Bcc'] = '[email protected]' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) smtp.send_message(m) # XXX (see comment in testSend) time.sleep(0.01) smtp.quit() self.client_evt.set() self.serv_evt.wait() self.output.flush() # The Resent-Bcc headers are deleted before serialization. del m['Bcc'] del m['Resent-Bcc'] # Remove the X-Peer header that DebuggingServer adds. 
test_output = self.get_output_without_xpeer() del m['X-Peer'] mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END) self.assertEqual(test_output, mexpect) debugout = smtpd.DEBUGSTREAM.getvalue() sender = re.compile("^sender: [email protected]$", re.MULTILINE) self.assertRegex(debugout, sender) for addr in ('[email protected]', 'Jeff', '[email protected]'): to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE) self.assertRegex(debugout, to_addr) def testSendMessageMultipleResentRaises(self): m = email.mime.text.MIMEText('A test message') m['From'] = '[email protected]' m['To'] = 'John' m['CC'] = 'Sally, Fred' m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>' m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000' m['Resent-From'] = '[email protected]' m['Resent-To'] = 'Martha <[email protected]>, Jeff' m['Resent-Bcc'] = '[email protected]' m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000' m['Resent-To'] = '[email protected]' m['Resent-From'] = 'Martha <[email protected]>, Jeff' smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3) with self.assertRaises(ValueError): smtp.send_message(m) smtp.close() class NonConnectingTests(unittest.TestCase): def testNotConnected(self): # Test various operations on an unconnected SMTP object that # should raise exceptions (at present the attempt in SMTP.send # to reference the nonexistent 'sock' attribute of the SMTP object # causes an AttributeError) smtp = smtplib.SMTP() self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo) self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, 'test msg') def testNonnumericPort(self): # check that non-numeric port raises OSError self.assertRaises(OSError, smtplib.SMTP, "localhost", "bogus") self.assertRaises(OSError, smtplib.SMTP, "localhost:bogus") class DefaultArgumentsTests(unittest.TestCase): def setUp(self): self.msg = EmailMessage() self.msg['From'] = 'Páolo <fő[email protected]>' self.smtp = smtplib.SMTP() self.smtp.ehlo = Mock(return_value=(200, 'OK')) self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock() def testSendMessage(self): expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME') self.smtp.send_message(self.msg) self.smtp.send_message(self.msg) self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3], expected_mail_options) self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3], expected_mail_options) def testSendMessageWithMailOptions(self): mail_options = ['STARTTLS'] expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME') self.smtp.send_message(self.msg, None, None, mail_options) self.assertEqual(mail_options, ['STARTTLS']) self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3], expected_mail_options) # test response of client to a non-successful HELO message class BadHELOServerTests(unittest.TestCase): def setUp(self): smtplib.socket = mock_socket mock_socket.reply_with(b"199 no hello for you!") self.old_stdout = sys.stdout self.output = io.StringIO() sys.stdout = self.output self.port = 25 def tearDown(self): smtplib.socket = socket sys.stdout = self.old_stdout def testFailingHELO(self): self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP, HOST, self.port, 'localhost', 3) class TooLongLineTests(unittest.TestCase): respdata = b'250 OK' + (b'.' 
* smtplib._MAXLINE * 2) + b'\n' def setUp(self): self.old_stdout = sys.stdout self.output = io.StringIO() sys.stdout = self.output self.evt = threading.Event() self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(15) self.port = support.bind_port(self.sock) servargs = (self.evt, self.respdata, self.sock) thread = threading.Thread(target=server, args=servargs) thread.start() self.addCleanup(thread.join) self.evt.wait() self.evt.clear() def tearDown(self): self.evt.wait() sys.stdout = self.old_stdout def testLineTooLong(self): self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP, HOST, self.port, 'localhost', 3) sim_users = {'[email protected]':'John A', '[email protected]':'Sally B', '[email protected]':'Ruth C', } sim_auth = ('[email protected]', 'somepassword') sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn' 'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=') sim_lists = {'list-1':['[email protected]','[email protected]'], 'list-2':['[email protected]',], } # Simulated SMTP channel & server class ResponseException(Exception): pass class SimSMTPChannel(smtpd.SMTPChannel): quit_response = None mail_response = None rcpt_response = None data_response = None rcpt_count = 0 rset_count = 0 disconnect = 0 AUTH = 99 # Add protocol state to enable auth testing. authenticated_user = None def __init__(self, extra_features, *args, **kw): self._extrafeatures = ''.join( [ "250-{0}\r\n".format(x) for x in extra_features ]) super(SimSMTPChannel, self).__init__(*args, **kw) # AUTH related stuff. It would be nice if support for this were in smtpd. def found_terminator(self): if self.smtp_state == self.AUTH: line = self._emptystring.join(self.received_lines) print('Data:', repr(line), file=smtpd.DEBUGSTREAM) self.received_lines = [] try: self.auth_object(line) except ResponseException as e: self.smtp_state = self.COMMAND self.push('%s %s' % (e.smtp_code, e.smtp_error)) return super().found_terminator() def smtp_AUTH(self, arg): if not self.seen_greeting: self.push('503 Error: send EHLO first') return if not self.extended_smtp or 'AUTH' not in self._extrafeatures: self.push('500 Error: command "AUTH" not recognized') return if self.authenticated_user is not None: self.push( '503 Bad sequence of commands: already authenticated') return args = arg.split() if len(args) not in [1, 2]: self.push('501 Syntax: AUTH <mechanism> [initial-response]') return auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_') try: self.auth_object = getattr(self, auth_object_name) except AttributeError: self.push('504 Command parameter not implemented: unsupported ' ' authentication mechanism {!r}'.format(auth_object_name)) return self.smtp_state = self.AUTH self.auth_object(args[1] if len(args) == 2 else None) def _authenticated(self, user, valid): if valid: self.authenticated_user = user self.push('235 Authentication Succeeded') else: self.push('535 Authentication credentials invalid') self.smtp_state = self.COMMAND def _decode_base64(self, string): return base64.decodebytes(string.encode('ascii')).decode('utf-8') def _auth_plain(self, arg=None): if arg is None: self.push('334 ') else: logpass = self._decode_base64(arg) try: *_, user, password = logpass.split('\0') except ValueError as e: self.push('535 Splitting response {!r} into user and password' ' failed: {}'.format(logpass, e)) return self._authenticated(user, password == sim_auth[1]) def _auth_login(self, arg=None): if arg is None: # base64 encoded 'Username:' self.push('334 VXNlcm5hbWU6') elif not hasattr(self, '_auth_login_user'): 
self._auth_login_user = self._decode_base64(arg) # base64 encoded 'Password:' self.push('334 UGFzc3dvcmQ6') else: password = self._decode_base64(arg) self._authenticated(self._auth_login_user, password == sim_auth[1]) del self._auth_login_user def _auth_cram_md5(self, arg=None): if arg is None: self.push('334 {}'.format(sim_cram_md5_challenge)) else: logpass = self._decode_base64(arg) try: user, hashed_pass = logpass.split() except ValueError as e: self.push('535 Splitting response {!r} into user and password ' 'failed: {}'.format(logpass, e)) return False valid_hashed_pass = hmac.HMAC( sim_auth[1].encode('ascii'), self._decode_base64(sim_cram_md5_challenge).encode('ascii'), 'md5').hexdigest() self._authenticated(user, hashed_pass == valid_hashed_pass) # end AUTH related stuff. def smtp_EHLO(self, arg): resp = ('250-testhost\r\n' '250-EXPN\r\n' '250-SIZE 20000000\r\n' '250-STARTTLS\r\n' '250-DELIVERBY\r\n') resp = resp + self._extrafeatures + '250 HELP' self.push(resp) self.seen_greeting = arg self.extended_smtp = True def smtp_VRFY(self, arg): # For max compatibility smtplib should be sending the raw address. if arg in sim_users: self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg))) else: self.push('550 No such user: %s' % arg) def smtp_EXPN(self, arg): list_name = arg.lower() if list_name in sim_lists: user_list = sim_lists[list_name] for n, user_email in enumerate(user_list): quoted_addr = smtplib.quoteaddr(user_email) if n < len(user_list) - 1: self.push('250-%s %s' % (sim_users[user_email], quoted_addr)) else: self.push('250 %s %s' % (sim_users[user_email], quoted_addr)) else: self.push('550 No access for you!') def smtp_QUIT(self, arg): if self.quit_response is None: super(SimSMTPChannel, self).smtp_QUIT(arg) else: self.push(self.quit_response) self.close_when_done() def smtp_MAIL(self, arg): if self.mail_response is None: super().smtp_MAIL(arg) else: self.push(self.mail_response) if self.disconnect: self.close_when_done() def smtp_RCPT(self, arg): if self.rcpt_response is None: super().smtp_RCPT(arg) return self.rcpt_count += 1 self.push(self.rcpt_response[self.rcpt_count-1]) def smtp_RSET(self, arg): self.rset_count += 1 super().smtp_RSET(arg) def smtp_DATA(self, arg): if self.data_response is None: super().smtp_DATA(arg) else: self.push(self.data_response) def handle_error(self): raise class SimSMTPServer(smtpd.SMTPServer): channel_class = SimSMTPChannel def __init__(self, *args, **kw): self._extra_features = [] self._addresses = {} smtpd.SMTPServer.__init__(self, *args, **kw) def handle_accepted(self, conn, addr): self._SMTPchannel = self.channel_class( self._extra_features, self, conn, addr, decode_data=self._decode_data) def process_message(self, peer, mailfrom, rcpttos, data): self._addresses['from'] = mailfrom self._addresses['tos'] = rcpttos def add_feature(self, feature): self._extra_features.append(feature) def handle_error(self): raise # Test various SMTP & ESMTP commands/behaviors that require a simulated server # (i.e., something with more features than DebuggingServer) class SMTPSimTests(unittest.TestCase): def setUp(self): self.real_getfqdn = socket.getfqdn socket.getfqdn = mock_socket.getfqdn self.serv_evt = threading.Event() self.client_evt = threading.Event() # Pick a random unused port by passing 0 for the port number self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True) # Keep a note of what port was assigned self.port = self.serv.socket.getsockname()[1] serv_args = (self.serv, self.serv_evt, self.client_evt) self.thread = 
threading.Thread(target=debugging_server, args=serv_args) self.thread.start() # wait until server thread has assigned a port number self.serv_evt.wait() self.serv_evt.clear() def tearDown(self): socket.getfqdn = self.real_getfqdn # indicate that the client is finished self.client_evt.set() # wait for the server thread to terminate self.serv_evt.wait() self.thread.join() def testBasic(self): # smoke test smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.quit() def testEHLO(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) # no features should be present before the EHLO self.assertEqual(smtp.esmtp_features, {}) # features expected from the test server expected_features = {'expn':'', 'size': '20000000', 'starttls': '', 'deliverby': '', 'help': '', } smtp.ehlo() self.assertEqual(smtp.esmtp_features, expected_features) for k in expected_features: self.assertTrue(smtp.has_extn(k)) self.assertFalse(smtp.has_extn('unsupported-feature')) smtp.quit() def testVRFY(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) for addr_spec, name in sim_users.items(): expected_known = (250, bytes('%s %s' % (name, smtplib.quoteaddr(addr_spec)), "ascii")) self.assertEqual(smtp.vrfy(addr_spec), expected_known) u = '[email protected]' expected_unknown = (550, ('No such user: %s' % u).encode('ascii')) self.assertEqual(smtp.vrfy(u), expected_unknown) smtp.quit() def testEXPN(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) for listname, members in sim_lists.items(): users = [] for m in members: users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m))) expected_known = (250, bytes('\n'.join(users), "ascii")) self.assertEqual(smtp.expn(listname), expected_known) u = 'PSU-Members-List' expected_unknown = (550, b'No access for you!') self.assertEqual(smtp.expn(u), expected_unknown) smtp.quit() def testAUTH_PLAIN(self): self.serv.add_feature("AUTH PLAIN") smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) resp = smtp.login(sim_auth[0], sim_auth[1]) self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def testAUTH_LOGIN(self): self.serv.add_feature("AUTH LOGIN") smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) resp = smtp.login(sim_auth[0], sim_auth[1]) self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def testAUTH_CRAM_MD5(self): self.serv.add_feature("AUTH CRAM-MD5") smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) resp = smtp.login(sim_auth[0], sim_auth[1]) self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def testAUTH_multiple(self): # Test that multiple authentication methods are tried. 
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5") smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) resp = smtp.login(sim_auth[0], sim_auth[1]) self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def test_auth_function(self): supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'} for mechanism in supported: self.serv.add_feature("AUTH {}".format(mechanism)) for mechanism in supported: with self.subTest(mechanism=mechanism): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.ehlo('foo') smtp.user, smtp.password = sim_auth[0], sim_auth[1] method = 'auth_' + mechanism.lower().replace('-', '_') resp = smtp.auth(mechanism, getattr(smtp, method)) self.assertEqual(resp, (235, b'Authentication Succeeded')) smtp.close() def test_quit_resets_greeting(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) code, message = smtp.ehlo() self.assertEqual(code, 250) self.assertIn('size', smtp.esmtp_features) smtp.quit() self.assertNotIn('size', smtp.esmtp_features) smtp.connect(HOST, self.port) self.assertNotIn('size', smtp.esmtp_features) smtp.ehlo_or_helo_if_needed() self.assertIn('size', smtp.esmtp_features) smtp.quit() def test_with_statement(self): with smtplib.SMTP(HOST, self.port) as smtp: code, message = smtp.noop() self.assertEqual(code, 250) self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo') with smtplib.SMTP(HOST, self.port) as smtp: smtp.close() self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo') def test_with_statement_QUIT_failure(self): with self.assertRaises(smtplib.SMTPResponseException) as error: with smtplib.SMTP(HOST, self.port) as smtp: smtp.noop() self.serv._SMTPchannel.quit_response = '421 QUIT FAILED' self.assertEqual(error.exception.smtp_code, 421) self.assertEqual(error.exception.smtp_error, b'QUIT FAILED') #TODO: add tests for correct AUTH method fallback now that the #test infrastructure can support it. 
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception def test__rest_from_mail_cmd(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.noop() self.serv._SMTPchannel.mail_response = '451 Requested action aborted' self.serv._SMTPchannel.disconnect = True with self.assertRaises(smtplib.SMTPSenderRefused): smtp.sendmail('John', 'Sally', 'test message') self.assertIsNone(smtp.sock) # Issue 5713: make sure close, not rset, is called if we get a 421 error def test_421_from_mail_cmd(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.noop() self.serv._SMTPchannel.mail_response = '421 closing connection' with self.assertRaises(smtplib.SMTPSenderRefused): smtp.sendmail('John', 'Sally', 'test message') self.assertIsNone(smtp.sock) self.assertEqual(self.serv._SMTPchannel.rset_count, 0) def test_421_from_rcpt_cmd(self): smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.noop() self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing'] with self.assertRaises(smtplib.SMTPRecipientsRefused) as r: smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message') self.assertIsNone(smtp.sock) self.assertEqual(self.serv._SMTPchannel.rset_count, 0) self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')}) def test_421_from_data_cmd(self): class MySimSMTPChannel(SimSMTPChannel): def found_terminator(self): if self.smtp_state == self.DATA: self.push('421 closing') else: super().found_terminator() self.serv.channel_class = MySimSMTPChannel smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.noop() with self.assertRaises(smtplib.SMTPDataError): smtp.sendmail('[email protected]', ['[email protected]'], 'test message') self.assertIsNone(smtp.sock) self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0) def test_smtputf8_NotSupportedError_if_no_server_support(self): smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) smtp.ehlo() self.assertTrue(smtp.does_esmtp) self.assertFalse(smtp.has_extn('smtputf8')) self.assertRaises( smtplib.SMTPNotSupportedError, smtp.sendmail, 'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8']) self.assertRaises( smtplib.SMTPNotSupportedError, smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8']) def test_send_unicode_without_SMTPUTF8(self): smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '') self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice') def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self): # This test is located here and not in the SMTPUTF8SimTests # class because it needs a "regular" SMTP server to work msg = EmailMessage() msg['From'] = "Páolo <fő[email protected]>" msg['To'] = 'Dinsdale' msg['Subject'] = 'Nudge nudge, wink, wink \u1F609' smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) with self.assertRaises(smtplib.SMTPNotSupportedError): smtp.send_message(msg) def test_name_field_not_included_in_envelop_addresses(self): smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3 ) self.addCleanup(smtp.close) message = EmailMessage() message['From'] = email.utils.formataddr(('Michaël', '[email protected]')) message['To'] = email.utils.formataddr(('René', '[email protected]')) 
self.assertDictEqual(smtp.send_message(message), {}) self.assertEqual(self.serv._addresses['from'], '[email protected]') self.assertEqual(self.serv._addresses['tos'], ['[email protected]']) class SimSMTPUTF8Server(SimSMTPServer): def __init__(self, *args, **kw): # The base SMTP server turns these on automatically, but our test # server is set up to munge the EHLO response, so we need to provide # them as well. And yes, the call is to SMTPServer not SimSMTPServer. self._extra_features = ['SMTPUTF8', '8BITMIME'] smtpd.SMTPServer.__init__(self, *args, **kw) def handle_accepted(self, conn, addr): self._SMTPchannel = self.channel_class( self._extra_features, self, conn, addr, decode_data=self._decode_data, enable_SMTPUTF8=self.enable_SMTPUTF8, ) def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None, rcpt_options=None): self.last_peer = peer self.last_mailfrom = mailfrom self.last_rcpttos = rcpttos self.last_message = data self.last_mail_options = mail_options self.last_rcpt_options = rcpt_options class SMTPUTF8SimTests(unittest.TestCase): maxDiff = None def setUp(self): self.real_getfqdn = socket.getfqdn socket.getfqdn = mock_socket.getfqdn self.serv_evt = threading.Event() self.client_evt = threading.Event() # Pick a random unused port by passing 0 for the port number self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1), decode_data=False, enable_SMTPUTF8=True) # Keep a note of what port was assigned self.port = self.serv.socket.getsockname()[1] serv_args = (self.serv, self.serv_evt, self.client_evt) self.thread = threading.Thread(target=debugging_server, args=serv_args) self.thread.start() # wait until server thread has assigned a port number self.serv_evt.wait() self.serv_evt.clear() def tearDown(self): socket.getfqdn = self.real_getfqdn # indicate that the client is finished self.client_evt.set() # wait for the server thread to terminate self.serv_evt.wait() self.thread.join() def test_test_server_supports_extensions(self): smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) smtp.ehlo() self.assertTrue(smtp.does_esmtp) self.assertTrue(smtp.has_extn('smtputf8')) def test_send_unicode_with_SMTPUTF8_via_sendmail(self): m = '¡a test message containing unicode!'.encode('utf-8') smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) smtp.sendmail('Jőhn', 'Sálly', m, mail_options=['BODY=8BITMIME', 'SMTPUTF8']) self.assertEqual(self.serv.last_mailfrom, 'Jőhn') self.assertEqual(self.serv.last_rcpttos, ['Sálly']) self.assertEqual(self.serv.last_message, m) self.assertIn('BODY=8BITMIME', self.serv.last_mail_options) self.assertIn('SMTPUTF8', self.serv.last_mail_options) self.assertEqual(self.serv.last_rcpt_options, []) def test_send_unicode_with_SMTPUTF8_via_low_level_API(self): m = '¡a test message containing unicode!'.encode('utf-8') smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) smtp.ehlo() self.assertEqual( smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']), (250, b'OK')) self.assertEqual(smtp.rcpt('János'), (250, b'OK')) self.assertEqual(smtp.data(m), (250, b'OK')) self.assertEqual(self.serv.last_mailfrom, 'Jő') self.assertEqual(self.serv.last_rcpttos, ['János']) self.assertEqual(self.serv.last_message, m) self.assertIn('BODY=8BITMIME', self.serv.last_mail_options) self.assertIn('SMTPUTF8', self.serv.last_mail_options) self.assertEqual(self.serv.last_rcpt_options, []) def 
test_send_message_uses_smtputf8_if_addrs_non_ascii(self): msg = EmailMessage() msg['From'] = "Páolo <fő[email protected]>" msg['To'] = 'Dinsdale' msg['Subject'] = 'Nudge nudge, wink, wink \u1F609' # XXX I don't know why I need two \n's here, but this is an existing # bug (if it is one) and not a problem with the new functionality. msg.set_content("oh là là, know what I mean, know what I mean?\n\n") # XXX smtpd converts received /r/n to /n, so we can't easily test that # we are successfully sending /r/n :(. expected = textwrap.dedent("""\ From: Páolo <fő[email protected]> To: Dinsdale Subject: Nudge nudge, wink, wink \u1F609 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit MIME-Version: 1.0 oh là là, know what I mean, know what I mean? """) smtp = smtplib.SMTP( HOST, self.port, local_hostname='localhost', timeout=3) self.addCleanup(smtp.close) self.assertEqual(smtp.send_message(msg), {}) self.assertEqual(self.serv.last_mailfrom, 'fő[email protected]') self.assertEqual(self.serv.last_rcpttos, ['Dinsdale']) self.assertEqual(self.serv.last_message.decode(), expected) self.assertIn('BODY=8BITMIME', self.serv.last_mail_options) self.assertIn('SMTPUTF8', self.serv.last_mail_options) self.assertEqual(self.serv.last_rcpt_options, []) EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='') class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel): def smtp_AUTH(self, arg): # RFC 4954's AUTH command allows for an optional initial-response. # Not all AUTH methods support this; some require a challenge. AUTH # PLAIN does those, so test that here. See issue #15014. args = arg.split() if args[0].lower() == 'plain': if len(args) == 2: # AUTH PLAIN <initial-response> with the response base 64 # encoded. Hard code the expected response for the test. if args[1] == EXPECTED_RESPONSE: self.push('235 Ok') return self.push('571 Bad authentication') class SimSMTPAUTHInitialResponseServer(SimSMTPServer): channel_class = SimSMTPAUTHInitialResponseChannel class SMTPAUTHInitialResponseSimTests(unittest.TestCase): def setUp(self): self.real_getfqdn = socket.getfqdn socket.getfqdn = mock_socket.getfqdn self.serv_evt = threading.Event() self.client_evt = threading.Event() # Pick a random unused port by passing 0 for the port number self.serv = SimSMTPAUTHInitialResponseServer( (HOST, 0), ('nowhere', -1), decode_data=True) # Keep a note of what port was assigned self.port = self.serv.socket.getsockname()[1] serv_args = (self.serv, self.serv_evt, self.client_evt) self.thread = threading.Thread(target=debugging_server, args=serv_args) self.thread.start() # wait until server thread has assigned a port number self.serv_evt.wait() self.serv_evt.clear() def tearDown(self): socket.getfqdn = self.real_getfqdn # indicate that the client is finished self.client_evt.set() # wait for the server thread to terminate self.serv_evt.wait() self.thread.join() def testAUTH_PLAIN_initial_response_login(self): self.serv.add_feature('AUTH PLAIN') smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.login('psu', 'doesnotexist') smtp.close() def testAUTH_PLAIN_initial_response_auth(self): self.serv.add_feature('AUTH PLAIN') smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15) smtp.user = 'psu' smtp.password = 'doesnotexist' code, response = smtp.auth('plain', smtp.auth_plain) smtp.close() self.assertEqual(code, 235) if __name__ == '__main__': unittest.main()
py
b4149d7c9cc12c5bac2cb6fe4bd5425b56a51308
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import base_response from nitro.service.options import options from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import nitro_util class vpnglobal_vpnintranetapplication_binding(base_resource) : """Binding class showing the vpnintranetapplication that can be bound to vpnglobal.""" def __init__(self) : self._intranetapplication = "" self.___count = 0 @property def intranetapplication(self) : """The intranet vpn application.""" try : return self._intranetapplication except Exception as e: raise e @intranetapplication.setter def intranetapplication(self, intranetapplication) : """The intranet vpn application. :param intranetapplication: """ try : self._intranetapplication = intranetapplication except Exception as e: raise e def _get_nitro_response(self, service, response) : """converts nitro response into object and returns the object array in case of get request. :param service: :param response: """ try : result = service.payload_formatter.string_to_resource(vpnglobal_vpnintranetapplication_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.vpnglobal_vpnintranetapplication_binding except Exception as e : raise e def _get_object_name(self) : """Returns the value of object identifier argument""" try : return 0 except Exception as e : raise e @classmethod def add(cls, client, resource) : """ :param client: :param resource: """ try : if resource and type(resource) is not list : updateresource = vpnglobal_vpnintranetapplication_binding() updateresource.intranetapplication = resource.intranetapplication return updateresource.update_resource(client) else : if resource and len(resource) > 0 : updateresources = [vpnglobal_vpnintranetapplication_binding() for _ in range(len(resource))] for i in range(len(resource)) : updateresources[i].intranetapplication = resource[i].intranetapplication return cls.update_bulk_request(client, updateresources) except Exception as e : raise e @classmethod def delete(cls, client, resource) : """ :param client: :param resource: """ try : if resource and type(resource) is not list : deleteresource = vpnglobal_vpnintranetapplication_binding() deleteresource.intranetapplication = resource.intranetapplication return deleteresource.delete_resource(client) else : if resource and len(resource) > 0 : deleteresources = [vpnglobal_vpnintranetapplication_binding() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].intranetapplication = resource[i].intranetapplication return cls.delete_bulk_request(client, 
deleteresources) except Exception as e : raise e @classmethod def get(cls, service) : """Use this API to fetch a vpnglobal_vpnintranetapplication_binding resources. :param service: """ try : obj = vpnglobal_vpnintranetapplication_binding() response = obj.get_resources(service) return response except Exception as e: raise e @classmethod def get_filtered(cls, service, filter_) : """Use this API to fetch filtered set of vpnglobal_vpnintranetapplication_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". :param service: :param filter_: """ try : obj = vpnglobal_vpnintranetapplication_binding() option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e @classmethod def count(cls, service) : """Use this API to count vpnglobal_vpnintranetapplication_binding resources configued on NetScaler. :param service: """ try : obj = vpnglobal_vpnintranetapplication_binding() option_ = options() option_.count = True response = obj.get_resources(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e @classmethod def count_filtered(cls, service, filter_) : """Use this API to count the filtered set of vpnglobal_vpnintranetapplication_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". :param service: :param filter_: """ try : obj = vpnglobal_vpnintranetapplication_binding() option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e class vpnglobal_vpnintranetapplication_binding_response(base_response) : """ """ def __init__(self, length=1) : self.vpnglobal_vpnintranetapplication_binding = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.vpnglobal_vpnintranetapplication_binding = [vpnglobal_vpnintranetapplication_binding() for _ in range(length)]
py
b4149e5cfb478ed7a80069994a6f78eefad70e36
"""Testing functions used for numpoly only functionality.""" from pytest import raises import sympy import numpy import numpoly from numpoly.poly_function.monomial.cross_truncation import cross_truncate X, Y = numpoly.symbols("X Y") def test_numpoly_call(): poly = X+Y with raises(TypeError): poly(1, X=2) with raises(TypeError): poly(1, 2, Y=3) with raises(TypeError): poly(not_an_arg=45) def test_numpoly_ndpoly(): poly = numpoly.ndpoly(exponents=[(1,)], shape=(), names="X") poly["<"] = 1 assert poly == X poly = numpoly.ndpoly(exponents=[(1,)], shape=(), names=X) poly["<"] = 1 assert poly == X poly = numpoly.ndpoly( exponents=[(1, 0), (0, 1)], shape=(), names=("X" ,"Y")) poly["<;"] = 2 poly[";<"] = 3 assert poly == 2*X+3*Y poly = numpoly.ndpoly( exponents=[(1, 0), (0, 1)], shape=(2,), names="Q") poly["<;"] = [1, 0] poly[";<"] = [0, 1] assert numpy.all(poly == numpoly.symbols("Q0 Q1")) def test_numpoly_polynomial(): assert numpoly.polynomial() == 0 assert numpoly.polynomial({(0,): 4}) == 4 assert numpoly.polynomial({(1,): 5}, names="X") == 5*X assert numpoly.polynomial( {(0, 1): 2, (1, 0): 3}, names=("X", "Y")) == 3*X+2*Y assert numpy.all(numpoly.polynomial( {(0, 1): [0, 1], (1, 0): [1, 0]}, names="Q" ) == numpoly.symbols("Q0 Q1")) assert numpoly.polynomial(X) == X assert numpoly.polynomial(numpy.array((3,), dtype=[(";", int)])) == 3 assert numpoly.polynomial(5.5) == 5.5 assert numpoly.polynomial(sympy.symbols("X")) == X assert numpy.all(numpoly.polynomial([1, 2, 3]) == [1, 2, 3]) assert numpy.all(numpoly.polynomial([[1, 2], [3, 4]]) == [[1, 2], [3, 4]]) assert numpy.all(numpoly.polynomial( numpy.array([[1, 2], [3, 4]])) == [[1, 2], [3, 4]]) def test_numpoly_isconstant(): assert not numpoly.polynomial(X).isconstant() assert numpoly.polynomial(1).isconstant() assert not numpoly.polynomial([1, X]).isconstant() assert numpoly.polynomial([1, 2]).isconstant() def test_numpoly_tonumpy(): assert isinstance(numpoly.tonumpy(numpoly.polynomial([1, 2, 3])), numpy.ndarray) with raises(ValueError): numpoly.tonumpy(X) def test_numpoly_cross_truncate(): indices = numpy.array(numpy.mgrid[:10, :10]).reshape(2, -1).T assert not numpy.any(cross_truncate(indices, -1, norm=0)) assert numpy.all(indices[cross_truncate(indices, 0, norm=0)].T == [[0], [0]]) assert numpy.all(indices[cross_truncate(indices, 1, norm=0)].T == [[0, 0, 1], [0, 1, 0]]) assert numpy.all(indices[cross_truncate(indices, 2, norm=0)].T == [[0, 0, 0, 1, 2], [0, 1, 2, 0, 0]]) assert not numpy.any(cross_truncate(indices, -1, norm=1)) assert numpy.all(indices[cross_truncate(indices, 0, norm=1)].T == [[0], [0]]) assert numpy.all(indices[cross_truncate(indices, 1, norm=1)].T == [[0, 0, 1], [0, 1, 0]]) assert numpy.all(indices[cross_truncate(indices, 2, norm=1)].T == [[0, 0, 0, 1, 1, 2], [0, 1, 2, 0, 1, 0]]) assert not numpy.any(cross_truncate(indices, -1, norm=100)) assert numpy.all(indices[cross_truncate(indices, 0, norm=100)].T == [[0], [0]]) assert numpy.all(indices[cross_truncate(indices, 1, norm=100)].T == [[0, 0, 1], [0, 1, 0]]) assert numpy.all(indices[cross_truncate(indices, 2, norm=100)].T == [[0, 0, 0, 1, 1, 1, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1]]) assert not numpy.any(cross_truncate(indices, -1, norm=numpy.inf)) assert numpy.all(indices[cross_truncate(indices, 0, norm=numpy.inf)].T == [[0], [0]]) assert numpy.all(indices[cross_truncate(indices, 1, norm=numpy.inf)].T == [[0, 0, 1, 1], [0, 1, 0, 1]]) assert numpy.all(indices[cross_truncate(indices, 2, norm=numpy.inf)].T == [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]]) def 
test_numpoly_bindex(): assert not numpoly.bindex(0).size assert numpy.all(numpoly.bindex(1) == [[0]]) assert numpy.all(numpoly.bindex(5) == [[0], [1], [2], [3], [4]]) assert numpy.all(numpoly.bindex(2, dimensions=2) == [[0, 0], [0, 1], [1, 0]]) assert numpy.all(numpoly.bindex(start=2, stop=3, dimensions=2) == [[0, 2], [1, 1], [2, 0]]) assert numpy.all(numpoly.bindex(start=2, stop=[3, 4], dimensions=2) == [[0, 2], [1, 1], [2, 0], [0, 3]]) assert numpy.all(numpoly.bindex(start=[2, 5], stop=[3, 6], dimensions=2) == [[1, 1], [2, 0], [1, 2], [0, 5]]) assert numpy.all(numpoly.bindex(start=2, stop=3, dimensions=2, ordering="I") == [[2, 0], [1, 1], [0, 2]]) assert numpy.all(numpoly.bindex(start=2, stop=4, dimensions=2, cross_truncation=0) == [[0, 2], [2, 0], [0, 3], [3, 0]]) assert numpy.all(numpoly.bindex(start=2, stop=4, dimensions=2, cross_truncation=1) == [[0, 2], [1, 1], [2, 0], [0, 3], [1, 2], [2, 1], [3, 0]]) assert numpy.all(numpoly.bindex(start=2, stop=4, dimensions=2, cross_truncation=2) == [[0, 2], [1, 1], [2, 0], [0, 3], [1, 2], [2, 1], [3, 0], [2, 2]]) assert numpy.all(numpoly.bindex(start=0, stop=2, dimensions=3) == [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]]) def test_numpoly_monomial(): assert not numpoly.monomial(0).size assert numpoly.monomial(1) == 1 assert numpy.all(numpoly.monomial(2, names="X") == [1, X])
py
b4149f2be11748853716d85cabc0ec39c35cff16
# -*- coding: utf-8 -*- from multiprocessing import Process from multiprocessing import Pipe import tensorflow as tf import numpy as np import random from ..game.game import Game from ..common.utils import ConsoleUtil as cu from ..common.utils import ConsoleColor as cc ADVANTAGE_STEPS = 5 GAMMA = 0.99 MIN_BATCH = 5 GAME_EVENT_TYPE_NONE = 0 GAME_EVENT_TYPE_REQUEST_ACTION = 1 GAME_EVENT_TYPE_SEND_RESULT = 2 GAME_EVENT_TYPE_EPOCH_FINISHED = 3 class Slave(object): def __init__(self, master, name): self.master = master self.name = name self.network = self.master.network.clone(self.name) self.network.build() self.epoch = 0 self.total_reward = 0 self.memory = [] self.R = 0 self.train_queue = [] self.apply_gradients_op = None self.copy_trainable_vars_op = None self._build_graph() def _build_graph(self): with tf.name_scope(self.name): self.apply_gradients_op = self.master.network.get_optimizer_op.apply_gradients( zip(self.network.get_gradients_op, self.master.network.get_trainable_variables_op)) self.copy_trainable_vars_op = [dst.assign(src) for dst, src in zip( self.network.get_trainable_variables_op, self.master.network.get_trainable_variables_op)] def select_action(self, step, state, enabled_actions): if not enabled_actions: return None # Use ε-greedy algorithm. epsilon = 0.1 + 0.9 / (1.0 + (self.epoch / 10)) use_random = not self.master.is_test() and epsilon >= np.random.uniform(0, 1) if step == 0 and self.is_main_slave(): cu.print_color('##### epoch {} #####'.format(self.epoch), cc.YELLOW) if self.master.is_test(): cu.print_color('test', cc.BLUE) else: cu.print_color('ε={}'.format(epsilon), cc.BLUE) selected_action = None if use_random: selected_action = random.choice(enabled_actions) elif self.master.training_target == 'map': selected_action = self._select_action_map_mode(state, enabled_actions) elif self.master.training_target == 'unit': selected_action = self._select_action_unit_mode(state, enabled_actions) return selected_action def _select_action_map_mode(self, state, enabled_actions): id_distribution = np.concatenate([state.unit_id_distribution, state.enemy_id_distribution], axis=0) probabilities, _ = self.network.model.predict(np.array([id_distribution])) # Adjust probabilities so as to be selected only enabled actions. enabled_probabilities = np.zeros(probabilities[0].shape) for action in enabled_actions: enabled_probabilities[action.id] = probabilities[0][action.id] probabilities_sum = sum(enabled_probabilities) # If sum of enabled probabilities is zero, chose random. if probabilities_sum == 0: return random.choice(enabled_actions) # Adjust probabilities so that sum of probabilities is 1. for action in enabled_actions: enabled_probabilities[action.id] = enabled_probabilities[action.id] / probabilities_sum if self.master.is_test(): # Select max when test mode. selected_action_id = np.argmax(enabled_probabilities) else: # Select following probability when training mode. 
selected_action_id = np.random.choice(self.network.action_count, p=enabled_probabilities) selected_action = [x for x in enabled_actions if x.id == selected_action_id][0] return selected_action def _select_action_unit_mode(self, state, enabled_actions): input_state = np.array([np.concatenate([state.unit_hp_distribution, state.enemy_hp_distribution])]) selected_action = None max_value = None for action in enabled_actions: pos = np.zeros((2, 10, 10), dtype=int) if action.location_param: x, y = int(action.location_param.pos[0] / 64), int(action.location_param.pos[1] / 64) dim = 0 if action.location_param.type == 'melee' else 1 pos[dim, x, y] = 1 input_location = np.array([pos]) unit = [0, 0, 0] if action.unit_param: unit = [action.unit_param.hp / 100, action.unit_param.attack_power / 100, action.unit_param.block_count / 5] input_unit = np.array([[unit]], dtype=float) action_type_id = 0 if action.type == 'sortie': action_type_id = 1 elif action.type == 'withdraw': action_type_id = 2 input_action = [0, 0, 0] input_action[action_type_id] = 1 input_action = np.array([[input_action]], dtype=int) value = self.network.model.predict([input_state, input_location, input_unit, input_action]) if not selected_action or value > max_value: selected_action = action max_value = value return selected_action def _calc_reward(self, action_result): max_reward = self.master.game.enemies.len() return (action_result.kills - action_result.lost_life) / max_reward def receive_action_result(self, action_result): self.total_reward += action_result.kills if not self.master.is_test(): self.memory.append(action_result) r = self._calc_reward(action_result) self.R = (self.R + r * GAMMA ** ADVANTAGE_STEPS) / GAMMA if action_result.is_terminal: while len(self.memory) > 0: self.train_queue.append(self._sample_memory(len(self.memory))) self.R = (self.R - self._calc_reward(self.memory[0])) / GAMMA self.memory.pop(0) elif len(self.memory) >= ADVANTAGE_STEPS: self.train_queue.append(self._sample_memory(ADVANTAGE_STEPS)) self.memory.pop(0) if action_result.is_terminal or action_result.step % ADVANTAGE_STEPS == 0: self._sync() if action_result.is_terminal: with self.master.lock: self.master.kills.append((self.epoch, self.total_reward)) self.master.epoch += 1 self.epoch = self.master.epoch self.total_reward = 0 self.memory.clear() self.train_queue.clear() self.R = 0 def _sync(self): if len(self.train_queue) < MIN_BATCH: return if self.master.training_target == 'unit': self._apply_gradients_on_unit_mode() elif self.master.training_target == 'map': self._apply_gradients_on_map_mode() with self.master.lock: self.master.sess.run(self.copy_trainable_vars_op) self.train_queue.clear() def _apply_gradients_on_map_mode(self): for s_t, a_t, r, s_tn, terminal in self.train_queue: id_distribution_ = np.concatenate([s_tn.unit_id_distribution, s_tn.enemy_id_distribution], axis=0) _, v = self.network.model.predict(np.array([id_distribution_])) v_tn = r + (GAMMA ** ADVANTAGE_STEPS) * v * (not terminal) id_distribution = np.concatenate([s_t.unit_id_distribution, s_t.enemy_id_distribution], axis=0) input_state = np.array([id_distribution]) action_vec = np.zeros(self.network.action_count, dtype=float) for i in range(self.network.action_count): action_vec[i] = 1.0 if i == a_t.id else 1e-10 output_policy = np.array([action_vec]) output_v = np.array(v_tn) feed_dict = {self.network.input_state_ph: input_state, self.network.output_policy_ph: output_policy, self.network.output_state_value_ph: output_v} with self.master.lock: 
self.master.sess.run(self.apply_gradients_op, feed_dict=feed_dict) def _apply_gradients_on_unit_mode(self): for s, a, r, s_, t in self.train_queue: input_state_ = np.array([np.concatenate([s_.unit_hp_distribution, s_.enemy_hp_distribution])]) pos = np.zeros((2, 10, 10), dtype=int) input_location_ = np.array([pos]) input_unit_ = np.array([[[0, 0, 0]]], dtype=float) input_action_ = np.array([[[0, 0, 0]]], dtype=int) value = self.network.model.predict([input_state_, input_location_, input_unit_, input_action_]) v_n = r + (GAMMA ** ADVANTAGE_STEPS) * value * (not t) input_state = np.array([np.concatenate([s.unit_hp_distribution, s.enemy_hp_distribution])]) pos = np.zeros((2, 10, 10), dtype=int) if a.location_param: x, y = int(a.location_param.pos[0] / 64), int(a.location_param.pos[1] / 64) pos[0, x, y] = 1 input_location = np.array([pos]) unit = [0, 0, 0] if a.unit_param: unit = [a.unit_param.hp / 100, a.unit_param.attack_power / 100, a.unit_param.block_count / 5] input_unit = np.array([[unit]], dtype=float) action_type_id = 0 if a.type == 'sortie': action_type_id = 1 elif a.type == 'withdraw': action_type_id = 2 input_action = [0, 0, 0] input_action[action_type_id] = 1 input_action = np.array([[input_action]], dtype=int) output_v = np.array(v_n) feed_dict = {self.network.input_state_ph: input_state, self.network.input_location_ph: input_location, self.network.input_unit_ph: input_unit, self.network.input_action_ph: input_action, self.network.output_value_ph: output_v} with self.master.lock: self.master.sess.run(self.apply_gradients_op, feed_dict=feed_dict) def _sample_memory(self, n): s = self.memory[0].state a = self.memory[0].action r = self.R s_ = self.memory[n - 1].state t = self.memory[n - 1].is_terminal return s, a, r, s_, t def is_main_slave(self): return self.name == 'slave_0' def run(self): self.master.sess.run(self.copy_trainable_vars_op) conn, worker_conn = Pipe() process = Process(target=GameWorker.run, args=(worker_conn, self.master.game_params,)) process.start() while True: message = conn.recv() event = message[0] if event == GAME_EVENT_TYPE_REQUEST_ACTION: action = self.select_action(message[1], message[2], message[3]) conn.send([action]) elif event == GAME_EVENT_TYPE_SEND_RESULT: self.receive_action_result(message[1]) conn.send([]) class GameWorker(object): def __init__(self, conn, game): self.game = game self.conn = conn def _request_action(self, step, state, enabled_actions): self.conn.send([GAME_EVENT_TYPE_REQUEST_ACTION, step, state, enabled_actions]) return self.conn.recv()[0] def _send_action_result(self, action_result): self.conn.send([GAME_EVENT_TYPE_SEND_RESULT, action_result]) self.conn.recv() def _run(self): while True: self.game.request_action_func = self._request_action self.game.send_action_result_func = self._send_action_result self.game.play() self.game.reset_level() @staticmethod def run(conn, game_params): Game.init() game = Game(fps=game_params.fps, speed=game_params.speed, rendering=True, training_interval=game_params.training_interval, max_step=game_params.max_step, blank_action_count=game_params.blank_action_count, request_action_func=None, send_action_result_func=None) game.load_level(game_params.level) worker = GameWorker(conn=conn, game=game) worker._run()
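# ---------------------------------------------------------------------------
# Illustrative sketch (not called by the classes above): the rolling update
#     R <- (R + r * GAMMA ** ADVANTAGE_STEPS) / GAMMA
# used in Slave.receive_action_result() accumulates the n-step discounted
# return.  After ADVANTAGE_STEPS appends the accumulator equals
#     sum_i GAMMA**i * r_i   for i = 0 .. ADVANTAGE_STEPS - 1.
# This helper is a hypothetical standalone check of that identity only and
# reuses the GAMMA / ADVANTAGE_STEPS constants defined at the top of the file.
# ---------------------------------------------------------------------------
def _nstep_return_sketch(rewards, gamma=GAMMA, n=ADVANTAGE_STEPS):
    rolling = 0.0
    for r in rewards[:n]:
        rolling = (rolling + r * gamma ** n) / gamma
    direct = sum((gamma ** i) * r for i, r in enumerate(rewards[:n]))
    return rolling, direct  # the two values agree up to floating-point rounding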
py
b4149fbe548e79f03ae4640d73617bd88df6e21c
import numpy as np import matplotlib.pyplot as plt import matplotlib from astroquery.irsa import Irsa # had to install astroquery w/ pip # from astroquery.simbad import Simbad matplotlib.rcParams.update({'font.size':18}) matplotlib.rcParams.update({'font.family':'serif'}) Irsa.ROW_LIMIT = 0 # a list of targets that astroquery can resolve # need list of WISE targets within 47 arcmin of the Ecliptic Poles # Simbad defines the NEP = 18 00 00.000 +66 33 38.55 # SEP = 06 00 00.000 -66 33 38.55 ### Some random targets pulled up by hand # targets = ['WISE J060224.34-661926.1', 'WISE J180214.15+661150.5', 'WISE J180022.32+663315.1', # 'WISE J055625.16-662924.6', 'WISE J175946.15+663746.7', 'WISE J060051.07-664439.4', # 'WISE J060113.59-662905.1', 'WISE J055552.81-662920.9', 'WISE J060105.40-663425.1', # 'WISE J060132.35-660815.0', 'WISE J180201.72+663739.0'] targets = ['WISE J060559.74-655908.2'] # the WISE tables to search cats = ['neowiser_p1bs_psd', 'allsky_4band_p1bs_psd', 'allsky_3band_p1bs_psd', 'allsky_2band_p1bs_psd'] for obj in targets: print('Running '+ str(obj)) table1 = Irsa.query_region(obj, catalog=cats[0], spatial='Cone', width='2 arcsec') table2 = Irsa.query_region(obj, catalog=cats[1], spatial='Cone', width='2 arcsec') table3 = Irsa.query_region(obj, catalog=cats[2], spatial='Cone', width='2 arcsec') table4 = Irsa.query_region(obj, catalog=cats[3], spatial='Cone', width='2 arcsec') table1.sort('mjd') table2.sort('mjd') table3.sort('mjd') table4.sort('mjd') df1 = table1.to_pandas() df2 = table2.to_pandas() df3 = table3.to_pandas() df4 = table4.to_pandas() print(' found '+str(len(df1))+' + '+ str(len(df2))+' + '+ str(len(df3))+' + '+ str(len(df4))+' visits') # dump to CSV files for possible later analysis df1.to_csv('data/' + obj + cats[0] + '.csv') df2.to_csv('data/' + obj + cats[1] + '.csv') df3.to_csv('data/' + obj + cats[2] + '.csv') df4.to_csv('data/' + obj + cats[3] + '.csv') #### NEED TO ADD QUALITY CUTS # can't add this to the latter 3 surveys... 
(df1['qual_frame'] > 8) ok1 = (df1['ph_qual'].str[0] == 'A') & (df1['nb'] == 1) & (df1['cc_flags'].str[0:2] == '00') & (df1['w1rchi2'] < 5) & (df1['qual_frame'] > 8) ok2 = (df2['ph_qual'].str[0] == 'A') & (df2['nb'] == 1) & (df2['cc_flags'].str[0:2] == '00') & (df2['w1rchi2'] < 5) ok3 = (df3['ph_qual'].str[0] == 'A') & (df3['nb'] == 1) & (df3['cc_flags'].str[0:2] == '00') & (df3['w1rchi2'] < 5) ok4 = (df4['ph_qual'].str[0] == 'A') & (df4['nb'] == 1) & (df4['cc_flags'].str[0:2] == '00') & (df4['w1rchi2'] < 5) # colors colors = ['#1f77b4', '#ff7f0e', '#c5b0d5', '#d62728'] ## make 3 basic figures: # 1) W1 light curve plt.figure(figsize=(13,8)) # plt.scatter(df1['mjd'], df1['w1mpro'], c='k', s=5, alpha=0.2) # plt.scatter(df2['mjd'], df2['w1mpro'], c='k', s=5, alpha=0.2) # plt.scatter(df3['mjd'], df3['w1mpro'], c='k', s=5, alpha=0.2) # plt.scatter(df4['mjd'], df4['w1mpro'], c='k', s=5, alpha=0.2) plt.errorbar(df1['mjd'][ok1], df1['w1mpro'][ok1], yerr=df1['w1sigmpro'][ok1], marker='o', linestyle='none', alpha=0.25, color=colors[0]) plt.errorbar(df2['mjd'][ok2], df2['w1mpro'][ok2], yerr=df2['w1sigmpro'][ok2], marker='o', linestyle='none', alpha=0.25, color=colors[1]) plt.errorbar(df3['mjd'][ok3], df3['w1mpro'][ok3], yerr=df3['w1sigmpro'][ok3], marker='o', linestyle='none', alpha=0.25, color=colors[2]) plt.errorbar(df4['mjd'][ok4], df4['w1mpro'][ok4], yerr=df4['w1sigmpro'][ok4], marker='o', linestyle='none', alpha=0.25, color=colors[3]) plt.ylabel('W1 (mag)') plt.xlabel('MJD (days)') plt.gca().invert_yaxis() plt.title(obj) plt.savefig('img/'+obj + '_W1.png', dpi=150, bbox_inches='tight', pad_inches=0.25) # plt.show() plt.close() # 2) W1-W2 color light curve plt.figure(figsize=(13,8)) plt.errorbar(df1['mjd'][ok1], df1['w1mpro'][ok1] - df1['w2mpro'][ok1], yerr=np.sqrt(df1['w1sigmpro'][ok1]**2 + df1['w2sigmpro'][ok1]**2), marker='o', linestyle='none', alpha=0.25, color=colors[0]) plt.errorbar(df2['mjd'][ok2], df2['w1mpro'][ok2] - df2['w2mpro'][ok2], yerr=np.sqrt(df2['w1sigmpro'][ok2]**2 + df2['w2sigmpro'][ok2]**2), marker='o', linestyle='none', alpha=0.25, color=colors[1]) plt.errorbar(df3['mjd'][ok3], df3['w1mpro'][ok3] - df3['w2mpro'][ok3], yerr=np.sqrt(df3['w1sigmpro'][ok3]**2 + df3['w2sigmpro'][ok3]**2), marker='o', linestyle='none', alpha=0.25, color=colors[2]) plt.errorbar(df4['mjd'][ok4], df4['w1mpro'][ok4] - df4['w2mpro'][ok4], yerr=np.sqrt(df4['w1sigmpro'][ok4]**2 + df4['w2sigmpro'][ok4]**2), marker='o', linestyle='none', alpha=0.25, color=colors[3]) plt.xlabel('MJD (days)') plt.ylabel('W1-W2 (mag)') plt.title(obj) plt.savefig('img/'+obj + '_W1W2.png', dpi=150, bbox_inches='tight', pad_inches=0.25) # plt.show() plt.close() # 3) CMD # plt.figure(figsize=(8,8)) # plt.errorbar(df1['w1mpro'] - df1['w2mpro'], df1['w1mpro'], # xerr=np.sqrt(df1['w1sigmpro']**2 + df1['w2sigmpro']**2), yerr=df1['w1sigmpro'], # marker='o', linestyle='none', alpha=0.25, color='#1f77b4') # plt.errorbar(df2['w1mpro_ep'] - df2['w2mpro_ep'], df2['w1mpro_ep'], # xerr=np.sqrt(df2['w1sigmpro_ep']**2 + df2['w2sigmpro_ep']**2 ), yerr=df2['w1sigmpro_ep'], # marker='o', linestyle='none', alpha=0.25, color='#ff7f0e') # # plt.ylabel('W1 (mag)') # plt.xlabel('W1-W2 (mag)') # plt.gca().invert_yaxis() # plt.savefig('img/'+obj + '_cmd.png', dpi=150, bbox_inches='tight', pad_inches=0.25) # plt.close() # bonus: RA,Dec to make sure not a blend, etc # plt.figure(figsize=(8, 8)) # plt.scatter(df1['ra'], df1['dec'], # marker='o', alpha=0.25, color='#1f77b4') # plt.scatter(df2['ra'], df2['dec'], # marker='o', alpha=0.25, color='#ff7f0e') # 
plt.xlabel('RA (deg)') # plt.ylabel('Dec (deg)') # plt.savefig('img/' + obj + '_radec.png', dpi=150, bbox_inches='tight', pad_inches=0.25) # plt.close() ''' ## now make zoom-in light curves for every independent visit # find breaks in data (should be ~6mo unless near the poles) dlim = 20 # days, limit of time for a window to be split at dt1 = df1['mjd'].values[1:] - df1['mjd'].values[0:-1] #bk1 = np.append(np.append([-1], np.where((dt1 > dlim))[0]), len(df1)) bk1 = np.where((dt1 > dlim))[0] R1 = np.append(bk1+1, [len(df1)+1]) L1 = np.append([0], bk1+1) dt2 = df2['mjd'].values[1:] - df2['mjd'].values[0:-1] #bk2 = np.append(np.append([-1], np.where((dt2 > dlim))[0]), len(df2)) bk2 = np.where((dt2 > dlim))[0] R2 = np.append(bk2+1, [len(df2)+1]) L2 = np.append([0], bk2+1) # now step through the chunks of continuous data, make a light curve for each if len(df2)>0: for k in range(len(R2)): tmin = np.nanmin(df2['mjd'].values[L2[k]:R2[k]]) tmin_s = str(tmin) plt.figure(figsize=(9,5)) plt.errorbar(df2['mjd'].values[L2[k]:R2[k]] - tmin, df2['w1mpro_ep'].values[L2[k]:R2[k]], yerr=df2['w1sigmpro_ep'].values[L2[k]:R2[k]], marker='o', linestyle='none', alpha=0.25, color='#ff7f0e') plt.xlabel('MJD - '+tmin_s+' (days)') plt.ylabel('W1 (mag)') plt.gca().invert_yaxis() plt.savefig('img/'+obj + '_visit'+str(k+1)+'.png', dpi=150, bbox_inches='tight', pad_inches=0.25) plt.close() k0 = len(R2) # counter to keep using for visit number if len(df1)>0: for k in range(len(R1)): tmin = np.nanmin(df1['mjd'].values[L1[k]:R1[k]]) tmin_s = str(tmin) plt.figure(figsize=(9,5)) plt.errorbar(df1['mjd'].values[L1[k]:R1[k]] - tmin, df1['w1mpro'].values[L1[k]:R1[k]], yerr=df1['w1sigmpro'].values[L1[k]:R1[k]], marker='o', linestyle='none', alpha=0.25, color='#1f77b4') plt.xlabel('MJD - '+tmin_s+' (days)') plt.ylabel('W1 (mag)') plt.gca().invert_yaxis() plt.savefig('img/'+obj + '_visit'+str(k+k0+1)+'.png', dpi=150, bbox_inches='tight', pad_inches=0.25) plt.close() '''
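# ---------------------------------------------------------------------------
# Minimal sketch of the visit-splitting idea from the commented-out block
# above (standalone, not called by the script): break a sorted array of MJDs
# into separate "visits" wherever consecutive epochs are more than `dlim`
# days apart.  The 20-day threshold mirrors the value used above.
# ---------------------------------------------------------------------------
def split_visits(mjd, dlim=20.0):
    mjd = np.asarray(mjd)
    breaks = np.where(np.diff(mjd) > dlim)[0]   # last index of each visit
    left = np.append([0], breaks + 1)           # first index of each visit
    right = np.append(breaks + 1, [len(mjd)])   # one past the last index
    return list(zip(left, right))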
py
b414a086c832bd11b9ecee2dc36f44551f48bb0f
# Standard Library import logging import premailer # Websauna from websauna.system.core.utils import get_secrets from websauna.system.model.retry import retryable from websauna.system.task.tasks import ScheduleOnCommitTask from websauna.system.task.tasks import task from websauna.utils.time import now from .importer import import_all_users from .interfaces import INewsletterGenerator from .mailgun import Mailgun from .state import NewsletterState logger = logging.getLogger(__name__) @task(base=ScheduleOnCommitTask, bind=True) def send_newsletter_task(self: ScheduleOnCommitTask, subject, preview_email, testmode, now_, import_subscribers, tags): """Do user import and newsletter inside a Celery worker process. We carefully split transaction handling to several parts. """ # from celery.contrib import rdb ; rdb.set_trace() request = self.get_request() secrets = get_secrets(request.registry) if not now_: now_ = now() mailing_list = secrets["mailgun.mailing_list"] if preview_email: to = preview_email subject = "[PREVIEW] " + subject else: to = mailing_list newsletter = request.registry.queryAdapter(request, INewsletterGenerator) state = NewsletterState(request) text = "Please see the attached HTML mail." @retryable(tm=request.tm) def render_tx(): """Run HTML rendering in its own transaction, as it most likely reads database.""" return newsletter.render(since=state.get_last_send_timestamp()) html = render_tx() html = premailer.transform(html) from_ = secrets["mailgun.from"] domain = secrets["mailgun.domain"] campaign = now().isoformat() mailgun = Mailgun(request.registry) if import_subscribers: # This may take a looooong time.... logger.info("Importing subscribers") import_all_users(mailgun, request.dbsession, mailing_list, tm=request.tm) logger.info("Sending out newsletter %s %s %s %s %s %s", domain, subject, to, from_, campaign, tags) mailgun.send(domain, to, from_, subject, text, html, campaign, tags=tags) if not preview_email: # Only mark newsletter send if not preview state.set_last_send_timestamp(now_)
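# ---------------------------------------------------------------------------
# Illustrative note (standalone, not used by the task above): premailer.transform
# rewrites <style> rules as inline style="" attributes, which is what keeps the
# rendered newsletter readable in email clients that strip <head> CSS, e.g.
#
#   html_in = ('<html><head><style>p {color: #333;}</style></head>'
#              '<body><p>Hello</p></body></html>')
#   premailer.transform(html_in)
#   # -> '...<body><p style="color:#333">Hello</p></body>...'
# ---------------------------------------------------------------------------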
py
b414a0e7747902f63c7a081103eba0a7e4307d15
import collections import itertools import numpy as np from qecsim import paulitools as pt import matplotlib.pyplot as plt import qecsim from qecsim import app from qecsim.models.generic import PhaseFlipErrorModel,DepolarizingErrorModel,BiasedDepolarizingErrorModel,BiasedYXErrorModel from qecsim.models.planar import PlanarCode,PlanarMPSDecoder from qecsim.models.rotatedplanar import RotatedPlanarCode, RotatedPlanarMPSDecoder # from _planarmpsdecoder_def import PlanarMPSDecoder_def import app_def import app_defp import _planarmpsdecoder_def import _planarmpsdecoder_defp import _rotatedplanarmpsdecoder_def import _rotatedplanarmpsdecoder_defp import importlib as imp imp.reload(app_def) imp.reload(app_defp) imp.reload(_planarmpsdecoder_def) imp.reload(_planarmpsdecoder_defp) imp.reload(_rotatedplanarmpsdecoder_def) imp.reload(_rotatedplanarmpsdecoder_defp) import os,time import multiprocessing as mp from functools import partial def parallel_step_p(code,error_model,decoder,max_runs,perm_rates,code_name,layout,error_probability): # perm_mat,perm_vec= deform_matsvecs(code,decoder,error_model) result= app_defp.run_defp(code,error_model,decoder,error_probability,perm_rates,code_name,layout,max_runs) return result def parallel_step_code(code,error_model,decoder,max_runs,perm_rates,code_name,layout,error_probabilities,realization_index): pL_list=np.zeros((len(error_probabilities))) std_list=np.zeros((len(error_probabilities))) # perm_mat,perm_vec= deform_matsvecs(code,decoder,error_model) for error_probability_index,error_probability in enumerate(error_probabilities): # perm_mat,perm_vec= deform_matsvecs(code,decoder,error_model) [pL_list[error_probability_index],std_list[error_probability_index]]= app_defp.run_defp(code,error_model,decoder,error_probability,perm_rates,code_name,layout,max_runs) return [pL_list,std_list] def TNDresult(code,decoder,error_model,max_runs,perm_rates,error_probabilities,code_name,layout,num_realiz): pL_list_realiz=np.zeros((num_realiz,len(error_probabilities))) std_list_realiz=np.zeros((num_realiz,len(error_probabilities))) pL_list=np.zeros(len(error_probabilities)) std_list=np.zeros(len(error_probabilities)) log_pL_list=np.zeros(len(error_probabilities)) log_std_list=np.zeros(len(error_probabilities)) if code_name[:6]=='random': print(perm_rates) p=mp.Pool() func=partial(parallel_step_code,code,error_model,decoder,max_runs,perm_rates,code_name,layout,error_probabilities) result=p.map(func,range(num_realiz)) #print(result) p.close() p.join() for realization_index in range(num_realiz): for i in range(len(error_probabilities)): pL_list_realiz[realization_index][i]=result[realization_index][0][i] std_list_realiz[realization_index][i]=result[realization_index][1][i] pL_list = np.sum(pL_list_realiz,axis=0)/num_realiz std_list = np.sqrt(np.sum(vsquare(std_list_realiz),axis=0))/num_realiz for i in range(len(pL_list)): log_pL_list[i]=-np.log(pL_list[i]) log_std_list[i]=std_list[i]/(pL_list[i]*np.log(10)) else: p=mp.Pool() func=partial(parallel_step_p,code,error_model,decoder,max_runs,perm_rates,code_name,layout) result=p.map(func,error_probabilities) print(result) p.close() p.join() for i in range(len(result)): pL_list[i]=result[i][0] std_list[i]=result[i][1] log_pL_list[i]=-np.log(pL_list[i]) log_std_list[i]=std_list[i]/(pL_list[i]*np.log(10)) # for realization_index in range(num_realiz): # p=mp.Pool() # func=partial(parallel_step_p,code,error_model,decoder,max_runs,perm_rates,code_name,layout) # result=p.map(func,error_probabilities) # print(result) # p.close() # p.join() # 
for i in range(len(error_probabilities)): # pL_list_realiz[realization_index][i]=result[i][0] # std_list_realiz[realization_index][i]=result[i][1] # pL_list = np.sum(pL_list_realiz,axis=0)/num_realiz # std_list = np.sqrt(np.sum(vsquare(std_list_realiz),axis=0))/num_realiz # for i in range(len(pL_list)): # log_pL_list[i]=-np.log(pL_list[i]) # log_std_list[i]=std_list[i]/(pL_list[i]*np.log(10)) return [pL_list,std_list,log_pL_list,log_std_list] if __name__=='__main__': def square(a): return a**2 vsquare=np.vectorize(square) bdry_name='surface' sizes= range(6,7,2) rotsizes= range(13,14,2) p_min,p_max=0.05,0.50 error_probabilities=np.linspace(p_min,p_max,50) #export data timestr=time.strftime('%Y%m%d-%H%M%S') #record current date and time import os dirname='./data/'+'all_codes'+timestr os.mkdir(dirname) #make a new directory with current date and time # code_names=['spiral_XZ','random_XZ','random_XZ_YZ','random_XY'] # code_names=['random_all','random_XZ_YZ'] # # code_names=['spiral_XZ','random_XZ','random_all','random_XY'] # code_names=['XY','CSS'] bias_list=[10,100,300,1000,10**300] bias_list=[50,200] perm_rates=[1,0,0,0,0,0] for bias in bias_list: chi_val=12 # decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) # codes_and_size = [PlanarCode(*(size,size)) for size in sizes] # if bias==10: # code_names=['CSS','XY','XZZX','spiral_XZ','random_XY','random_XZ','random_ZXY','random_XZ_YZ','random_all'] # else: # code_names=['CSS','XY','XZZX','spiral_XZ','random_XZ','random_XZ_YZ'] # code_names=['random_XZ_YZ','random_XZ_YZ2'] code_names=['random_rot_XZ_YZ','rotXY'] # code_names=['random_XZ_YZ0'] # code_names=['random_rot_XZ_YZ'] from itertools import cycle plt.figure(figsize=(20,10)) lines=['-',':','--','-.'] #XYZ,ZYX,XZY,YXZ,YZX,ZXY for code_name in code_names: if code_name=='CSS': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=1 bias_str='Z' max_runs=20000 elif code_name=='XY': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' bias_str='Y' num_realiz=1 max_runs=20000 elif code_name=='rotXY': codes_and_size = [RotatedPlanarCode(*(size,size)) for size in rotsizes] decoder = _rotatedplanarmpsdecoder_defp.RotatedPlanarMPSDecoder_defp(chi=chi_val) layout='rotated' bias_str='Y' num_realiz=1 max_runs=20000 elif code_name=='rot_spiral': codes_and_size = [RotatedPlanarCode(*(size,size)) for size in rotsizes] decoder = _rotatedplanarmpsdecoder_defp.RotatedPlanarMPSDecoder_defp(chi=chi_val) layout='rotated' bias_str='Y' num_realiz=1 max_runs=20000 elif code_name=='rotXZ': codes_and_size = [RotatedPlanarCode(*(size,size)) for size in rotsizes] decoder = _rotatedplanarmpsdecoder_defp.RotatedPlanarMPSDecoder_defp(chi=chi_val) layout='rotated' bias_str='Z' num_realiz=1 max_runs=10000 elif code_name=='random_rot_XY': codes_and_size = [RotatedPlanarCode(*(size,size)) for size in rotsizes] decoder = _rotatedplanarmpsdecoder_defp.RotatedPlanarMPSDecoder_defp(chi=chi_val) layout='rotated' bias_str='Y' num_realiz=20 max_runs=2000 perm_rates=[1/2,1/2,0,0,0,0] elif code_name=='random_rot_XZ_YZ': codes_and_size = [RotatedPlanarCode(*(size,size)) for size in rotsizes] decoder = _rotatedplanarmpsdecoder_defp.RotatedPlanarMPSDecoder_defp(chi=chi_val) layout='rotated' bias_str='Z' num_realiz=100 max_runs=1000 perm_rates=[1/4,1/4,1/2,0,0,0] elif code_name=='random_rot_XY_ZY': codes_and_size = 
[RotatedPlanarCode(*(size,size)) for size in rotsizes] decoder = _rotatedplanarmpsdecoder_defp.RotatedPlanarMPSDecoder_defp(chi=chi_val) layout='rotated' bias_str='Y' num_realiz=20 max_runs=2000 perm_rates=[1/4,1/4,1/2,0,0,0] elif code_name=='random_rot_XZ': codes_and_size = [RotatedPlanarCode(*(size,size)) for size in rotsizes] decoder = _rotatedplanarmpsdecoder_defp.RotatedPlanarMPSDecoder_defp(chi=chi_val) layout='rotated' bias_str='Z' num_realiz=20 max_runs=2000 perm_rates=[1/2,1/2,0,0,0,0] elif code_name=='random_rot_XZ_YZ0': codes_and_size = [RotatedPlanarCode(*(size,size)) for size in rotsizes] decoder = _rotatedplanarmpsdecoder_defp.RotatedPlanarMPSDecoder_defp(chi=chi_val) layout='rotated' bias_str='Z' num_realiz=30 max_runs=2000 perm_rates=[1/3,1/3,1/3,0,0,0] elif code_name=='XZZX': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=1 bias_str='Z' max_runs=20000 elif code_name=='spiral_XZ': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=1 bias_str='Z' max_runs=20000 elif code_name=='random_XZ_YZ': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=10 bias_str='Z' max_runs=2000 perm_rates=[1/2,1/3,1/2-1/3,0,0,0] elif code_name=='random_XZ_YZ2': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=10 bias_str='Z' max_runs=2000 perm_rates=[1/3,1/3,1/3,0,0,0] elif code_name=='random_all': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=30 bias_str='Z' max_runs=2000 perm_rates=[1/6,1/6,1/6,1/6,1/6,1/6] elif code_name=='random_XZ': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=30 bias_str='Z' max_runs=2000 perm_rates=[1/2,1/2,0,0,0,0] elif code_name=='random_XY': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=30 bias_str='Y' max_runs=2000 perm_rates=[1/2,1/2,0,0,0,0] elif code_name=='random_ZXY': codes_and_size = [PlanarCode(*(size,size)) for size in sizes] decoder = _planarmpsdecoder_defp.PlanarMPSDecoder_defp(chi=chi_val) layout='planar' num_realiz=30 bias_str='Z' max_runs=2000 perm_rates=[1/2,0,0,0,0,1/2] error_model = BiasedDepolarizingErrorModel(bias,bias_str) # bias=1/bias # error_model=BiasedYXErrorModel(bias) # print run parameters print('code_name:',code_name) print('codes_and_size:',[code.label for code in codes_and_size]) print('Error model:',error_model.label) print('number of realizations:',num_realiz) print('Decoder:',decoder.label) print('Error probabilities:',error_probabilities) print('Maximum runs:',max_runs) for L_index,code in enumerate(codes_and_size): plt.title('TND failure rate scaling comparison at bias='+str(bias)[:7]+' ,chi='+str(chi_val)+', L_rot='+str(rotsizes[L_index])+' ,L_pl='+str(sizes[L_index])) [pL_list,std_list,log_pL_list,log_std_list]=TNDresult(code,decoder,error_model,max_runs,perm_rates,error_probabilities,code_name,layout,num_realiz) 
np.savetxt(dirname+'/p_list'+code_name+str(bias)[:7]+'.csv',error_probabilities,delimiter=',') np.savetxt(dirname+'/pL_list'+code_name+str(bias)[:7]+'.csv',pL_list,delimiter=',') np.savetxt(dirname+'/std_list'+code_name+str(bias)[:7]+'.csv',std_list,delimiter=',') plt.errorbar(-np.log(error_probabilities),log_pL_list,log_std_list) plt.xlabel('-log(p)') plt.ylabel('$-log(p_L)$') plt.legend(code_names) plt.savefig(dirname+'/scaling_code_comparison_bias='+str(bias)[:7]+'.pdf')
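# ---------------------------------------------------------------------------
# Standalone sketch of the error propagation behind the log-scale plot above
# (not called by the script): for y = -log(p) with a small uncertainty
# sigma_p, first-order propagation gives
#     sigma_y = sigma_p / p             (natural log)
#     sigma_y = sigma_p / (p * ln 10)   (log base 10)
# The helper below just packages that conversion.
# ---------------------------------------------------------------------------
def neg_log_with_error(p, sigma_p, base10=False):
    value = -np.log10(p) if base10 else -np.log(p)
    sigma = sigma_p / (p * np.log(10)) if base10 else sigma_p / p
    return value, sigma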
py
b414a106462e5129c72dfaeac975ac599e504247
from rdkit import Chem
import sys

smiles = sys.argv[1]

m = Chem.MolFromSmiles(smiles)
charge = Chem.GetFormalCharge(m)

# GetSubstructMatches returns one tuple of atom indices per match; flatten it
# into a single list of aromatic C-H carbon indices.
aromatic_ch = m.GetSubstructMatches(Chem.MolFromSmarts('[c;H1]'))
aromatic_ch = [element for tupl in aromatic_ch for element in tupl]

if len(aromatic_ch) == 0:
    print("No aromatic carbons")

print(1)
py
b414a1f32c59269d15ac8f69cb89a100b87d399b
import os import math import json import time import shutil import heroku3 import requests from pyrogram import filters from pyrogram import Client as trojanz from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton if bool(os.environ.get("WEBHOOK", False)): from sample_config import Config else: from config import Config from script import Script from plugins.helpers import humanbytes from database.filters_mdb import filter_stats from database.users_mdb import add_user, find_user, all_users @trojanz.on_message(filters.command('id') & (filters.private | filters.group)) async def showid(client, message): chat_type = message.chat.type if chat_type == "private": user_id = message.chat.id await message.reply_text( f"**🃏 Your ID : `{user_id}`**", parse_mode="md", quote=True ) elif (chat_type == "group") or (chat_type == "supergroup"): user_id = message.from_user.id chat_id = message.chat.id if message.reply_to_message: reply_id = f"Replied User ID : `{message.reply_to_message.from_user.id}`" else: reply_id = "" await message.reply_text( f"**🃏 Your ID : `{user_id}`\n🎴 This Group ID : `{chat_id}`\n\n{reply_id}**", parse_mode="md", quote=True ) @trojanz.on_message(filters.command('info') & (filters.private | filters.group)) async def showinfo(client, message): try: cmd, id = message.text.split(" ", 1) except: id = False pass if id: if (len(id) == 10 or len(id) == 9): try: checkid = int(id) except: await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md") return else: await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md") return if Config.SAVE_USER == "yes": name, username, dcid = await find_user(str(id)) else: try: user = await client.get_users(int(id)) name = str(user.first_name + (user.last_name or "")) username = user.username dcid = user.dc_id except: name = False pass if not name: await message.reply_text("__USER Details not found!!__", quote=True, parse_mode="md") return else: if message.reply_to_message: name = str(message.reply_to_message.from_user.first_name\ + (message.reply_to_message.from_user.last_name or "")) id = message.reply_to_message.from_user.id username = message.reply_to_message.from_user.username dcid = message.reply_to_message.from_user.dc_id else: name = str(message.from_user.first_name\ + (message.from_user.last_name or "")) id = message.from_user.id username = message.from_user.username dcid = message.from_user.dc_id if not str(username) == "None": user_name = f"@{username}" else: user_name = "none" await message.reply_text( f"<b>➜ Name : {name}</b>\n" f"<b>➜ User ID : <code>{id}</code></b>\n" f"<b>➜ Username : {user_name}</b>\n" f"<b>➜ Permanant USER link : <a href='tg://user?id={id}'>Click here!</a></b>\n" f"<b>➜ DC ID : {dcid}</b>\n", quote=True, parse_mode="html" ) @trojanz.on_message((filters.private | filters.group) & filters.command('status')) async def bot_status(client,message): if str(message.from_user.id) not in Config.AUTH_USERS: return chats, filters = await filter_stats() if Config.SAVE_USER == "yes": users = await all_users() userstats = f"**➜ {users} users** have connect with your bot!\n" else: userstats = "" if Config.HEROKU_API_KEY: try: server = heroku3.from_key(Config.HEROKU_API_KEY) user_agent = ( 'Mozilla/5.0 (Linux; Android 10; SM-G975F) ' 'AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/80.0.3987.149 Mobile Safari/537.36' ) accountid = server.account().id headers = { 'User-Agent': user_agent, 'Authorization': f'Bearer {Config.HEROKU_API_KEY}', 'Accept': 
'application/vnd.heroku+json; version=3.account-quotas', } path = "/accounts/" + accountid + "/actions/get-quota" request = requests.get("https://api.heroku.com" + path, headers=headers) if request.status_code == 200: result = request.json() total_quota = result['account_quota'] quota_used = result['quota_used'] quota_left = total_quota - quota_used total = math.floor(total_quota/3600) used = math.floor(quota_used/3600) hours = math.floor(quota_left/3600) minutes = math.floor(quota_left/60 % 60) days = math.floor(hours/24) usedperc = math.floor(quota_used / total_quota * 100) leftperc = math.floor(quota_left / total_quota * 100) quota_details = f""" **🗄️ Heroku Account Status** **➜ Free Dyno Quota/Month** ⌛ {total} hours **➜ Dyno used this month** ⌛ {used} hours - ( {usedperc}% ) **➜ Dyno remain this month** ⌛ {hours} hours - ( {leftperc}% ) 🗓️ Approx {days} days! """ else: quota_details = "" except: print("Check your Heroku API key") quota_details = "" else: quota_details = "" uptime = time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - Config.BOT_START_TIME)) try: t, u, f = shutil.disk_usage(".") total = humanbytes(t) used = humanbytes(u) free = humanbytes(f) disk = "\n**💽 Disk Details**\n" \ f"➜ **Used :** {used}\n" \ f"➜ **Total :** {total}\n"\ f"➜ **Free :** {free}\n" except: disk = "" await message.reply_text( "**🤖 Current Bot Status!**\n" f"**➜ {filters}** filters across **{chats}** chats\n" f"{userstats}" f"**➜ Bot uptime :** {uptime}\n" f"{quota_details}" f"{disk}", quote=True, parse_mode="md" ) @trojanz.on_message(filters.command('start') & filters.private) async def start(client, message): await message.reply_text( text=Script.START_MSG.format(message.from_user.mention), disable_web_page_preview=True, reply_markup=InlineKeyboardMarkup( [ [ InlineKeyboardButton("Command Help", callback_data="help_data") ] ] ), reply_to_message_id=message.message_id ) if Config.SAVE_USER == "yes": try: await add_user( str(message.from_user.id), str(message.from_user.username), str(message.from_user.first_name + " " + (message.from_user.last_name or "")), str(message.from_user.dc_id) ) except: pass @trojanz.on_message(filters.command('help') & filters.private) async def help(client, message): await message.reply_text( text=Script.HELP_MSG, disable_web_page_preview=True, reply_markup=InlineKeyboardMarkup( [ [ InlineKeyboardButton("How to Deploy?", url="https://youtu.be/hkmc3e7U7R4"), InlineKeyboardButton("About Me", callback_data="about_data") ], [ InlineKeyboardButton("Compass Bots", url="https://t.me/compass_botz"), InlineKeyboardButton("Developer", url="https://t.me/Dlaize") ] ] ), reply_to_message_id=message.message_id ) @trojanz.on_message(filters.command('about') & filters.private) async def about(client, message): await message.reply_text( text=Script.ABOUT_MSG, disable_web_page_preview=True, reply_markup=InlineKeyboardMarkup( [ [ InlineKeyboardButton( "SOURCE CODE", url="https://github.com/dakshkohli23/Unlimited-Filter-Bot") ], [ InlineKeyboardButton("BACK", callback_data="help_data"), InlineKeyboardButton("CLOSE", callback_data="close_data"), ] ] ), reply_to_message_id=message.message_id )
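# ---------------------------------------------------------------------------
# Standalone sketch (not wired into the handlers above) of the quota maths
# used in bot_status(): the Heroku account-quotas endpoint reports
# account_quota and quota_used in seconds, which the status message converts
# to hours, days and percentages.
# ---------------------------------------------------------------------------
def _quota_summary(total_quota_s, quota_used_s):
    quota_left_s = total_quota_s - quota_used_s
    return {
        "total_hours": math.floor(total_quota_s / 3600),
        "used_hours": math.floor(quota_used_s / 3600),
        "left_hours": math.floor(quota_left_s / 3600),
        "left_days": math.floor(quota_left_s / 3600 / 24),
        "used_percent": math.floor(quota_used_s / total_quota_s * 100),
        "left_percent": math.floor(quota_left_s / total_quota_s * 100),
    }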
py
b414a5e2d811f80bae45c60203fec7ad96b0d1b4
from flask import flash from flask import redirect from flask import render_template from flask import request from flask import url_for from flask_login import current_user from flask_login import login_required from flask_login import login_user from flask_login import logout_user from flasky import db from flasky.auth import auth from flasky.email import send_mail from flasky.models import User from .forms import LoginForm from .forms import RegistrationForm @auth.before_app_request def before_request(): if current_user.is_authenticated: current_user.ping() if not current_user.confirmed \ and request.endpoint[:5] != 'auth.' \ and request.endpoint != 'static': return redirect(url_for('auth.unconfirmed')) @auth.route('/login', methods=['GET', 'POST']) def login(): form = LoginForm() if form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() if user is not None and user.verify_password(form.password.data): login_user(user, remember=form.remember_me) return redirect(request.args.get('next') or url_for('main.index')) flash("Invalid User or Password") return render_template('auth/login.html', form=form) @auth.route('/logout') @login_required def logout(): logout_user() flash('You have been logged out.') return redirect(url_for('main.index')) @auth.route('/register', methods=['GET', 'POST']) def register(): form = RegistrationForm() if form.validate_on_submit(): user = User(email=form.email.data, username=form.username.data, password=form.password.data) db.session.add(user) db.session.commit() token = user.generate_confirmation_token() send_mail(user.email, 'Conform your account', 'auth/email/confirm', user=user, token=token) flash('A confirmation email has been sent to you by email.') return redirect(url_for('main.index')) return render_template('auth/register.html', form=form) @auth.route('/confirm/<token>') def confirm(token): if current_user.confirmed: return redirect(url_for('main.index')) if current_user.confirm(token): flash('You have confirmed your account. Thanks!') else: flash('The confirmation link is invalid or has expired.') return redirect(url_for('main.index')) @auth.route('/unconfirmed') def unconfirmed(): if current_user.is_anonymous or current_user.confirmed: return redirect(url_for('main.index')) return render_template('unconfirmed.html')
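# ---------------------------------------------------------------------------
# The token methods used above (User.generate_confirmation_token / confirm)
# are defined in flasky.models, which is not shown here.  The helpers below
# are only an illustrative sketch of how such a confirmation token is commonly
# built with itsdangerous; the real model methods may differ.
# ---------------------------------------------------------------------------
def _sketch_generate_confirmation_token(user_id, secret_key):
    from itsdangerous import URLSafeTimedSerializer
    return URLSafeTimedSerializer(secret_key).dumps({'confirm': user_id})


def _sketch_confirm_token(token, secret_key, max_age=3600):
    from itsdangerous import URLSafeTimedSerializer, BadSignature
    try:
        data = URLSafeTimedSerializer(secret_key).loads(token, max_age=max_age)
    except BadSignature:
        return None
    return data.get('confirm')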
py
b414a6c23c1e3428aa1a986a20f7dda9be969f7e
"""File demonstrating formation of congestion in bottleneck.""" from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig, \ InFlows, SumoLaneChangeParams, SumoCarFollowingParams from flow.core.params import VehicleParams from flow.core.params import TrafficLightParams from flow.scenarios.bottleneck import BottleneckScenario from flow.controllers import SimLaneChangeController, ContinuousRouter from flow.envs.bottleneck_env import BottleneckEnv from flow.core.experiment import Experiment import logging import numpy as np SCALING = 1 DISABLE_TB = True # If set to False, ALINEA will control the ramp meter DISABLE_RAMP_METER = True INFLOW = 2300 class BottleneckDensityExperiment(Experiment): """Experiment object for bottleneck-specific simulations. Extends flow.core.experiment.Experiment """ def __init__(self, env): """Instantiate the bottleneck experiment.""" super().__init__(env) def run(self, num_runs, num_steps, rl_actions=None, convert_to_csv=False): """See parent class.""" info_dict = {} if rl_actions is None: def rl_actions(*_): return None rets = [] mean_rets = [] ret_lists = [] vels = [] mean_vels = [] std_vels = [] mean_densities = [] mean_outflows = [] for i in range(num_runs): vel = np.zeros(num_steps) logging.info('Iter #' + str(i)) ret = 0 ret_list = [] step_outflows = [] step_densities = [] state = self.env.reset() for j in range(num_steps): state, reward, done, _ = self.env.step(rl_actions(state)) vel[j] = np.mean(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_ids())) ret += reward ret_list.append(reward) env = self.env step_outflow = env.k.vehicle.get_outflow_rate(20) density = self.env.get_bottleneck_density() step_outflows.append(step_outflow) step_densities.append(density) if done: break rets.append(ret) vels.append(vel) mean_densities.append(sum(step_densities[100:]) / (num_steps - 100)) env = self.env outflow = env.k.vehicle.get_outflow_rate(10000) mean_outflows.append(outflow) mean_rets.append(np.mean(ret_list)) ret_lists.append(ret_list) mean_vels.append(np.mean(vel)) std_vels.append(np.std(vel)) print('Round {0}, return: {1}'.format(i, ret)) info_dict['returns'] = rets info_dict['velocities'] = vels info_dict['mean_returns'] = mean_rets info_dict['per_step_returns'] = ret_lists info_dict['average_outflow'] = np.mean(mean_outflows) info_dict['per_rollout_outflows'] = mean_outflows info_dict['average_rollout_density_outflow'] = np.mean(mean_densities) print('Average, std return: {}, {}'.format( np.mean(rets), np.std(rets))) print('Average, std speed: {}, {}'.format( np.mean(mean_vels), np.std(std_vels))) self.env.terminate() return info_dict def bottleneck_example(flow_rate, horizon, restart_instance=False, render=None): """ Perform a simulation of vehicles on a bottleneck. Parameters ---------- flow_rate : float total inflow rate of vehicles into the bottleneck horizon : int time horizon restart_instance: bool, optional whether to restart the instance upon reset render: bool, optional specifies whether to use the gui during execution Returns ------- exp: flow.core.experiment.Experiment A non-rl experiment demonstrating the performance of human-driven vehicles on a bottleneck. 
""" if render is None: render = False sim_params = SumoParams( sim_step=0.5, render=render, overtake_right=False, restart_instance=restart_instance) vehicles = VehicleParams() vehicles.add( veh_id="human", lane_change_controller=(SimLaneChangeController, {}), routing_controller=(ContinuousRouter, {}), car_following_params=SumoCarFollowingParams( speed_mode=25, ), lane_change_params=SumoLaneChangeParams( lane_change_mode=1621, ), num_vehicles=1) additional_env_params = { "target_velocity": 40, "max_accel": 1, "max_decel": 1, "lane_change_duration": 5, "add_rl_if_exit": False, "disable_tb": DISABLE_TB, "disable_ramp_metering": DISABLE_RAMP_METER } env_params = EnvParams( horizon=horizon, additional_params=additional_env_params) inflow = InFlows() inflow.add( veh_type="human", edge="1", vehsPerHour=flow_rate, departLane="random", departSpeed=10) traffic_lights = TrafficLightParams() if not DISABLE_TB: traffic_lights.add(node_id="2") if not DISABLE_RAMP_METER: traffic_lights.add(node_id="3") additional_net_params = {"scaling": SCALING, "speed_limit": 23} net_params = NetParams( inflows=inflow, additional_params=additional_net_params) initial_config = InitialConfig( spacing="random", min_gap=5, lanes_distribution=float("inf"), edges_distribution=["2", "3", "4", "5"]) scenario = BottleneckScenario( name="bay_bridge_toll", vehicles=vehicles, net_params=net_params, initial_config=initial_config, traffic_lights=traffic_lights) env = BottleneckEnv(env_params, sim_params, scenario) return BottleneckDensityExperiment(env) if __name__ == '__main__': # import the experiment variable # inflow, number of steps, binary exp = bottleneck_example(INFLOW, 1000, render=True) exp.run(5, 1000)
py
b414a7acc5ab70e1367b597b77a3a5705910b4c8
from io import BytesIO import numpy as np import pytest import requests from doctr import io def _check_doc_content(doc_tensors, num_pages): # 1 doc of 8 pages assert(len(doc_tensors) == num_pages) assert all(isinstance(page, np.ndarray) for page in doc_tensors) assert all(page.dtype == np.uint8 for page in doc_tensors) def test_read_pdf(mock_pdf): doc = io.read_pdf(mock_pdf) _check_doc_content(doc, 2) with open(mock_pdf, 'rb') as f: doc = io.read_pdf(f.read()) _check_doc_content(doc, 2) # Wrong input type with pytest.raises(TypeError): _ = io.read_pdf(123) # Wrong path with pytest.raises(FileNotFoundError): _ = io.read_pdf("my_imaginary_file.pdf") def test_read_img_as_numpy(tmpdir_factory, mock_pdf): # Wrong input type with pytest.raises(TypeError): _ = io.read_img_as_numpy(123) # Non-existing file with pytest.raises(FileNotFoundError): io.read_img_as_numpy("my_imaginary_file.jpg") # Invalid image with pytest.raises(ValueError): io.read_img_as_numpy(str(mock_pdf)) # From path url = 'https://github.com/mindee/doctr/releases/download/v0.2.1/Grace_Hopper.jpg' file = BytesIO(requests.get(url).content) tmp_path = str(tmpdir_factory.mktemp("data").join("mock_img_file.jpg")) with open(tmp_path, 'wb') as f: f.write(file.getbuffer()) # Path & stream with open(tmp_path, 'rb') as f: page_stream = io.read_img_as_numpy(f.read()) for page in (io.read_img_as_numpy(tmp_path), page_stream): # Data type assert isinstance(page, np.ndarray) assert page.dtype == np.uint8 # Shape assert page.shape == (606, 517, 3) # RGB bgr_page = io.read_img_as_numpy(tmp_path, rgb_output=False) assert np.all(page == bgr_page[..., ::-1]) # Resize target_size = (200, 150) resized_page = io.read_img_as_numpy(tmp_path, target_size) assert resized_page.shape[:2] == target_size def test_read_html(): url = "https://www.google.com" pdf_stream = io.read_html(url) assert isinstance(pdf_stream, bytes) def test_document_file(mock_pdf, mock_image_stream): pages = io.DocumentFile.from_images(mock_image_stream) _check_doc_content(pages, 1) assert isinstance(io.DocumentFile.from_pdf(mock_pdf), list) assert isinstance(io.DocumentFile.from_url("https://www.google.com"), list) def test_pdf(mock_pdf): pages = io.DocumentFile.from_pdf(mock_pdf) # As images num_pages = 2 _check_doc_content(pages, num_pages)
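# ---------------------------------------------------------------------------
# Usage sketch of the API exercised above (illustrative only; the mock_pdf and
# mock_image_stream fixtures are presumably provided by the test suite's
# conftest):
#
#   from doctr import io
#   pages = io.DocumentFile.from_pdf("some_file.pdf")         # list of uint8 numpy pages
#   pages = io.DocumentFile.from_url("https://www.google.com")
#   img = io.read_img_as_numpy("photo.jpg", target_size=(200, 150))
# ---------------------------------------------------------------------------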
py
b414a92202b0fbde91921aec472d33248c7734da
import tensorflow.compat.v1 as tf import numpy as np from t3f.tensor_train_base import TensorTrainBase from t3f.tensor_train import TensorTrain from t3f.tensor_train_batch import TensorTrainBatch from t3f import shapes from t3f import utils from t3f import decompositions from t3f import initializers # TODO: add complexities to the comments. def full(tt, name='t3f_full'): """Converts a TensorTrain into a regular tensor or matrix (tf.Tensor). Args: tt: `TensorTrain` or `TensorTrainBatch` object. name: string, name of the Op. Returns: tf.Tensor. """ with tf.name_scope(name, values=tt.tt_cores): if isinstance(tt, TensorTrainBatch): # Batch of Tensor Trains. return _full_tt_batch(tt) else: # TensorTrain object (not batch). return _full_tt(tt) def _full_tt(tt): """Converts a TensorTrain into a regular tensor or matrix (tf.Tensor). Args: tt: `TensorTrain` object. Returns: tf.Tensor. """ num_dims = tt.ndims() ranks = shapes.lazy_tt_ranks(tt) shape = shapes.lazy_shape(tt) raw_shape = shapes.lazy_raw_shape(tt) res = tt.tt_cores[0] for i in range(1, num_dims): res = tf.reshape(res, (-1, ranks[i])) curr_core = tf.reshape(tt.tt_cores[i], (ranks[i], -1)) res = tf.matmul(res, curr_core) if tt.is_tt_matrix(): intermediate_shape = [] for i in range(num_dims): intermediate_shape.append(raw_shape[0][i]) intermediate_shape.append(raw_shape[1][i]) res = tf.reshape(res, intermediate_shape) transpose = [] for i in range(0, 2 * num_dims, 2): transpose.append(i) for i in range(1, 2 * num_dims, 2): transpose.append(i) res = tf.transpose(res, transpose) return tf.reshape(res, shape) else: return tf.reshape(res, shape) def _full_tt_batch(tt): """Converts a TensorTrainBatch into a regular tensor or matrix (tf.Tensor). Args: tt: `TensorTrainBatch` object. Returns: tf.Tensor. """ num_dims = tt.ndims() ranks = shapes.lazy_tt_ranks(tt) shape = shapes.lazy_shape(tt) raw_shape = shapes.lazy_raw_shape(tt) res = tt.tt_cores[0] batch_size = shapes.lazy_batch_size(tt) for i in range(1, num_dims): res = tf.reshape(res, (batch_size, -1, ranks[i])) curr_core = tf.reshape(tt.tt_cores[i], (batch_size, ranks[i], -1)) res = tf.einsum('oqb,obw->oqw', res, curr_core) if tt.is_tt_matrix(): intermediate_shape = [batch_size] for i in range(num_dims): intermediate_shape.append(raw_shape[0][i]) intermediate_shape.append(raw_shape[1][i]) res = tf.reshape(res, intermediate_shape) transpose = [0] for i in range(0, 2 * num_dims, 2): transpose.append(i + 1) for i in range(1, 2 * num_dims, 2): transpose.append(i + 1) res = tf.transpose(res, transpose) return tf.reshape(res, shape) else: return tf.reshape(res, shape) def tt_tt_matmul(tt_matrix_a, tt_matrix_b): """Multiplies two TT-matrices and returns the TT-matrix of the result. Args: tt_matrix_a: `TensorTrain` or `TensorTrainBatch` object containing a TT-matrix (a batch of TT-matrices) of size M x N tt_matrix_b: `TensorTrain` or `TensorTrainBatch` object containing a TT-matrix (a batch of TT-matrices) of size N x P Returns `TensorTrain` object containing a TT-matrix of size M x P if both arguments are `TensorTrain`s `TensorTrainBatch` if any of the arguments is a `TensorTrainBatch` Raises: ValueError is the arguments are not TT matrices or if their sizes are not appropriate for a matrix-by-matrix multiplication. """ # Both TensorTrain and TensorTrainBatch are inherited from TensorTrainBase. 
if not isinstance(tt_matrix_a, TensorTrainBase) or \ not isinstance(tt_matrix_b, TensorTrainBase) or \ not tt_matrix_a.is_tt_matrix() or \ not tt_matrix_b.is_tt_matrix(): raise ValueError('Arguments should be TT-matrices') if not shapes.is_batch_broadcasting_possible(tt_matrix_a, tt_matrix_b): raise ValueError('The batch sizes are different and not 1, broadcasting is ' 'not available.') ndims = tt_matrix_a.ndims() if tt_matrix_b.ndims() != ndims: raise ValueError('Arguments should have the same number of dimensions, ' 'got %d and %d instead.' % (ndims, tt_matrix_b.ndims())) # Convert BatchSize 1 batch into TT object to simplify broadcasting. tt_matrix_a = shapes.squeeze_batch_dim(tt_matrix_a) tt_matrix_b = shapes.squeeze_batch_dim(tt_matrix_b) is_a_batch = isinstance(tt_matrix_a, TensorTrainBatch) is_b_batch = isinstance(tt_matrix_b, TensorTrainBatch) is_res_batch = is_a_batch or is_b_batch a_batch_str = 'o' if is_a_batch else '' b_batch_str = 'o' if is_b_batch else '' res_batch_str = 'o' if is_res_batch else '' einsum_str = '{}aijb,{}cjkd->{}acikbd'.format(a_batch_str, b_batch_str, res_batch_str) result_cores = [] a_shape = shapes.lazy_raw_shape(tt_matrix_a) a_ranks = shapes.lazy_tt_ranks(tt_matrix_a) b_shape = shapes.lazy_raw_shape(tt_matrix_b) b_ranks = shapes.lazy_tt_ranks(tt_matrix_b) if is_res_batch: if is_a_batch: batch_size = shapes.lazy_batch_size(tt_matrix_a) if is_b_batch: batch_size = shapes.lazy_batch_size(tt_matrix_b) for core_idx in range(ndims): a_core = tt_matrix_a.tt_cores[core_idx] b_core = tt_matrix_b.tt_cores[core_idx] curr_res_core = tf.einsum(einsum_str, a_core, b_core) res_left_rank = a_ranks[core_idx] * b_ranks[core_idx] res_right_rank = a_ranks[core_idx + 1] * b_ranks[core_idx + 1] left_mode = a_shape[0][core_idx] right_mode = b_shape[1][core_idx] if is_res_batch: core_shape = (batch_size, res_left_rank, left_mode, right_mode, res_right_rank) else: core_shape = (res_left_rank, left_mode, right_mode, res_right_rank) curr_res_core = tf.reshape(curr_res_core, core_shape) result_cores.append(curr_res_core) res_shape = (tt_matrix_a.get_raw_shape()[0], tt_matrix_b.get_raw_shape()[1]) static_a_ranks = tt_matrix_a.get_tt_ranks() static_b_ranks = tt_matrix_b.get_tt_ranks() out_ranks = [a_r * b_r for a_r, b_r in zip(static_a_ranks, static_b_ranks)] if is_res_batch: return TensorTrainBatch(result_cores, res_shape, out_ranks, batch_size) else: return TensorTrain(result_cores, res_shape, out_ranks) def tt_dense_matmul(tt_matrix_a, matrix_b): """Multiplies a TT-matrix by a regular matrix, returns a regular matrix. Args: tt_matrix_a: `TensorTrain` object containing a TT-matrix of size M x N matrix_b: tf.Tensor of size N x P Returns tf.Tensor of size M x P """ if not isinstance(tt_matrix_a, TensorTrain) or not tt_matrix_a.is_tt_matrix(): raise ValueError('The first argument should be a TT-matrix') ndims = tt_matrix_a.ndims() a_columns = tt_matrix_a.get_shape()[1].value b_rows = matrix_b.get_shape()[0].value if a_columns is not None and b_rows is not None: if a_columns != b_rows: raise ValueError('Arguments shapes should align got %d and %d instead.' 
% (tt_matrix_a.get_shape(), matrix_b.get_shape())) a_shape = shapes.lazy_shape(tt_matrix_a) a_raw_shape = shapes.lazy_raw_shape(tt_matrix_a) if matrix_b.get_shape().is_fully_defined(): b_shape = matrix_b.get_shape().as_list() else: b_shape = tf.shape(matrix_b) a_ranks = shapes.lazy_tt_ranks(tt_matrix_a) # If A is (i0, ..., id-1) x (j0, ..., jd-1) and B is (j0, ..., jd-1) x K, # data is (K, j0, ..., jd-2) x jd-1 x 1 data = tf.transpose(matrix_b) data = tf.reshape(data, (-1, a_raw_shape[1][-1], 1)) for core_idx in reversed(range(ndims)): curr_core = tt_matrix_a.tt_cores[core_idx] # On the k = core_idx iteration, after applying einsum the shape of data # becomes ik x (ik-1..., id-1, K, j0, ..., jk-1) x rank_k data = tf.einsum('aijb,rjb->ira', curr_core, data) if core_idx > 0: # After reshape the shape of data becomes # (ik, ..., id-1, K, j0, ..., jk-2) x jk-1 x rank_k new_data_shape = (-1, a_raw_shape[1][core_idx - 1], a_ranks[core_idx]) data = tf.reshape(data, new_data_shape) # At the end the shape of the data is (i0, ..., id-1) x K return tf.reshape(data, (a_shape[0], b_shape[1])) def dense_tt_matmul(matrix_a, tt_matrix_b): """Multiplies a regular matrix by a TT-matrix, returns a regular matrix. Args: matrix_a: tf.Tensor of size M x N tt_matrix_b: `TensorTrain` object containing a TT-matrix of size N x P Returns tf.Tensor of size M x P """ # TODO: make a more efficient implementation. a_t = tf.transpose(matrix_a) b_t = transpose(tt_matrix_b) return tf.transpose(tt_dense_matmul(b_t, a_t)) def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b): """Multiplies a sparse matrix by a TT-matrix, returns a regular matrix. Args: sparse_matrix_a: tf.SparseTensor of size M x N tt_matrix_b: `TensorTrain` object containing a TT-matrix of size N x P Returns tf.Tensor of size M x P """ raise NotImplementedError # TODO: add flag `return_type = (TT | dense)`? def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b): """Multiplies a TT-matrix by a sparse matrix, returns a regular matrix. Args: tt_matrix_a: `TensorTrain` object containing a TT-matrix of size M x N sparse_matrix_b: tf.SparseTensor of size N x P Returns tf.Tensor of size M x P """ raise NotImplementedError def matmul(a, b, name='t3f_matmul'): """Multiplies two matrices that can be TT-, dense, or sparse. Note that multiplication of two TT-matrices returns a TT-matrix with much larger ranks. Also works for multiplying two batches of TT-matrices or a product between a TT-matrix and a batch of TT-matrices (with broadcasting). Args: a: `TensorTrain`, `TensorTrainBatch`, tf.Tensor, or tf.SparseTensor of size M x N b: `TensorTrain`, `TensorTrainBatch`, tf.Tensor, or tf.SparseTensor of size N x P name: string, name of the Op. Returns If both arguments are `TensorTrain` objects, returns a `TensorTrain` object containing a TT-matrix of size M x P. If at least one of the arguments is a `TensorTrainBatch` object, returns a `TensorTrainBatch` object containing a batch of TT-matrices of size M x P. Otherwise, returns tf.Tensor of size M x P. """ # TODO: is it safe to check types? What if a class is derived from TT? 
if isinstance(a, TensorTrainBase) and isinstance(b, TensorTrainBase): with tf.name_scope(name, values=a.tt_cores+b.tt_cores): return tt_tt_matmul(a, b) elif isinstance(a, TensorTrain) and isinstance(b, tf.Tensor): with tf.name_scope(name, values=a.tt_cores+(b,)): return tt_dense_matmul(a, b) elif isinstance(a, tf.Tensor) and isinstance(b, TensorTrain): with tf.name_scope(name, values=(a,)+b.tt_cores): return dense_tt_matmul(a, b) elif isinstance(a, TensorTrain) and isinstance(b, tf.SparseTensor): with tf.name_scope(name, values=a.tt_cores+(b,)): return tt_sparse_matmul(a, b) elif isinstance(a, tf.SparseTensor) and isinstance(b, TensorTrain): with tf.name_scope(name, values=(a,)+b.tt_cores): return sparse_tt_matmul(a, b) else: raise ValueError('Argument types are not supported in matmul: %s x %s' % (a, b)) def tt_tt_flat_inner(tt_a, tt_b): """Inner product between two TT-tensors or TT-matrices along all axis. The shapes of tt_a and tt_b should coincide. Args: tt_a: `TensorTrain` or `TensorTrainBatch` object tt_b: `TensorTrain` or `TensorTrainBatch` object Returns a number or a Tensor with numbers for each element in the batch. sum of products of all the elements of tt_a and tt_b Raises: ValueError if the arguments are not `TensorTrain` objects, have different number of TT-cores, different underlying shape, or if you are trying to compute inner product between a TT-matrix and a TT-tensor. Complexity: Multiplying two single TT-objects is O(d r^3 n) where d is the number of TT-cores (tt_a.ndims()), r is the largest TT-rank max(tt_a.get_tt_rank(), tt_b.get_tt_rank()) and n is the size of the axis dimension, e.g. for a tensor of size 4 x 4 x 4, n is 4; for a 9 x 64 matrix of raw shape (3, 3, 3) x (4, 4, 4) n is 12 A more precise complexity is O(d r1 r2 n max(r1, r2)) where r1 is the largest TT-rank of tt_a and r2 is the largest TT-rank of tt_b. The complexity of this operation for batch input is O(batch_size d r^3 n). """ if not isinstance(tt_a, TensorTrainBase) or not isinstance(tt_b, TensorTrainBase): raise ValueError('Arguments should be TensorTrains') if tt_a.is_tt_matrix() != tt_b.is_tt_matrix(): raise ValueError('One of the arguments is a TT-tensor, the other is ' 'a TT-matrix, disallowed') are_both_matrices = tt_a.is_tt_matrix() and tt_b.is_tt_matrix() if not shapes.is_batch_broadcasting_possible(tt_a, tt_b): raise ValueError('The batch sizes are different and not 1, broadcasting is ' 'not available.') # TODO: compare shapes and raise if not consistent. ndims = tt_a.ndims() if tt_b.ndims() != ndims: raise ValueError('Arguments should have the same number of dimensions, ' 'got %d and %d instead.' % (ndims, tt_b.ndims())) axes_str = 'ij' if are_both_matrices else 'i' # Convert BatchSize 1 batch into TT object to simplify broadcasting. 
tt_a = shapes.squeeze_batch_dim(tt_a) tt_b = shapes.squeeze_batch_dim(tt_b) is_a_batch = isinstance(tt_a, TensorTrainBatch) is_b_batch = isinstance(tt_b, TensorTrainBatch) is_res_batch = is_a_batch or is_b_batch a_batch_str = 'o' if is_a_batch else '' b_batch_str = 'o' if is_b_batch else '' res_batch_str = 'o' if is_res_batch else '' init_einsum_str = '{1}a{0}b,{2}c{0}d->{3}bd'.format(axes_str, a_batch_str, b_batch_str, res_batch_str) a_core = tt_a.tt_cores[0] b_core = tt_b.tt_cores[0] # Simplest example of this operation: # if both arguments are TT-tensors, then it is # res = tf.einsum('aib,cid->bd', a_core, b_core) res = tf.einsum(init_einsum_str, a_core, b_core) einsum_str = '{3}ac,{1}a{0}b,{2}c{0}d->{3}bd'.format(axes_str, a_batch_str, b_batch_str, res_batch_str) for core_idx in range(1, ndims): a_core = tt_a.tt_cores[core_idx] b_core = tt_b.tt_cores[core_idx] # Simplest example of this operation: # if both arguments are TT-tensors, then it is # res = tf.einsum('ac,aib,cid->bd', res, a_core, b_core) res = tf.einsum(einsum_str, res, a_core, b_core) return tf.squeeze(res) def tt_dense_flat_inner(tt_a, dense_b): """Inner product between a TT-tensor (or TT-matrix) and tf.Tensor along all axis. The shapes of tt_a and dense_b should coincide. Args: tt_a: `TensorTrain` object dense_b: tf.Tensor Returns a number sum of products of all the elements of tt_a and dense_b """ raise NotImplementedError def tt_sparse_flat_inner(tt_a, sparse_b): """Inner product between a TT-tensor (or TT-matrix) and tf.SparseTensor along all axis. The shapes of tt_a and sparse_b should coincide. Args: tt_a: `TensorTrain` object sparse_b: tf.SparseTensor Returns a number sum of products of all the elements of tt_a and sparse_b """ if sparse_b.indices.get_shape().is_fully_defined(): num_elements = sparse_b.indices.get_shape()[0] else: num_elements = tf.shape(sparse_b.indices)[0] a_shape = shapes.lazy_raw_shape(tt_a) a_ranks = shapes.lazy_tt_ranks(tt_a) if tt_a.is_tt_matrix(): tt_a_elements = tf.ones((num_elements, 1, 1), dtype=tt_a.dtype) # TODO: use t3f.shape is safer?? tensor_shape = tt_a.get_raw_shape() row_idx_linear = tf.cast(sparse_b.indices[:, 0], tf.int64) row_idx = utils.unravel_index(row_idx_linear, tf.cast(tensor_shape[0], tf.int64)) col_idx_linear = tf.cast(sparse_b.indices[:, 1], tf.int64) col_idx = utils.unravel_index(col_idx_linear, tf.cast(tensor_shape[1], tf.int64)) for core_idx in range(tt_a.ndims()): curr_core = tt_a.tt_cores[core_idx] left_rank = a_ranks[core_idx] right_rank = a_ranks[core_idx + 1] curr_core = tf.transpose(curr_core, (1, 2, 0, 3)) curr_core_shape = (a_shape[0][core_idx]*a_shape[1][core_idx], left_rank, right_rank) curr_core = tf.reshape(curr_core, curr_core_shape) # Ravel multiindex (row_idx[:, core_idx], col_idx[:, core_idx]) into # a linear index to use tf.gather that supports only first dimensional # gather. # TODO: use gather_nd instead. curr_elements_idx = row_idx[:, core_idx] * tensor_shape[1][core_idx] curr_elements_idx += col_idx[:, core_idx] core_slices = tf.gather(curr_core, curr_elements_idx) tt_a_elements = tf.matmul(tt_a_elements, core_slices) else: tt_a_elements = gather_nd(tt_a, sparse_b.indices) tt_a_elements = tf.reshape(tt_a_elements, (1, -1)) sparse_b_elements = tf.reshape(sparse_b.values, (-1, 1)) result = tf.matmul(tt_a_elements, sparse_b_elements) # Convert a 1x1 matrix into a number. result = result[0, 0] return result def dense_tt_flat_inner(dense_a, tt_b): """Inner product between a tf.Tensor and TT-tensor (or TT-matrix) along all axis. 
The shapes of dense_a and tt_b should coincide. Args: dense_a: tf.Tensor tt_b: `TensorTrain` object Returns a number sum of products of all the elements of dense_a and tt_b """ raise NotImplementedError def sparse_tt_flat_inner(sparse_a, tt_b): """Inner product between a tf.SparseTensor and TT-tensor (or TT-matrix) along all axis. The shapes of sparse_a and tt_b should coincide. Args: sparse_a: tf.SparseTensor tt_b: `TensorTrain` object Returns a number sum of products of all the elements of sparse_a and tt_b """ raise NotImplementedError def flat_inner(a, b, name='t3f_flat_inner'): """Inner product along all axis. The shapes of a and b should coincide. Args: a: `TensorTrain`, `TensorTrainBatch`, tf.Tensor, or tf.SparseTensor b: `TensorTrain`, `TensorTrainBatch`, tf.Tensor, or tf.SparseTensor name: string, name of the Op. Returns a number sum of products of all the elements of a and b OR or a tf.Tensor of size batch_size sum of products of all the elements of a and b for each element in the batch. """ # TODO: is it safe to check types? What if a class is derived from TT? if isinstance(a, TensorTrainBase) and isinstance(b, TensorTrainBase): with tf.name_scope(name, values=a.tt_cores+b.tt_cores): return tt_tt_flat_inner(a, b) elif isinstance(a, TensorTrain) and isinstance(b, tf.Tensor): with tf.name_scope(name, values=a.tt_cores+(b,)): return tt_dense_flat_inner(a, b) elif isinstance(a, tf.Tensor) and isinstance(b, TensorTrain): with tf.name_scope(name, values=(a,)+b.tt_cores): return dense_tt_flat_inner(a, b) elif isinstance(a, TensorTrain) and isinstance(b, tf.SparseTensor): with tf.name_scope(name, values=a.tt_cores+(b,)): return tt_sparse_flat_inner(a, b) elif isinstance(a, tf.SparseTensor) and isinstance(b, TensorTrain): with tf.name_scope(name, values=(a,)+b.tt_cores): return sparse_tt_flat_inner(a, b) else: raise ValueError('Argument types are not supported in flat_inner: %s x %s' % (a, b)) def _add_tensor_cores(tt_a, tt_b): """Internal function to be called from add for two TT-tensors. Does the actual assembling of the TT-cores to add two TT-tensors. """ ndims = tt_a.ndims() dtype = tt_a.dtype shape = shapes.lazy_raw_shape(tt_a) a_ranks = shapes.lazy_tt_ranks(tt_a) b_ranks = shapes.lazy_tt_ranks(tt_b) tt_cores = [] for core_idx in range(ndims): a_core = tt_a.tt_cores[core_idx] b_core = tt_b.tt_cores[core_idx] if core_idx == 0: curr_core = tf.concat((a_core, b_core), axis=2) elif core_idx == ndims - 1: curr_core = tf.concat((a_core, b_core), axis=0) else: upper_zeros = tf.zeros((a_ranks[core_idx], shape[0][core_idx], b_ranks[core_idx + 1]), dtype) lower_zeros = tf.zeros((b_ranks[core_idx], shape[0][core_idx], a_ranks[core_idx + 1]), dtype) upper = tf.concat((a_core, upper_zeros), axis=2) lower = tf.concat((lower_zeros, b_core), axis=2) curr_core = tf.concat((upper, lower), axis=0) tt_cores.append(curr_core) return tt_cores def _add_batch_tensor_cores(tt_a, tt_b): """Internal function to be called from add for two batches of TT-tensors. Does the actual assembling of the TT-cores to add two batches of TT-tensors. """ ndims = tt_a.ndims() dtype = tt_a.dtype shape = shapes.lazy_raw_shape(tt_a) a_ranks = shapes.lazy_tt_ranks(tt_a) b_ranks = shapes.lazy_tt_ranks(tt_b) if isinstance(tt_a, TensorTrainBatch) and tt_a.batch_size == 1: # We add 1 element batch tt_a to a batch_size element batch tt_b to get # the answer TensorTrainBatch of batch_size == tt_b.batch_size. 
batch_size = shapes.lazy_batch_size(tt_b) else: batch_size = shapes.lazy_batch_size(tt_a) tt_a = shapes.expand_batch_dim(tt_a) tt_b = shapes.expand_batch_dim(tt_b) tt_cores = [] for core_idx in range(ndims): a_core = tt_a.tt_cores[core_idx] if tt_a.batch_size == 1: a_core = tf.tile(a_core, (batch_size, 1, 1, 1)) b_core = tt_b.tt_cores[core_idx] if tt_b.batch_size == 1: b_core = tf.tile(b_core, (batch_size, 1, 1, 1)) if core_idx == 0: curr_core = tf.concat((a_core, b_core), axis=3) elif core_idx == ndims - 1: curr_core = tf.concat((a_core, b_core), axis=1) else: upper_zeros = tf.zeros((batch_size, a_ranks[core_idx], shape[0][core_idx], b_ranks[core_idx + 1]), dtype) lower_zeros = tf.zeros((batch_size, b_ranks[core_idx], shape[0][core_idx], a_ranks[core_idx + 1]), dtype) upper = tf.concat((a_core, upper_zeros), axis=3) lower = tf.concat((lower_zeros, b_core), axis=3) curr_core = tf.concat((upper, lower), axis=1) tt_cores.append(curr_core) return tt_cores, batch_size def _add_matrix_cores(tt_a, tt_b): """Internal function to be called from add for two TT-matrices. Does the actual assembling of the TT-cores to add two TT-matrices. """ ndims = tt_a.ndims() dtype = tt_a.dtype shape = shapes.lazy_raw_shape(tt_a) a_ranks = shapes.lazy_tt_ranks(tt_a) b_ranks = shapes.lazy_tt_ranks(tt_b) tt_cores = [] for core_idx in range(ndims): a_core = tt_a.tt_cores[core_idx] b_core = tt_b.tt_cores[core_idx] if core_idx == 0: curr_core = tf.concat((a_core, b_core), axis=3) elif core_idx == ndims - 1: curr_core = tf.concat((a_core, b_core), axis=0) else: upper_zeros = tf.zeros((a_ranks[core_idx], shape[0][core_idx], shape[1][core_idx], b_ranks[core_idx + 1]), dtype) lower_zeros = tf.zeros((b_ranks[core_idx], shape[0][core_idx], shape[1][core_idx], a_ranks[core_idx + 1]), dtype) upper = tf.concat((a_core, upper_zeros), axis=3) lower = tf.concat((lower_zeros, b_core), axis=3) curr_core = tf.concat((upper, lower), axis=0) tt_cores.append(curr_core) return tt_cores def _add_batch_matrix_cores(tt_a, tt_b): """Internal function to be called from add for two batches of TT-matrices. Does the actual assembling of the TT-cores to add two batches of TT-matrices. """ ndims = tt_a.ndims() dtype = tt_a.dtype shape = shapes.lazy_raw_shape(tt_a) a_ranks = shapes.lazy_tt_ranks(tt_a) b_ranks = shapes.lazy_tt_ranks(tt_b) if isinstance(tt_a, TensorTrainBatch) and tt_a.batch_size == 1: # We add 1 element batch tt_a to a batch_size element batch tt_b to get # the answer TensorTrainBatch of batch_size == tt_b.batch_size. 
batch_size = shapes.lazy_batch_size(tt_b) else: batch_size = shapes.lazy_batch_size(tt_a) tt_a = shapes.expand_batch_dim(tt_a) tt_b = shapes.expand_batch_dim(tt_b) tt_cores = [] for core_idx in range(ndims): a_core = tt_a.tt_cores[core_idx] if tt_a.batch_size == 1: a_core = tf.tile(a_core, (batch_size, 1, 1, 1, 1)) b_core = tt_b.tt_cores[core_idx] if tt_b.batch_size == 1: b_core = tf.tile(b_core, (batch_size, 1, 1, 1, 1)) if core_idx == 0: curr_core = tf.concat((a_core, b_core), axis=4) elif core_idx == ndims - 1: curr_core = tf.concat((a_core, b_core), axis=1) else: upper_zeros = tf.zeros((batch_size, a_ranks[core_idx], shape[0][core_idx], shape[1][core_idx], b_ranks[core_idx + 1]), dtype) lower_zeros = tf.zeros((batch_size, b_ranks[core_idx], shape[0][core_idx], shape[1][core_idx], a_ranks[core_idx + 1]), dtype) upper = tf.concat((a_core, upper_zeros), axis=4) lower = tf.concat((lower_zeros, b_core), axis=4) curr_core = tf.concat((upper, lower), axis=1) tt_cores.append(curr_core) return tt_cores, batch_size def add(tt_a, tt_b, name='t3f_add'): """Returns a TensorTrain corresponding to elementwise sum tt_a + tt_b. The shapes of tt_a and tt_b should coincide. Supports broadcasting: add(TensorTrainBatch, TensorTrain) adds TensorTrain to each element in the batch of TTs in TensorTrainBatch. Args: tt_a: `TensorTrain`, `TensorTrainBatch`, TT-tensor, or TT-matrix tt_b: `TensorTrain`, `TensorTrainBatch`, TT-tensor, or TT-matrix name: string, name of the Op. Returns a `TensorTrain` object corresponding to the element-wise sum of arguments if both arguments are `TensorTrain`s. OR a `TensorTrainBatch` if at least one of the arguments is `TensorTrainBatch` Raises ValueError if the arguments shapes do not coincide """ ndims = tt_a.ndims() if tt_a.is_tt_matrix() != tt_b.is_tt_matrix(): raise ValueError('The arguments should be both TT-tensors or both ' 'TT-matrices') if tt_a.get_raw_shape() != tt_b.get_raw_shape(): raise ValueError('The arguments should have the same shape.') if not shapes.is_batch_broadcasting_possible(tt_a, tt_b): raise ValueError('The batch sizes are different and not 1, broadcasting is ' 'not available.') with tf.name_scope(name, values=tt_a.tt_cores+tt_b.tt_cores): is_a_batch = isinstance(tt_a, TensorTrainBatch) is_b_batch = isinstance(tt_b, TensorTrainBatch) is_batch_case = is_a_batch or is_b_batch batch_size = None if is_batch_case: if tt_a.is_tt_matrix(): tt_cores, batch_size = _add_batch_matrix_cores(tt_a, tt_b) else: tt_cores, batch_size = _add_batch_tensor_cores(tt_a, tt_b) else: if tt_a.is_tt_matrix(): tt_cores = _add_matrix_cores(tt_a, tt_b) else: tt_cores = _add_tensor_cores(tt_a, tt_b) out_ranks = [1] static_a_ranks = tt_a.get_tt_ranks() static_b_ranks = tt_b.get_tt_ranks() for core_idx in range(1, ndims): out_ranks.append(static_a_ranks[core_idx] + static_b_ranks[core_idx]) out_ranks.append(1) if is_batch_case: return TensorTrainBatch(tt_cores, tt_a.get_raw_shape(), out_ranks, batch_size) else: return TensorTrain(tt_cores, tt_a.get_raw_shape(), out_ranks) def multiply(tt_left, right, name='t3f_multiply'): """Returns a TensorTrain corresponding to element-wise product tt_left * right. 
Supports broadcasting: multiply(TensorTrainBatch, TensorTrain) returns TensorTrainBatch consisting of element-wise products of TT in TensorTrainBatch and TensorTrain multiply(TensorTrainBatch_a, TensorTrainBatch_b) returns TensorTrainBatch consisting of element-wise products of TT in TensorTrainBatch_a and TT in TensorTrainBatch_b Batch sizes should support broadcasting Args: tt_left: `TensorTrain` OR `TensorTrainBatch` right: `TensorTrain` OR `TensorTrainBatch` OR a number. name: string, name of the Op. Returns a `TensorTrain` or `TensorTrainBatch` object corresponding to the element-wise product of the arguments. Raises ValueError if the arguments shapes do not coincide or broadcasting is not possible. """ is_left_batch = isinstance(tt_left, TensorTrainBatch) is_right_batch = isinstance(right, TensorTrainBatch) is_batch_case = is_left_batch or is_right_batch ndims = tt_left.ndims() if not isinstance(right, TensorTrainBase): with tf.name_scope(name, values=tt_left.tt_cores+(right,)): # Assume right is a number, not TensorTrain. # To squash right uniformly across TT-cores we pull its absolute value # and raise to the power 1/ndims. First TT-core is multiplied by the sign # of right. tt_cores = list(tt_left.tt_cores) fact = tf.pow(tf.cast(tf.abs(right), tt_left.dtype), 1.0 / ndims) sign = tf.cast(tf.sign(right), tt_left.dtype) for i in range(len(tt_cores)): tt_cores[i] = fact * tt_cores[i] tt_cores[0] = tt_cores[0] * sign out_ranks = tt_left.get_tt_ranks() if is_left_batch: out_batch_size = tt_left.batch_size else: with tf.name_scope(name, values=tt_left.tt_cores+right.tt_cores): if tt_left.is_tt_matrix() != right.is_tt_matrix(): raise ValueError('The arguments should be both TT-tensors or both ' 'TT-matrices') if tt_left.get_raw_shape() != right.get_raw_shape(): raise ValueError('The arguments should have the same shape.') out_batch_size = 1 dependencies = [] can_determine_if_broadcast = True if is_left_batch and is_right_batch: if tt_left.batch_size is None and right.batch_size is None: can_determine_if_broadcast = False elif tt_left.batch_size is None and right.batch_size is not None: if right.batch_size > 1: can_determine_if_broadcast = False elif tt_left.batch_size is not None and right.batch_size is None: if tt_left.batch_size > 1: can_determine_if_broadcast = False if not can_determine_if_broadcast: # Cannot determine if broadcasting is needed. Avoid broadcasting and # assume elementwise multiplication AND add execution time assert to # print a better error message if the batch sizes turn out to be # different. message = ('The batch sizes were unknown on compilation stage, so ' 'assumed elementwise multiplication (i.e. no broadcasting). ' 'Now it seems that they are different after all :') data = [message, shapes.lazy_batch_size(tt_left), ' x ', shapes.lazy_batch_size(right)] bs_eq = tf.assert_equal(shapes.lazy_batch_size(tt_left), shapes.lazy_batch_size(right), data=data) dependencies.append(bs_eq) do_broadcast = shapes.is_batch_broadcasting_possible(tt_left, right) if not can_determine_if_broadcast: # Assume elementwise multiplication if broadcasting cannot be determined # on compilation stage. 
do_broadcast = False if not do_broadcast and can_determine_if_broadcast: raise ValueError('The batch sizes are different and not 1, broadcasting ' 'is not available.') a_ranks = shapes.lazy_tt_ranks(tt_left) b_ranks = shapes.lazy_tt_ranks(right) shape = shapes.lazy_raw_shape(tt_left) output_str = '' bs_str_left = '' bs_str_right = '' if is_batch_case: if is_left_batch and is_right_batch: # Both arguments are batches of equal size. if tt_left.batch_size == right.batch_size or not can_determine_if_broadcast: bs_str_left = 'n' bs_str_right = 'n' output_str = 'n' if not can_determine_if_broadcast: out_batch_size = None else: out_batch_size = tt_left.batch_size else: # Broadcasting (e.g batch_sizes are 1 and n>1). bs_str_left = 'n' bs_str_right = 'm' output_str = 'nm' if tt_left.batch_size is None or tt_left.batch_size > 1: out_batch_size = tt_left.batch_size else: out_batch_size = right.batch_size else: # One of the arguments is TensorTrain. if is_left_batch: bs_str_left = 'n' bs_str_right = '' out_batch_size = tt_left.batch_size else: bs_str_left = '' bs_str_right = 'n' out_batch_size = right.batch_size output_str = 'n' is_matrix = tt_left.is_tt_matrix() tt_cores = [] for core_idx in range(ndims): a_core = tt_left.tt_cores[core_idx] b_core = right.tt_cores[core_idx] left_rank = a_ranks[core_idx] * b_ranks[core_idx] right_rank = a_ranks[core_idx + 1] * b_ranks[core_idx + 1] if is_matrix: with tf.control_dependencies(dependencies): curr_core = tf.einsum('{0}aijb,{1}cijd->{2}acijbd'.format(bs_str_left, bs_str_right, output_str), a_core, b_core) curr_core = tf.reshape(curr_core, (-1, left_rank, shape[0][core_idx], shape[1][core_idx], right_rank)) if not is_batch_case: curr_core = tf.squeeze(curr_core, axis=0) else: with tf.control_dependencies(dependencies): curr_core = tf.einsum('{0}aib,{1}cid->{2}acibd'.format(bs_str_left, bs_str_right, output_str), a_core, b_core) curr_core = tf.reshape(curr_core, (-1, left_rank, shape[0][core_idx], right_rank)) if not is_batch_case: curr_core = tf.squeeze(curr_core, axis=0) tt_cores.append(curr_core) combined_ranks = zip(tt_left.get_tt_ranks(), right.get_tt_ranks()) out_ranks = [a * b for a, b in combined_ranks] if not is_batch_case: return TensorTrain(tt_cores, tt_left.get_raw_shape(), out_ranks) else: return TensorTrainBatch(tt_cores, tt_left.get_raw_shape(), out_ranks, batch_size=out_batch_size) def frobenius_norm_squared(tt, differentiable=False, name='t3f_frobenius_norm_squared'): """Frobenius norm squared of `TensorTrain` or of each TT in `TensorTrainBatch`. Frobenius norm squared is the sum of squares of all elements in a tensor. Args: tt: `TensorTrain` or `TensorTrainBatch` object differentiable: bool, whether to use a differentiable implementation or a fast and stable implementation based on QR decomposition. name: string, name of the Op. 
Returns a number which is the Frobenius norm squared of `tt`, if it is `TensorTrain` OR a Tensor of size tt.batch_size, consisting of the Frobenius norms squared of each TensorTrain in `tt`, if it is `TensorTrainBatch` """ with tf.name_scope(name, values=tt.tt_cores): if differentiable: if hasattr(tt, 'batch_size'): bs_str = 'n' else: bs_str = '' if tt.is_tt_matrix(): running_prod = tf.einsum('{0}aijb,{0}cijd->{0}bd'.format(bs_str), tt.tt_cores[0], tt.tt_cores[0]) else: running_prod = tf.einsum('{0}aib,{0}cid->{0}bd'.format(bs_str), tt.tt_cores[0], tt.tt_cores[0]) for core_idx in range(1, tt.ndims()): curr_core = tt.tt_cores[core_idx] if tt.is_tt_matrix(): running_prod = tf.einsum('{0}ac,{0}aijb,{0}cijd->{0}bd'.format(bs_str), running_prod, curr_core, curr_core) else: running_prod = tf.einsum('{0}ac,{0}aib,{0}cid->{0}bd'.format(bs_str), running_prod, curr_core, curr_core) return tf.squeeze(running_prod, [-1, -2]) else: orth_tt = decompositions.orthogonalize_tt_cores(tt, left_to_right=True) # All the cores of orth_tt except the last one are orthogonal, hence # the Frobenius norm of orth_tt equals to the norm of the last core. if hasattr(tt, 'batch_size'): batch_size = shapes.lazy_batch_size(tt) last_core = tf.reshape(orth_tt.tt_cores[-1], (batch_size, -1)) return tf.norm(last_core, axis=1) ** 2 else: return tf.norm(orth_tt.tt_cores[-1]) ** 2 def frobenius_norm(tt, epsilon=1e-5, differentiable=False, name='t3f_frobenius_norm'): """Frobenius norm of `TensorTrain` or of each TT in `TensorTrainBatch` Frobenius norm is the sqrt of the sum of squares of all elements in a tensor. Args: tt: `TensorTrain` or `TensorTrainBatch` object epsilon: the function actually computes sqrt(norm_squared + epsilon) for numerical stability (e.g. gradient of sqrt at zero is inf). differentiable: bool, whether to use a differentiable implementation or a fast and stable implementation based on QR decomposition. name: string, name of the Op. Returns a number which is the Frobenius norm of `tt`, if it is `TensorTrain` OR a Tensor of size tt.batch_size, consisting of the Frobenius norms of each TensorTrain in `tt`, if it is `TensorTrainBatch` """ with tf.name_scope(name, values=tt.tt_cores): return tf.sqrt(frobenius_norm_squared(tt, differentiable) + epsilon) def transpose(tt_matrix, name='t3f_transpose'): """Transpose a TT-matrix or a batch of TT-matrices. Args: tt_matrix: `TensorTrain` or `TensorTrainBatch` object containing a TT-matrix (or a batch of TT-matrices). name: string, name of the Op. Returns: `TensorTrain` or `TensorTrainBatch` object containing a transposed TT-matrix (or a batch of TT-matrices). Raises: ValueError if the argument is not a TT-matrix. """ if not isinstance(tt_matrix, TensorTrainBase) or not tt_matrix.is_tt_matrix(): raise ValueError('The argument should be a TT-matrix.') with tf.name_scope(name, values=tt_matrix.tt_cores): transposed_tt_cores = [] for core_idx in range(tt_matrix.ndims()): curr_core = tt_matrix.tt_cores[core_idx] if isinstance(tt_matrix, TensorTrain): transposed_tt_cores.append(tf.transpose(curr_core, (0, 2, 1, 3))) else: # TensorTrainBatch. 
transposed_tt_cores.append(tf.transpose(curr_core, (0, 1, 3, 2, 4))) tt_matrix_shape = tt_matrix.get_raw_shape() transposed_shape = tt_matrix_shape[1], tt_matrix_shape[0] tt_ranks = tt_matrix.get_tt_ranks() if isinstance(tt_matrix, TensorTrain): return TensorTrain(transposed_tt_cores, transposed_shape, tt_ranks) else: batch_size = tt_matrix.batch_size return TensorTrainBatch(transposed_tt_cores, transposed_shape, tt_ranks, batch_size) def quadratic_form(A, b, c, name='t3f_bilinear_form'): """Outdated, see `bilinear_form`.""" print('Warning: function quadratic_form is being depricated and ' 'replaced with bilinear_form.') return bilinear_form(A, b, c) def bilinear_form(A, b, c, name='t3f_bilinear_form'): """Bilinear form b^t A c; A is a TT-matrix, b and c can be batches. Args: A: `TensorTrain` object containing a TT-matrix of size N x M. b: `TensorTrain` object containing a TT-matrix of size N x 1 or `TensorTrainBatch` with a batch of TT-matrices of size N x 1. c: `TensorTrain` object containing a TT-matrix of size M x 1 or `TensorTrainBatch` with a batch of TT-matrices of size M x 1. name: string, name of the Op. Returns: A number, the value of the bilinear form if all the arguments are `TensorTrain`s. OR tf.Tensor of size batch_size if at least one of the arguments is `TensorTrainBatch` Raises: ValueError if the arguments are not TT-matrices or if the shapes are not consistent. Complexity: O(batch_size r_A r_c r_b n d (r_b + r_A n + r_c)) d is the number of TT-cores (A.ndims()); r_A is the largest TT-rank of A max(A.get_tt_rank()) n is the size of the axis dimensions e.g. if b and c are tensors of shape (3, 3, 3), A is a 27 x 27 matrix of tensor shape (3, 3, 3) x (3, 3, 3) then n is 3 """ if not isinstance(A, TensorTrainBase) or not A.is_tt_matrix(): raise ValueError('The arguments should be a TT-matrix.') # TODO: support tf.Tensor as b and c. if not isinstance(b, TensorTrainBase) or not b.is_tt_matrix(): raise ValueError('The arguments should be a TT-matrix.') if not isinstance(c, TensorTrainBase) or not c.is_tt_matrix(): raise ValueError('The arguments should be a TT-matrix.') b_is_batch = isinstance(b, TensorTrainBatch) c_is_batch = isinstance(b, TensorTrainBatch) b_bs_str = 'p' if b_is_batch else '' c_bs_str = 'p' if c_is_batch else '' out_bs_str = 'p' if b_is_batch or c_is_batch else '' with tf.name_scope(name, values=A.tt_cores+b.tt_cores+c.tt_cores): ndims = A.ndims() curr_core_1 = b.tt_cores[0] curr_core_2 = c.tt_cores[0] curr_matrix_core = A.tt_cores[0] # We enumerate the dummy dimension (that takes 1 value) with `k`. # You may think that using two different k would be faster, but in my # experience it's even a little bit slower (but neglectable in general). einsum_str = '{0}aikb,cijd,{1}ejkf->{2}bdf'.format(b_bs_str, c_bs_str, out_bs_str) res = tf.einsum(einsum_str, curr_core_1, curr_matrix_core, curr_core_2) for core_idx in range(1, ndims): curr_core_1 = b.tt_cores[core_idx] curr_core_2 = c.tt_cores[core_idx] curr_matrix_core = A.tt_cores[core_idx] einsum_str = '{2}ace,{0}aikb,cijd,{1}ejkf->{2}bdf'.format(b_bs_str, c_bs_str, out_bs_str) res = tf.einsum(einsum_str, res, curr_core_1, curr_matrix_core, curr_core_2) # Squeeze to make the result a number instead of 1 x 1 for NON batch case # and to make the result a tensor of size # batch_size # instead of # batch_size x 1 x 1 # in the batch case. return tf.squeeze(res) def cast(tt, dtype, name='t3f_cast'): """Casts a tt-tensor to a new type. Args: tt: `TensorTrain` object. dtype: The destination type. name: string, name of the Op. 
Raises: TypeError: If `tt` cannot be cast to the `dtype`. ValueError: If `tt` is not a `TensorTrain` or `TensorTrainBatch`. """ with tf.name_scope(name, values=tt.tt_cores): res_cores = [] cores = tt.tt_cores for core_idx in range(tt.ndims()): res_cores.append(tf.cast(cores[core_idx], dtype)) res_shape = tt.get_raw_shape() res_ranks = tt.get_tt_ranks() if isinstance(tt, TensorTrain): return TensorTrain(res_cores, res_shape, res_ranks) elif isinstance(tt, TensorTrainBatch): return TensorTrainBatch(res_cores, res_shape, res_ranks, tt.batch_size) else: raise ValueError('Unsupported type of input "%s", should be TensorTrain ' 'or TensorTrainBatch.' % tt) def gather_nd(tt, indices, name='t3f_gather_nd'): """out[i] = tt[indices[i, 0], indices[i, 1], ...] Equivalent to tf.gather_nd(t3f.full(tt), indices) but much faster, since it does not materialize the full tensor. For batches of TT works indices should include the batch dimension as well. Args: tt: `TensorTrain` or `TensorTrainBatch` object representing a tensor (TT-matrices are not implemented yet) indices: numpy array, tf.Tensor, placeholder with 2 or more dimensions. The last dimension indices.shape[-1] should be equal to the numbers of dimensions in TT: indices.shape[-1] = tt.ndims for `TensorTrain` indices.shape[-1] = tt.ndims + 1 for `TensorTrainBatch` name: string, name of the Op. Returns: tf.Tensor with elements specified by indices. Raises: ValueError if `indices` have wrong shape. NotImplementedError if `tt` is a TT-matrix. """ with tf.name_scope(name, values=tt.tt_cores+(indices,)): if tt.is_tt_matrix(): raise NotImplementedError('gather_nd doesnt support TT-matrices yet ' '(got %s)' % tt) indices = tf.convert_to_tensor(indices) if isinstance(tt, TensorTrainBatch): if indices.get_shape()[-1] != tt.ndims() + 1: raise ValueError('The last dimension of indices (%d) should have ' 'the same size as the number of dimensions in the tt ' 'object (%d) + 1 (for the batch dimension).' % (indices.get_shape()[-1], tt.ndims())) else: if indices.get_shape()[-1] != tt.ndims(): raise ValueError('The last dimension of indices (%d) should have ' 'the same size as the number of dimensions in the tt ' 'object (%d).' % (indices.get_shape()[-1], tt.ndims())) tt_elements = tf.ones(tf.shape(indices)[:-1], dtype=tt.dtype) tt_elements = tf.reshape(tt_elements, (-1, 1, 1)) for core_idx in range(tt.ndims()): curr_core = tt.tt_cores[core_idx] if isinstance(tt, TensorTrainBatch): curr_core = tf.transpose(curr_core, (0, 2, 1, 3)) curr_idx = tf.stack((indices[:, 0], indices[:, core_idx + 1]), axis=1) core_slices = tf.gather_nd(curr_core, curr_idx) else: curr_core = tf.transpose(curr_core, (1, 0, 2)) core_slices = tf.gather(curr_core, indices[:, core_idx]) tt_elements = tf.matmul(tt_elements, core_slices) tt_elements = tf.reshape(tt_elements, tf.shape(indices)[:-1]) return tt_elements def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'): """Renormalizes TT-cores to make them of the same Frobenius norm. Doesn't change the tensor represented by `tt` object, but renormalizes the TT-cores to make further computations more stable. Args: tt: `TensorTrain` or `TensorTrainBatch` object epsilon: parameter for numerical stability of sqrt name: string, name of the Op. Returns: `TensorTrain` or `TensorTrainBatch` which represents the same tensor as tt, but with all cores having equal norm. In the batch case applies to each TT in `TensorTrainBatch`. """ # TODO: bad way to check if batch or not. 
with tf.name_scope(name, values=tt.tt_cores): epsilon = tf.convert_to_tensor(epsilon, dtype=tt.dtype) if isinstance(tt, TensorTrain): new_cores = [] running_log_norm = 0 core_norms = [] for core in tt.tt_cores: cur_core_norm = tf.sqrt(tf.maximum(tf.reduce_sum(core ** 2), epsilon)) core_norms.append(cur_core_norm) running_log_norm += tf.log(cur_core_norm) running_log_norm = running_log_norm / tt.ndims() fact = tf.exp(running_log_norm) for i, core in enumerate(tt.tt_cores): new_cores.append(core * fact / core_norms[i]) return TensorTrain(new_cores) else: sz = (tt.batch_size,) + (len(tt.tt_cores[0].shape) - 1) * (1,) running_core_log_norms = tf.zeros(sz, dtype=tt.dtype) ax = np.arange(len(tt.tt_cores[0].shape))[1:] fact_list = [] for core in tt.tt_cores: cur_core_norm_sq = tf.reduce_sum(core**2, axis=ax, keep_dims=True) cur_core_norm = tf.sqrt(tf.maximum(epsilon, cur_core_norm_sq)) fact_list.append(cur_core_norm) running_core_log_norms += tf.log(cur_core_norm) new_cores = [] exp_fact = tf.exp(running_core_log_norms / tt.ndims()) for i, core in enumerate(tt.tt_cores): new_cores.append(tf.multiply(core, exp_fact / fact_list[i])) return TensorTrainBatch(new_cores)
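# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# package-level t3f API re-exports the ops defined above together with an
# initializer such as t3f.random_tensor; treat the exact import path as an
# assumption rather than a guarantee.
#
# import t3f
# a = t3f.random_tensor((3, 4, 5), tt_rank=2)   # random TT-tensor of shape 3 x 4 x 5
# b = t3f.random_tensor((3, 4, 5), tt_rank=3)
# dot = t3f.flat_inner(a, b)     # scalar <a, b>, computed core by core
# s = t3f.add(a, b)              # TT-ranks add up: rank(s) = rank(a) + rank(b)
# p = t3f.multiply(a, b)         # TT-ranks multiply: rank(p) = rank(a) * rank(b)
# nrm = t3f.frobenius_norm(s)    # QR-based by default (differentiable=False)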
py
b414aaa8e33dd54606551330d3484038d8b2d171
from .http import get_http_app

application = get_http_app()
py
b414aaf85a733315b1ff007c45487181edfa7e5d
from argument_tasks import Interface
from example_data import methods

SOURCE_YML_FILE = open("example_data/tasks.yml", "r")
Interface(SOURCE_YML_FILE, methods).run()
py
b414ab4e35ea70d712427f10f7f75f89f2bc371f
# coding=utf-8 # Copyright 2019 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ImageNet.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensor2tensor.data_generators import generator_utils from tensor2tensor.data_generators import image_utils from tensor2tensor.layers import modalities from tensor2tensor.utils import registry import tensorflow as tf # URLs and filenames for IMAGENET 32x32 data from # https://arxiv.org/abs/1601.06759. _IMAGENET_SMALL_ROOT_URL = "http://image-net.org/small/" _IMAGENET_SMALL_URLS = [ "train_32x32.tar", "valid_32x32.tar"] _IMAGENET_SMALL_TRAIN_PREFIX = "train_32x32" _IMAGENET_SMALL_EVAL_PREFIX = "valid_32x32" _IMAGENET_SMALL_IMAGE_SIZE = 32 # URLs and filenames for IMAGENET 64x64 data. _IMAGENET_MEDIUM_ROOT_URL = "http://image-net.org/small/" _IMAGENET_MEDIUM_URLS = [ "train_64x64.tar", "valid_64x64.tar"] _IMAGENET_MEDIUM_TRAIN_PREFIX = "train_64x64" _IMAGENET_MEDIUM_EVAL_PREFIX = "valid_64x64" _IMAGENET_MEDIUM_IMAGE_SIZE = 64 # Derived from ImageNet data MEAN_RGB = [0.485, 0.456, 0.406] STDDEV_RGB = [0.229, 0.224, 0.225] def imagenet_pixelrnn_generator(tmp_dir, training, size=_IMAGENET_SMALL_IMAGE_SIZE): """Image generator for Imagenet 64x64 downsampled images. It assumes that the data has been downloaded from http://image-net.org/small/*_32x32.tar or http://image-net.org/small/*_64x64.tar into tmp_dir. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. size: image size (assumes height and width are same) Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as JPEG, * image/format: the string "jpeg" representing image format, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a list of the corresponding type. 
""" if size == _IMAGENET_SMALL_IMAGE_SIZE: train_prefix = _IMAGENET_SMALL_TRAIN_PREFIX eval_prefix = _IMAGENET_SMALL_EVAL_PREFIX else: train_prefix = _IMAGENET_MEDIUM_TRAIN_PREFIX eval_prefix = _IMAGENET_MEDIUM_EVAL_PREFIX prefix = train_prefix if training else eval_prefix images_filepath = os.path.join(tmp_dir, prefix) image_files = tf.gfile.Glob(images_filepath + "/*") height = size width = size const_label = 0 for filename in image_files: with tf.gfile.Open(filename, "r") as f: encoded_image = f.read() yield { "image/encoded": [encoded_image], "image/format": ["png"], "image/class/label": [const_label], "image/height": [height], "image/width": [width] } def imagenet_preprocess_example(example, mode, resize_size=None, normalize=True): """Preprocessing used for Imagenet and similar problems.""" resize_size = resize_size or [299, 299] assert resize_size[0] == resize_size[1] image = example["inputs"] if mode == tf.estimator.ModeKeys.TRAIN: image = preprocess_for_train(image, image_size=resize_size[0], normalize=normalize) else: image = preprocess_for_eval(image, image_size=resize_size[0], normalize=normalize) example["inputs"] = image return example @registry.register_problem class ImageImagenet(image_utils.Image2ClassProblem): """Imagenet.""" @property def is_small(self): return False @property def num_classes(self): return 1000 def generate_data(self, data_dir, tmp_dir, task_id=-1): # TODO(lukaszkaiser): find a better way than printing this. print("To generate the ImageNet dataset in the proper format, follow " "instructions at https://github.com/tensorflow/models/tree/master" "/research/inception/README.md#getting-started") def preprocess_example(self, example, mode, _): return imagenet_preprocess_example(example, mode) class ImageImagenetRescaled(ImageImagenet): """Imagenet rescaled to rescale_size.""" @property def rescale_size(self): # return [224, 224] raise NotImplementedError() @property def normalize_image(self): """Whether the image should be normalized in preprocessing.""" return True def dataset_filename(self): return "image_imagenet" # Reuse Imagenet data. def generate_data(self, data_dir, tmp_dir, task_id=-1): tf.logging.warning( "Generate data for rescaled ImageNet problems with image_imagenet") def preprocess_example(self, example, mode, _): return imagenet_preprocess_example( example, mode, resize_size=self.rescale_size, normalize=self.normalize_image) @registry.register_problem class ImageImagenet224(ImageImagenetRescaled): """Imagenet rescaled to 224x224.""" @property def rescale_size(self): return [224, 224] @registry.register_problem class ImageImagenet224NoNormalization(ImageImagenet224): """Imagenet rescaled to 224x224 without normalization.""" @property def normalize_image(self): """Whether the image should be normalized in preprocessing.""" return False @registry.register_problem class ImageImagenet256(ImageImagenetRescaled): """Imagenet rescaled to 256x256.""" @property def rescale_size(self): return [256, 256] @registry.register_problem class ImageImagenet32(ImageImagenetRescaled): """Imagenet rescaled to 32x32.""" @property def rescale_size(self): return [32, 32] @property def is_small(self): return True # Modalities like for CIFAR. def preprocess_example(self, example, mode, _): # Just resize with area. 
if self._was_reversed: example["inputs"] = tf.to_int64( tf.image.resize_images(example["inputs"], self.rescale_size, tf.image.ResizeMethod.AREA)) else: example = imagenet_preprocess_example(example, mode) example["inputs"] = tf.to_int64( tf.image.resize_images(example["inputs"], self.rescale_size)) return example @registry.register_problem class ImageImagenet32Gen(ImageImagenet): """Imagenet 32 from the pixen cnn paper.""" @property def train_shards(self): return 1024 @property def dev_shards(self): return 10 def generate_data(self, data_dir, tmp_dir, task_id=-1): generator_utils.generate_dataset_and_shuffle( self.generator(data_dir, tmp_dir, True), self.training_filepaths(data_dir, self.train_shards, shuffled=True), self.generator(data_dir, tmp_dir, False), self.dev_filepaths(data_dir, self.dev_shards, shuffled=True)) def generator(self, data_dir, tmp_dir, is_training): if is_training: return imagenet_pixelrnn_generator( tmp_dir, int(True), size=_IMAGENET_SMALL_IMAGE_SIZE) else: return imagenet_pixelrnn_generator( tmp_dir, int(is_training), size=_IMAGENET_SMALL_IMAGE_SIZE) def preprocess_example(self, example, mode, unused_hparams): example["inputs"].set_shape([_IMAGENET_SMALL_IMAGE_SIZE, _IMAGENET_SMALL_IMAGE_SIZE, 3]) example["inputs"] = tf.to_int64(example["inputs"]) return example @registry.register_problem class ImageImagenet64Gen(ImageImagenet): """Imagenet 64 from the pixen cnn paper.""" @property def train_shards(self): return 1024 @property def dev_shards(self): return 10 def generate_data(self, data_dir, tmp_dir, task_id=-1): generator_utils.generate_dataset_and_shuffle( self.generator(data_dir, tmp_dir, True), self.training_filepaths(data_dir, self.train_shards, shuffled=True), self.generator(data_dir, tmp_dir, False), self.dev_filepaths(data_dir, self.dev_shards, shuffled=True)) def generator(self, data_dir, tmp_dir, is_training): if is_training: return imagenet_pixelrnn_generator( tmp_dir, int(True), size=_IMAGENET_MEDIUM_IMAGE_SIZE) else: return imagenet_pixelrnn_generator( tmp_dir, int(False), size=_IMAGENET_MEDIUM_IMAGE_SIZE) def preprocess_example(self, example, mode, unused_hparams): example["inputs"].set_shape([_IMAGENET_MEDIUM_IMAGE_SIZE, _IMAGENET_MEDIUM_IMAGE_SIZE, 3]) example["inputs"] = tf.to_int64(example["inputs"]) return example @registry.register_problem class ImageImagenetMultiResolutionGen(ImageImagenet64Gen): """ImageNet at multiple resolutions. The resolutions are specified as a hyperparameter during preprocessing. """ def dataset_filename(self): return "image_imagenet64_gen" @property def train_shards(self): return 1024 @property def dev_shards(self): return 10 def preprocess_example(self, example, mode, hparams): image = example["inputs"] # Get resize method. Include a default if not specified, or if it's not in # TensorFlow's collection of pre-implemented resize methods. resize_method = getattr(hparams, "resize_method", "BICUBIC") resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method) if resize_method == "DILATED": scaled_images = image_utils.make_multiscale_dilated( image, hparams.resolutions, num_channels=self.num_channels) else: scaled_images = image_utils.make_multiscale( image, hparams.resolutions, resize_method=resize_method, num_channels=self.num_channels) # Pack tuple of scaled images into one tensor. We do this by enforcing the # columns to match for every resolution. 
# TODO(avaswani, trandustin): We should create tuples because this will not # work if height*width of low res < width of high res highest_res = hparams.resolutions[-1] example["inputs"] = tf.concat([ tf.reshape(scaled_image, [res**2 // highest_res, highest_res, self.num_channels]) for scaled_image, res in zip(scaled_images, hparams.resolutions)], axis=0) return example @registry.register_problem class ImageImagenet32Small(ImageImagenet): """Imagenet small from the pixel cnn paper.""" @property def is_small(self): return False # Modalities like for CIFAR. @property def num_classes(self): return 1000 @property def train_shards(self): return 1024 @property def dev_shards(self): return 10 def preprocess_example(self, example, mode, unused_hparams): example["inputs"].set_shape([_IMAGENET_SMALL_IMAGE_SIZE, _IMAGENET_SMALL_IMAGE_SIZE, 3]) example["inputs"] = tf.to_int64(example["inputs"]) return example @registry.register_problem class ImageImagenet64(ImageImagenet32): """Imagenet rescaled to 64x64.""" @property def rescale_size(self): return [64, 64] @registry.register_problem class Img2imgImagenet(image_utils.ImageProblem): """Imagenet rescaled to 8x8 for input and 32x32 for output.""" def dataset_filename(self): return "image_imagenet" # Reuse Imagenet data. def preprocess_example(self, example, unused_mode, unused_hparams): inputs = example["inputs"] # For Img2Img resize input and output images as desired. example["inputs"] = image_utils.resize_by_area(inputs, 8) example["targets"] = image_utils.resize_by_area(inputs, 32) return example def generate_data(self, data_dir, tmp_dir, task_id=-1): tf.logging.warning("Generate data for img2img_imagenet with image_imagenet") def hparams(self, defaults, unused_model_hparams): p = defaults p.modality = {"inputs": modalities.ModalityType.IDENTITY, "targets": modalities.ModalityType.IDENTITY} p.vocab_size = {"inputs": 256, "targets": 256} p.batch_size_multiplier = 256 p.input_space_id = 1 p.target_space_id = 1 # The following preprocessing functions were taken from # cloud_tpu/models/resnet/resnet_preprocessing.py # ============================================================================== def _crop(image, offset_height, offset_width, crop_height, crop_width): """Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: `Tensor` image of shape [height, width, channels]. offset_height: `Tensor` indicating the height offset. offset_width: `Tensor` indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size. """ original_shape = tf.shape(image) rank_assertion = tf.Assert( tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."]) with tf.control_dependencies([rank_assertion]): cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) size_assertion = tf.Assert( tf.logical_and( tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ["Crop size greater than the image size."]) offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) # Use tf.slice instead of crop_to_bounding box as it accepts tensors to # define the crop size. 
with tf.control_dependencies([size_assertion]): image = tf.slice(image, offsets, cropped_shape) return tf.reshape(image, cropped_shape) def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None): """Generates cropped_image using a one of the bboxes randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image: `Tensor` of image (it will be converted to floats in [0, 1]). bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. The cropped area of the image must contain a fraction of the supplied image within in this range. max_attempts: An optional `int`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. scope: Optional `str` for name scope. Returns: (cropped image `Tensor`, distorted bbox `Tensor`). """ with tf.name_scope(scope, default_name="distorted_bounding_box_crop", values=[image, bbox]): # Each bounding box has shape [1, num_boxes, box coords] and # the coordinates are ordered [ymin, xmin, ymax, xmax]. # A large fraction of image datasets contain a human-annotated bounding # box delineating the region of the image containing the object of interest. # We choose to create a new bounding box for the object which is a randomly # distorted version of the human-annotated bounding box that obeys an # allowed range of aspect ratios, sizes and overlap with the human-annotated # bounding box. If no box is supplied, then we assume the bounding box is # the entire image. sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box # Crop the image to the specified bounding box. cropped_image = tf.slice(image, bbox_begin, bbox_size) return cropped_image, distort_bbox def _random_crop(image, size): """Make a random crop of (`size` x `size`).""" bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) random_image, bbox = distorted_bounding_box_crop( image, bbox, min_object_covered=0.1, aspect_ratio_range=(3. / 4, 4. 
/ 3.), area_range=(0.08, 1.0), max_attempts=1, scope=None) bad = _at_least_x_are_true(tf.shape(image), tf.shape(random_image), 3) image = tf.cond( bad, lambda: _center_crop(_do_scale(image, size), size), lambda: tf.image.resize_bicubic([random_image], [size, size])[0]) return image def _flip(image): """Random horizontal image flip.""" image = tf.image.random_flip_left_right(image) return image def _at_least_x_are_true(a, b, x): """At least `x` of `a` and `b` `Tensors` are true.""" match = tf.equal(a, b) match = tf.cast(match, tf.int32) return tf.greater_equal(tf.reduce_sum(match), x) def _do_scale(image, size): """Rescale the image by scaling the smaller spatial dimension to `size`.""" shape = tf.cast(tf.shape(image), tf.float32) w_greater = tf.greater(shape[0], shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return tf.image.resize_bicubic([image], shape)[0] def _center_crop(image, size): """Crops to center of image with specified `size`.""" image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] offset_height = ((image_height - size) + 1) / 2 offset_width = ((image_width - size) + 1) / 2 image = _crop(image, offset_height, offset_width, size, size) return image def _normalize(image): """Normalize the image to zero mean and unit variance.""" offset = tf.constant(MEAN_RGB, shape=[1, 1, 3]) image -= offset scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3]) image /= scale return image def preprocess_for_train(image, image_size=224, normalize=True): """Preprocesses the given image for evaluation. Args: image: `Tensor` representing an image of arbitrary size. image_size: int, how large the output image should be. normalize: bool, if True the image is normalized. Returns: A preprocessed image `Tensor`. """ image = _random_crop(image, image_size) if normalize: image = _normalize(image) image = _flip(image) image = tf.reshape(image, [image_size, image_size, 3]) return image def preprocess_for_eval(image, image_size=224, normalize=True): """Preprocesses the given image for evaluation. Args: image: `Tensor` representing an image of arbitrary size. image_size: int, how large the output image should be. normalize: bool, if True the image is normalized. Returns: A preprocessed image `Tensor`. """ image = _do_scale(image, image_size + 32) if normalize: image = _normalize(image) image = _center_crop(image, image_size) image = tf.reshape(image, [image_size, image_size, 3]) return image
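# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the two entry points
# above are normally applied per image inside an input pipeline. `decoded` is
# assumed to be an already-decoded float image tensor of shape
# [height, width, 3] with values in [0, 1]; the 224 target size mirrors
# ImageImagenet224 above.
#
# def _preprocess(decoded, is_training):
#   if is_training:
#     return preprocess_for_train(decoded, image_size=224)
#   return preprocess_for_eval(decoded, image_size=224)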
py
b414acd98264fe94fb5efb14bcdea5a805648f68
import pygame import text_input BUTTON_PADDING = 5 BUTTON_COLOR = (128, 128, 128) BUTTON_COLOR_HOVER = (100, 100, 100) BUTTON_COLOR_DISABLED = (192, 192, 192) BUTTON_TEXT_COLOR = (0, 0, 0) BUTTON_TEXT_COLOR_DISABLED = (96, 96, 96) INPUT_PADDING = 5 MOUSE_BUTTON_LEFT = 1 textmarker = ( # sized 16x24 "ooooo ooooo ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", " o ", "ooooo ooooo ", ) class Button: def __init__(self, display, text, font, (x, y), width=-1, height=-1): # label used for accessibility purposes # init values self.enable = True self.focus = False self.display = display self.text = text self.font = font self.text_color = BUTTON_TEXT_COLOR self.callback = None self.x = x self.y = y self.width = self.w = width self.height = self.h = height self.text_x = x + BUTTON_PADDING self.text_y = y + BUTTON_PADDING self.button_rect = pygame.Rect(self.x, self.y, self.w, self.h) self.border_rect = pygame.Rect(self.x, self.y, self.w, self.h) self.color = BUTTON_COLOR self.mouse_hover = False self.clicked = False # calc positions and width + height self.setPosition((x, y), width, height) def getWidth(self): return self.w def getHeight(self): return self.h def setPosition(self, (x, y), width=-1, height=-1): self.x = x self.y = y self.setWidthHeight(width, height) def setWidthHeight(self, width = -1, height = -1): self.width = width self.height = height # if width or height == -1 -> width and height depend on text size if self.width == -1: self.w = self.font.size(self.text)[0] + 2 * BUTTON_PADDING self.text_x = self.x + BUTTON_PADDING else: self.w = self.width self.text_x = self.x + self.width / 2 - self.font.size(self.text)[0] / 2 if self.height == -1: self.h = self.font.size(self.text)[1] + 2 * BUTTON_PADDING self.text_y = self.y + BUTTON_PADDING else: self.h = self.height self.text_y = self.y + self.height / 2 - self.font.size(self.text)[1] / 2 self.button_rect = pygame.Rect(self.x+1, self.y+1, self.w-2, self.h-2) self.border_rect = pygame.Rect(self.x, self.y, self.w, self.h) def changeText(self, text): self.text = text self.setWidthHeight() def handleEvent(self, event): # set colors of button and text depending on whether enabled or not if not self.enable: self.color = BUTTON_COLOR_DISABLED self.text_color = BUTTON_TEXT_COLOR_DISABLED return else: self.color = BUTTON_COLOR self.text_color = BUTTON_TEXT_COLOR # hover over button and click events if event.type == pygame.MOUSEMOTION and self.button_rect.collidepoint( event.pos): self.mouse_hover = True elif event.type == pygame.MOUSEBUTTONDOWN and event.button == \ MOUSE_BUTTON_LEFT and self.button_rect.collidepoint(event.pos): self.getClickSound().stop() self.getClickSound().play() self.clicked = True elif event.type == pygame.MOUSEBUTTONUP and event.button == \ MOUSE_BUTTON_LEFT and self.button_rect.collidepoint(event.pos): self.display.button_down_sound.stop() self.display.button_down_sound.play() if self.clicked: if self.callback: self.callback() self.clicked = False elif event.type == pygame.MOUSEBUTTONDOWN and event.button == \ MOUSE_BUTTON_LEFT and not self.button_rect.collidepoint(event.pos): self.focus = False else: self.color = BUTTON_COLOR self.clicked = False self.mouse_hover = False def update(self): if self.enable: if self.focus or self.mouse_hover: self.color = BUTTON_COLOR_HOVER else: self.color = BUTTON_COLOR def render(self): text = self.font.render(self.text, 1, self.text_color) pygame.draw.rect(self.display.screen, (0, 0, 0), 
self.border_rect, 2) if self.clicked: pygame.draw.rect(self.display.screen, self.color, (self.button_rect[0] +1, self.button_rect[1] +1, self.button_rect[2], self.button_rect[3]), 0) self.display.screen.blit(text, (self.text_x+1, self.text_y+1)) else: pygame.draw.rect(self.display.screen, self.color, self.button_rect, 0) self.display.screen.blit(text, (self.text_x, self.text_y)) def getLabel(self): label = self.text + " "+self.display.translator.translate("button") if not self.getEnable(): label += " ("+self.display.translator.translate("disabled")+")" return label def setCallback(self, cb): self.callback = cb def getCallback(self): return self.callback def getEnable(self): return self.enable def setEnable(self, value): self.enable = value def getClickSound(self): return self.display.button_up_sound def setFocus(self, flag): self.focus = flag def getFocus(self): return self.focus # own TextInput class, which we added a rectangle class TextInput: def __init__(self, display, font, (x, y), width, label='', password=False, only_digits = False): self.label = label self.display = display self.x = x + INPUT_PADDING self.y = y self.rect_color = (0, 0, 0) self.focus = False self.cursor_size = (16, 24) self.cursor_hotspot = (6, 12) self.cursor = pygame.cursors.compile(textmarker) self.cursor_is_textmarker = False self.clicked = False # to get the height of text with this font text_height = font.size("Dummy")[1] self.input = text_input.TextInput(self.display, font, max_width=width - 2 * INPUT_PADDING, password = password, only_digits = only_digits) self.x_end = x + width self.y_end = y + text_height + 2 * INPUT_PADDING self.input_rect = pygame.Rect(x, y - INPUT_PADDING, width, text_height + 2 * INPUT_PADDING) def setFocus(self, flag): self.focus = flag self.input.setFocus(flag) def handleEvent(self, event): self.input.handleEvent(event) # change cursor type when hovering over text input if event.type == pygame.MOUSEMOTION: if not self.cursor_is_textmarker and self.input_rect.collidepoint( event.pos): pygame.mouse.set_cursor(self.cursor_size, self.cursor_hotspot, *self.cursor) self.cursor_is_textmarker = True elif self.cursor_is_textmarker and not self.input_rect.collidepoint( event.pos): pygame.mouse.set_cursor(*pygame.cursors.arrow) self.cursor_is_textmarker = False # set focus if clicked if event.type == pygame.MOUSEBUTTONDOWN and event.button == \ MOUSE_BUTTON_LEFT and self.input_rect.collidepoint(event.pos): self.clicked = True self.display.view.setNewTabPosition(self.display.view.getTabOrder().index(self)) elif event.type == pygame.MOUSEBUTTONUP and event.button == 1 and \ self.input_rect.collidepoint(event.pos): if self.clicked: self.setFocus(True) self.clicked = False # if left mouse button clicked anywhere else, focus is gone ("dirty" # solution) elif event.type == pygame.MOUSEBUTTONUP and event.button == \ MOUSE_BUTTON_LEFT and not self.input_rect.collidepoint(event.pos): self.setFocus(False) self.clicked = False else: self.clicked = False def update(self): self.input.update() def render(self): self.display.screen.blit(self.input.render(), (self.x, self.y)) pygame.draw.rect(self.display.screen, self.rect_color, self.input_rect, 1) def setLabel(self, text): self.label = text def getLabel(self): label = self.label + " "+self.display.translator.translate("input")+": " if self.input.get_text() == '': label += self.display.translator.translate("empty") else: label += self.input.getPrintText() return label def setText(self, text): self.input.input_string = text self.input.cursor_position = 
len(text)
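# Illustrative sketch (not part of the original module): minimal wiring of a
# Button into a pygame event loop. `display` is assumed to be the application
# object these widgets expect (providing .screen, .translator, .view and the
# button sounds), and `on_start` is a hypothetical callback.
#
# font = pygame.font.SysFont(None, 24)
# start_button = Button(display, "Start", font, (20, 20), width=120, height=40)
# start_button.setCallback(on_start)
# for event in pygame.event.get():
#     start_button.handleEvent(event)
# start_button.update()
# start_button.render()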
py
b414aff5886f6716e326074820489935e3c9baef
## setup.py ## ## Copyright (c) 2019 libcommon ## ## Permission is hereby granted, free of charge, to any person obtaining a copy ## of this software and associated documentation files (the "Software"), to deal ## in the Software without restriction, including without limitation the rights ## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ## copies of the Software, and to permit persons to whom the Software is ## furnished to do so, subject to the following conditions: ## ## The above copyright notice and this permission notice shall be included in all ## copies or substantial portions of the Software. ## ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ## SOFTWARE. import os import setuptools if os.path.isfile("README.md"): with open("README.md", "r") as readme: long_description = readme.read() else: long_description = "" setuptools.setup( name="PACKAGE_NAME", version="PACKAGE_VERSION", author="libcommon", author_email="[email protected]", description="PACKAGE_SHORT_DESCRIPTION", long_description=long_description, long_description_content_type="text/markdown", url="PACKAGE_CODE_URL", project_urls={ "Issue Tracker": "PACKAGE_CODE_URL/issues", "Releases": "PACKAGE_CODE_URL/releases" }, packages=setuptools.find_packages(), install_requires=["Flask"], classifiers=[ "Intended Audience :: Developers", "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires='>=PACKAGE_MIN_PYTHON_VERSION', )
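## Illustrative note (not part of the template): the PACKAGE_* tokens above are
## placeholders to be substituted before release. With hypothetical values
## filled in, the call reduces to something like
##   setuptools.setup(name="my-flask-service", version="0.1.0",
##                    python_requires=">=3.6", install_requires=["Flask"], ...)
## after which a distribution is typically built with standard packaging tools
## (e.g. "python setup.py sdist bdist_wheel").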
py
b414b019d57dad87f3725dbf145c3b73a2c23fe0
"""royal_fog_29881 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include, re_path from django.views.generic.base import TemplateView from allauth.account.views import confirm_email from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi urlpatterns = [ path("", include("home.urls")), path("accounts/", include("allauth.urls")), path("modules/", include("modules.urls")), path("api/v1/", include("home.api.v1.urls")), path("admin/", admin.site.urls), path("users/", include("users.urls", namespace="users")), path("rest-auth/", include("rest_auth.urls")), # Override email confirm to use allauth's HTML view instead of rest_auth's API view path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email), path("rest-auth/registration/", include("rest_auth.registration.urls")), ] admin.site.site_header = "Royal Fog" admin.site.site_title = "Royal Fog Admin Portal" admin.site.index_title = "Royal Fog Admin" # swagger api_info = openapi.Info( title="Royal Fog API", default_version="v1", description="API documentation for Royal Fog App", ) schema_view = get_schema_view( api_info, public=True, permission_classes=(permissions.IsAuthenticated,), ) urlpatterns += [ path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs") ] urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))] urlpatterns += [re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name='index.html'))]
py
b414b0607346cdaee01ca57f444ca6764893dea8
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .sub_resource_py3 import SubResource class SecurityRule(SubResource): """Network security rule. All required parameters must be populated in order to send to Azure. :param id: Resource ID. :type id: str :param description: A description for this rule. Restricted to 140 chars. :type description: str :param protocol: Required. Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp', '*' :type protocol: str or ~azure.mgmt.network.v2017_10_01.models.SecurityRuleProtocol :param source_port_range: The source port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :type source_port_range: str :param destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. :type destination_port_range: str :param source_address_prefix: The CIDR or source IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. :type source_address_prefix: str :param source_address_prefixes: The CIDR or source IP ranges. :type source_address_prefixes: list[str] :param source_application_security_groups: The application security group specified as source. :type source_application_security_groups: list[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup] :param destination_address_prefix: The destination address prefix. CIDR or destination IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. :type destination_address_prefix: str :param destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges. :type destination_address_prefixes: list[str] :param destination_application_security_groups: The application security group specified as destination. :type destination_application_security_groups: list[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup] :param source_port_ranges: The source port ranges. :type source_port_ranges: list[str] :param destination_port_ranges: The destination port ranges. :type destination_port_ranges: list[str] :param access: Required. The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny' :type access: str or ~azure.mgmt.network.v2017_10_01.models.SecurityRuleAccess :param priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. :type priority: int :param direction: Required. The direction of the rule. The direction specifies if rule will be evaluated on incoming or outcoming traffic. Possible values are: 'Inbound' and 'Outbound'. 
Possible values include: 'Inbound', 'Outbound' :type direction: str or ~azure.mgmt.network.v2017_10_01.models.SecurityRuleDirection :param provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str :param name: The name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str """ _validation = { 'protocol': {'required': True}, 'access': {'required': True}, 'direction': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'protocol': {'key': 'properties.protocol', 'type': 'str'}, 'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'}, 'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'}, 'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'}, 'source_address_prefixes': {'key': 'properties.sourceAddressPrefixes', 'type': '[str]'}, 'source_application_security_groups': {'key': 'properties.sourceApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'}, 'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'}, 'destination_address_prefixes': {'key': 'properties.destinationAddressPrefixes', 'type': '[str]'}, 'destination_application_security_groups': {'key': 'properties.destinationApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'}, 'source_port_ranges': {'key': 'properties.sourcePortRanges', 'type': '[str]'}, 'destination_port_ranges': {'key': 'properties.destinationPortRanges', 'type': '[str]'}, 'access': {'key': 'properties.access', 'type': 'str'}, 'priority': {'key': 'properties.priority', 'type': 'int'}, 'direction': {'key': 'properties.direction', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, } def __init__(self, *, protocol, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, source_address_prefix: str=None, source_address_prefixes=None, source_application_security_groups=None, destination_address_prefix: str=None, destination_address_prefixes=None, destination_application_security_groups=None, source_port_ranges=None, destination_port_ranges=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None: super(SecurityRule, self).__init__(id=id, **kwargs) self.description = description self.protocol = protocol self.source_port_range = source_port_range self.destination_port_range = destination_port_range self.source_address_prefix = source_address_prefix self.source_address_prefixes = source_address_prefixes self.source_application_security_groups = source_application_security_groups self.destination_address_prefix = destination_address_prefix self.destination_address_prefixes = destination_address_prefixes self.destination_application_security_groups = destination_application_security_groups self.source_port_ranges = source_port_ranges self.destination_port_ranges = destination_port_ranges self.access = access self.priority = priority self.direction = direction self.provisioning_state = provisioning_state self.name = name self.etag = etag
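# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the generated SDK file): constructing a rule
# with the keyword-only signature above. The values follow the constraints in
# the class docstring (protocol, access and direction are required; priority
# must lie between 100 and 4096); the rule name is a hypothetical example.
#
# allow_https = SecurityRule(
#     protocol='Tcp',
#     access='Allow',
#     direction='Inbound',
#     source_address_prefix='*',
#     source_port_range='*',
#     destination_address_prefix='*',
#     destination_port_range='443',
#     priority=100,
#     name='allow-https-inbound',
# )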