ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M)
---|---|---
py | 1a58b0d63928da515eb049799058d746f90de59a |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import subprocess
from typing import Optional
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.models import Connection
# Please keep these variables in alphabetical order.
from tests.test_utils import AIRFLOW_MAIN_FOLDER
from tests.test_utils.logging_command_executor import CommandExecutor
GCP_AI_KEY = 'gcp_ai.json'
GCP_AUTOML_KEY = 'gcp_automl.json'
GCP_BIGQUERY_KEY = 'gcp_bigquery.json'
GCP_BIGTABLE_KEY = 'gcp_bigtable.json'
GCP_CLOUD_BUILD_KEY = 'gcp_cloud_build.json'
GCP_CLOUDSQL_KEY = 'gcp_cloudsql.json'
GCP_COMPUTE_KEY = 'gcp_compute.json'
GCP_COMPUTE_SSH_KEY = 'gcp_compute_ssh.json'
GCP_DATACATALOG_KEY = 'gcp_datacatalog.json'
GCP_DATAFLOW_KEY = 'gcp_dataflow.json'
GCP_DATAFUSION_KEY = 'gcp_datafusion.json'
GCP_DATAPROC_KEY = 'gcp_dataproc.json'
GCP_DATASTORE_KEY = 'gcp_datastore.json'
GCP_DLP_KEY = 'gcp_dlp.json'
GCP_FUNCTION_KEY = 'gcp_function.json'
GCP_GCS_KEY = 'gcp_gcs.json'
GCP_GCS_TRANSFER_KEY = 'gcp_gcs_transfer.json'
GCP_GKE_KEY = "gcp_gke.json"
GCP_KMS_KEY = "gcp_kms.json"
GCP_LIFE_SCIENCES_KEY = 'gcp_life_sciences.json'
GCP_MEMORYSTORE = 'gcp_memorystore.json'
GCP_PUBSUB_KEY = "gcp_pubsub.json"
GCP_SECRET_MANAGER_KEY = 'gcp_secret_manager.json'
GCP_SPANNER_KEY = 'gcp_spanner.json'
GCP_STACKDRIVER = 'gcp_stackdriver.json'
GCP_TASKS_KEY = 'gcp_tasks.json'
GCP_WORKFLOWS_KEY = "gcp_workflows.json"
GMP_KEY = 'gmp.json'
G_FIREBASE_KEY = 'g_firebase.json'
GCP_AWS_KEY = 'gcp_aws.json'
KEYPATH_EXTRA = 'extra__google_cloud_platform__key_path'
KEYFILE_DICT_EXTRA = 'extra__google_cloud_platform__keyfile_dict'
SCOPE_EXTRA = 'extra__google_cloud_platform__scope'
PROJECT_EXTRA = 'extra__google_cloud_platform__project'
class GcpAuthenticator(CommandExecutor):
"""
Authenticates to Google Cloud with a service account key and updates the
'google_cloud_default' Airflow connection accordingly.
:param gcp_key: name of the key to use for authentication (see GCP_*_KEY values)
:param project_extra: optional extra project parameter passed to google cloud
connection
"""
original_account = None # type: Optional[str]
def __init__(self, gcp_key: str, project_extra: Optional[str] = None):
super().__init__()
self.gcp_key = gcp_key
self.project_extra = project_extra
self.project_id = self.get_project_id()
self.full_key_path = None
self._set_key_path()
@staticmethod
def get_project_id():
return os.environ.get('GCP_PROJECT_ID')
def set_key_path_in_airflow_connection(self):
"""
Set key path in 'google_cloud_default' connection to point to the full
key path
:return: None
"""
session = settings.Session()
try:
conn = session.query(Connection).filter(Connection.conn_id == 'google_cloud_default')[0]
extras = conn.extra_dejson
extras[KEYPATH_EXTRA] = self.full_key_path
if extras.get(KEYFILE_DICT_EXTRA):
del extras[KEYFILE_DICT_EXTRA]
extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
extras[PROJECT_EXTRA] = self.project_extra if self.project_extra else self.project_id
conn.extra = json.dumps(extras)
session.commit()
except BaseException as ex:
self.log.error('Airflow DB Session error: %s', str(ex))
session.rollback()
raise
finally:
session.close()
def set_dictionary_in_airflow_connection(self):
"""
Set dictionary in 'google_cloud_default' connection to contain content
of the json service account file.
:return: None
"""
session = settings.Session()
try:
conn = session.query(Connection).filter(Connection.conn_id == 'google_cloud_default')[0]
extras = conn.extra_dejson
with open(self.full_key_path) as path_file:
content = json.load(path_file)
extras[KEYFILE_DICT_EXTRA] = json.dumps(content)
if extras.get(KEYPATH_EXTRA):
del extras[KEYPATH_EXTRA]
extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
extras[PROJECT_EXTRA] = self.project_extra
conn.extra = json.dumps(extras)
session.commit()
except BaseException as ex:
self.log.error('Airflow DB Session error: %s', str(ex))
session.rollback()
raise
finally:
session.close()
def _set_key_path(self):
"""
Sets the full key path. If the GCP_CONFIG_DIR environment variable points to a
directory, the key is looked up there. Otherwise it is assumed that the
configuration is checked out in a "config" directory next to the Airflow main
folder, and the key is looked up in that directory's "keys" subfolder.
"""
if "GCP_CONFIG_DIR" in os.environ:
gcp_config_dir = os.environ["GCP_CONFIG_DIR"]
else:
gcp_config_dir = os.path.join(AIRFLOW_MAIN_FOLDER, os.pardir, "config")
if not os.path.isdir(gcp_config_dir):
self.log.info("The %s is not a directory", gcp_config_dir)
key_dir = os.path.join(gcp_config_dir, "keys")
if not os.path.isdir(key_dir):
self.log.error("The %s is not a directory", key_dir)
return
key_path = os.path.join(key_dir, self.gcp_key)
if not os.path.isfile(key_path):
self.log.error("The %s file is missing", key_path)
self.full_key_path = key_path
def _validate_key_set(self):
if self.full_key_path is None:
raise AirflowException("The gcp_key is not set!")
if not os.path.isfile(self.full_key_path):
raise AirflowException(
f"The key {self.gcp_key} could not be found. Please copy it to the {self.full_key_path} path."
)
def gcp_authenticate(self):
"""
Authenticate with service account specified via key name.
"""
self._validate_key_set()
self.log.info("Setting the Google Cloud key to %s", self.full_key_path)
# Checking if we can authenticate using service account credentials provided
self.execute_cmd(
[
'gcloud',
'auth',
'activate-service-account',
f'--key-file={self.full_key_path}',
f'--project={self.project_id}',
]
)
self.set_key_path_in_airflow_connection()
def gcp_revoke_authentication(self):
"""
Revoke authentication by switching the active gcloud account to 'none' (a non-existent account).
"""
self._validate_key_set()
self.log.info("Revoking authentication - setting it to none")
self.execute_cmd(['gcloud', 'config', 'get-value', 'account', f'--project={self.project_id}'])
self.execute_cmd(['gcloud', 'config', 'set', 'account', 'none', f'--project={self.project_id}'])
def gcp_store_authentication(self):
"""
Store the currently active gcloud account so that it can be restored after
authentication is revoked.
"""
self._validate_key_set()
if not GcpAuthenticator.original_account:
GcpAuthenticator.original_account = self.check_output(
['gcloud', 'config', 'get-value', 'account', f'--project={self.project_id}']
).decode('utf-8')
self.log.info("Storing account %s to restore it later", GcpAuthenticator.original_account)
def gcp_restore_authentication(self):
"""
Restore authentication to the original one.
"""
self._validate_key_set()
if GcpAuthenticator.original_account:
self.log.info("Restoring original account stored: %s", GcpAuthenticator.original_account)
subprocess.call(
[
'gcloud',
'config',
'set',
'account',
GcpAuthenticator.original_account,
f'--project={self.project_id}',
]
)
else:
self.log.info("Not restoring the original Google Cloud account: it is not set")
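# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how the
# authenticator is typically driven around a block of system tests. It assumes
# that GCP_PROJECT_ID is set in the environment and that the key file named by
# GCP_GCS_KEY exists in the configured key directory.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    authenticator = GcpAuthenticator(gcp_key=GCP_GCS_KEY)
    authenticator.gcp_store_authentication()
    try:
        authenticator.gcp_authenticate()
        # ... run system tests that use the 'google_cloud_default' connection ...
    finally:
        authenticator.gcp_restore_authentication()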
| py | 1a58b24873b39ebb82894ffb6e132cf055be37c0 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.tracks.controllers import (RHCreateTrack, RHCreateTrackGroup, RHDeleteTrack,
RHDeleteTrackGroup, RHDisplayTracks, RHEditProgram, RHEditTrack,
RHEditTrackGroup, RHManageTracks, RHSortTracks, RHTracksPDF)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('tracks', __name__, template_folder='templates', virtual_template_folder='events/tracks',
url_prefix='/event/<int:event_id>')
_bp.add_url_rule('/manage/tracks/', 'manage', RHManageTracks)
_bp.add_url_rule('/manage/tracks/program', 'edit_program', RHEditProgram, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/create', 'create_track', RHCreateTrack, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/sort', 'sort_tracks', RHSortTracks, methods=('POST',))
_bp.add_url_rule('/manage/tracks/<int:track_id>', 'edit_track', RHEditTrack, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/<int:track_id>', 'delete_track', RHDeleteTrack, methods=('DELETE',))
_bp.add_url_rule('/manage/track-groups/create', 'create_track_group', RHCreateTrackGroup, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/track-groups/<int:track_group_id>', 'edit_track_group', RHEditTrackGroup,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/track-groups/<int:track_group_id>', 'delete_track_group', RHDeleteTrackGroup,
methods=('DELETE',))
_bp.add_url_rule('/program', 'program', RHDisplayTracks)
_bp.add_url_rule('/program.pdf', 'program_pdf', RHTracksPDF)
_compat_bp = IndicoBlueprint('compat_tracks', __name__, url_prefix='/event/<int:event_id>')
_compat_bp.add_url_rule('/manage/program/tracks/<int:track_id>/contributions/', 'track_contribs',
make_compat_redirect_func('contributions', 'contribution_list',
view_args_conv={'track_id': None}))
| py | 1a58b29f660076d92a793f2b26f244c2fce25a33 |
import os
import sys
import subprocess
import random
class Plopper:
def __init__(self,sourcefile,outputdir):
# Initializing instance variables
self.sourcefile = sourcefile
self.outputdir = outputdir+"/tmp_files"
if not os.path.exists(self.outputdir):
os.makedirs(self.outputdir)
#Creating a dictionary using parameter label and value
def createDict(self, x, params):
dictVal = {}
for p, v in zip(params, x):
dictVal[p] = v
return(dictVal)
#Replace the Markers in the source file with the corresponding Pragma values
def plotValues(self, dictVal, inputfile, outputfile):
with open(inputfile, "r") as f1:
buf = f1.readlines()
with open(outputfile, "w") as f2:
for line in buf:
modify_line = line
for key, value in dictVal.items():
if key in modify_line:
if value != 'None': #For empty string options
modify_line = modify_line.replace('#'+key, str(value))
if modify_line != line:
f2.write(modify_line)
else:
#To avoid writing the Marker
f2.write(line)
# Function to find the execution time of the interim file, and return the execution time as cost to the search module
def findRuntime(self, x, params):
interimfile = ""
#exetime = float('inf')
#exetime = sys.maxsize
exetime = 1
counter = random.randint(1, 10001) # random suffix for the temp file name to reduce collisions
interimfile = self.outputdir+"/"+str(counter)+".c"
# Generate intermediate file
dictVal = self.createDict(x, params)
self.plotValues(dictVal, self.sourcefile, interimfile)
#compile and find the execution time
tmpbinary = interimfile[:-2]
kernel_idx = self.sourcefile.rfind('/')
kernel_dir = self.sourcefile[:kernel_idx]
cmd1 = "clang -fno-caret-diagnostics " +interimfile +" " + kernel_dir + "/Materials.c " \
+ kernel_dir + "/XSutils.c " + " -I" + kernel_dir + \
" -std=c99 -fopenmp -DOPENMP -fno-unroll-loops -O3 -mllvm -polly -mllvm -polly-process-unprofitable -mllvm -polly-use-llvm-names -ffast-math -march=native -L/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib -o "+tmpbinary
cmd2 = kernel_dir + "/exe.pl " + tmpbinary
#Find the compilation status using subprocess
compilation_status = subprocess.run(cmd1, shell=True, stderr=subprocess.PIPE)
#Find the execution time only when the compilation return code is zero, else return infinity
if compilation_status.returncode == 0 :
#and len(compilation_status.stderr) == 0: #Second condition is to check for warnings
execution_status = subprocess.run(cmd2, shell=True, stdout=subprocess.PIPE)
exetime = float(execution_status.stdout.decode('utf-8'))
if exetime == 0:
exetime = 1
else:
print(compilation_status.stderr)
print("compile failed")
return exetime #return execution time as cost
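# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The source file
# name and the parameter markers below are hypothetical; findRuntime() expects
# each parameter name to appear in the source prefixed with '#', as handled by
# plotValues().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    plopper = Plopper("./mmp.c", ".")        # hypothetical kernel source and output dir
    params = ["P0", "P1"]                    # markers appearing as #P0 and #P1 in mmp.c
    candidate = ["dynamic", "64"]            # one candidate configuration
    print(plopper.findRuntime(candidate, params))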
| py | 1a58b4896d90e0439f9134be6f68651796211833 |
import commands
import os
import sys
class EnvFileReader:
def read_file(self, filename, env_var = os.environ):
file_lines = open(filename,'r').readlines()
line_num = 1
for line in file_lines:
# get rid of comments
line = line.split("#")[0]
# strip whitespace from ends
line = line.strip()
# check if empty line
if line == "":
line_num += 1
continue
# check for =
if line.find("=") == -1:
raise ValueError("Missing '=' on line %i of file %s" % (line_num, filename))
# split into var = val pairs
(var,val) = line.split("=",1)
# remove whitespace from vars and values
var = var.strip()
val = val.strip()
# search for variables in val
done = False
while True:
var_start_index = val.find("$(")
if var_start_index == -1:
break
var_end_index = val.find(")")
if var_end_index == -1:
raise ValueError("Variable parse error on line %i of file %s" % (line_num, filename))
# extract variable value
sub_var = val[var_start_index+2:var_end_index]
# look for variable in environment, if there rebuild val
if os.environ.has_key(sub_var):
val = val[0:var_start_index] + os.environ[sub_var] + val[var_end_index+1:]
elif env_var.has_key(sub_var):
val = val[0:var_start_index] + env_var[sub_var] + val[var_end_index+1:]
else:
raise KeyError("Variable %s not found in environment" % sub_var)
# print "%s = %s" % (var, val)
env_var[var] = val
line_num += 1
if __name__ == '__main__' :
reader = EnvFileReader()
reader.read_file(sys.argv[1])
# for env in os.environ:
# print "%s = %s" % (env,os.environ[env])
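# Illustrative example of the file format parsed above (not part of the
# original file):
#
#   # build configuration
#   BASE_DIR = /opt/app
#   LOG_DIR = $(BASE_DIR)/logs
#
# read_file() strips the comment line, splits each remaining line on the first
# '=', expands $(NAME) references from os.environ or previously parsed lines,
# and stores the result, e.g. env_var["LOG_DIR"] == "/opt/app/logs".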
| py | 1a58b509f67c2818403c2be4951585a037bf0e3d |
"""Utilities for with-statement contexts. See PEP 343."""
import abc
import sys
import _collections_abc
from collections import deque
from functools import wraps
from types import MethodType
__all__ = ["asynccontextmanager", "contextmanager", "closing", "nullcontext",
"AbstractContextManager", "AbstractAsyncContextManager",
"AsyncExitStack", "ContextDecorator", "ExitStack",
"redirect_stdout", "redirect_stderr", "suppress"]
class AbstractContextManager(abc.ABC):
"""An abstract base class for context managers."""
def __enter__(self):
"""Return `self` upon entering the runtime context."""
return self
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AbstractContextManager:
return _collections_abc._check_methods(C, "__enter__", "__exit__")
return NotImplemented
class AbstractAsyncContextManager(abc.ABC):
"""An abstract base class for asynchronous context managers."""
async def __aenter__(self):
"""Return `self` upon entering the runtime context."""
return self
@abc.abstractmethod
async def __aexit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AbstractAsyncContextManager:
return _collections_abc._check_methods(C, "__aenter__",
"__aexit__")
return NotImplemented
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
class _GeneratorContextManagerBase:
"""Shared functionality for @contextmanager and @asynccontextmanager."""
def __init__(self, func, args, kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
# Issue 19330: ensure context manager instances have good docstrings
doc = getattr(func, "__doc__", None)
if doc is None:
doc = type(self).__doc__
self.__doc__ = doc
# Unfortunately, this still doesn't provide good help output when
# inspecting the created context manager instances, since pydoc
# currently bypasses the instance docstring and shows the docstring
# for the class instead.
# See http://bugs.python.org/issue19404 for more details.
class _GeneratorContextManager(_GeneratorContextManagerBase,
AbstractContextManager,
ContextDecorator):
"""Helper for @contextmanager decorator."""
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, self.args, self.kwds)
def __enter__(self):
# do not keep args and kwds alive unnecessarily
# they are only needed for recreation, which is not possible anymore
del self.args, self.kwds, self.func
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield") from None
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return False
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
except StopIteration as exc:
# Suppress StopIteration *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed.
return exc is not value
except RuntimeError as exc:
# Don't re-raise the passed in exception. (issue27122)
if exc is value:
return False
# Likewise, avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479).
if type is StopIteration and exc.__cause__ is value:
return False
raise
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
# This cannot use 'except BaseException as exc' (as in the
# async implementation) to maintain compatibility with
# Python 2, where old-style class exceptions are not caught
# by 'except BaseException'.
if sys.exc_info()[1] is value:
return False
raise
raise RuntimeError("generator didn't stop after throw()")
class _AsyncGeneratorContextManager(_GeneratorContextManagerBase,
AbstractAsyncContextManager):
"""Helper for @asynccontextmanager."""
async def __aenter__(self):
try:
return await self.gen.__anext__()
except StopAsyncIteration:
raise RuntimeError("generator didn't yield") from None
async def __aexit__(self, typ, value, traceback):
if typ is None:
try:
await self.gen.__anext__()
except StopAsyncIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
value = typ()
# See _GeneratorContextManager.__exit__ for comments on subtleties
# in this implementation
try:
await self.gen.athrow(typ, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopAsyncIteration as exc:
return exc is not value
except RuntimeError as exc:
if exc is value:
return False
# Avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479 for sync generators; async generators also
# have this behavior). But do this only if the exception wrapped
# by the RuntimeError is actually Stop(Async)Iteration (see
# issue29692).
if isinstance(value, (StopIteration, StopAsyncIteration)):
if exc.__cause__ is value:
return False
raise
except BaseException as exc:
if exc is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, args, kwds)
return helper
def asynccontextmanager(func):
"""@asynccontextmanager decorator.
Typical usage:
@asynccontextmanager
async def some_async_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
async with some_async_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _AsyncGeneratorContextManager(func, args, kwds)
return helper
class closing(AbstractContextManager):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
class _RedirectStream(AbstractContextManager):
_stream = None
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
"""Context manager for temporarily redirecting stdout to another file.
# How to send help() to stderr
with redirect_stdout(sys.stderr):
help(dir)
# How to write help() to a file
with open('help.txt', 'w') as f:
with redirect_stdout(f):
help(pow)
"""
_stream = "stdout"
class redirect_stderr(_RedirectStream):
"""Context manager for temporarily redirecting stderr to another file."""
_stream = "stderr"
class suppress(AbstractContextManager):
"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
return exctype is not None and issubclass(exctype, self._exceptions)
class _BaseExitStack:
"""A base class for ExitStack and AsyncExitStack."""
@staticmethod
def _create_exit_wrapper(cm, cm_exit):
return MethodType(cm_exit, cm)
@staticmethod
def _create_cb_wrapper(callback, *args, **kwds):
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
return _exit_wrapper
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance."""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature.
Can suppress exceptions the same way __exit__ method can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself).
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods.
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, so assume it's a callable.
self._push_exit_callback(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator.
def enter_context(self, cm):
"""Enters the supplied context manager.
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with
# statement.
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
_exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection.
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper)
return callback # Allow use as a decorator
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods."""
_exit_wrapper = self._create_exit_wrapper(cm, cm_exit)
self._push_exit_callback(_exit_wrapper, True)
def _push_exit_callback(self, callback, is_sync=True):
self._exit_callbacks.append((is_sync, callback))
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(_BaseExitStack, AbstractContextManager):
"""Context manager for dynamic management of a stack of exit callbacks.
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception.
"""
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
is_sync, cb = self._exit_callbacks.pop()
assert is_sync
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
def close(self):
"""Immediately unwind the context stack."""
self.__exit__(None, None, None)
# Inspired by discussions on https://bugs.python.org/issue29302
class AsyncExitStack(_BaseExitStack, AbstractAsyncContextManager):
"""Async context manager for dynamic management of a stack of exit
callbacks.
For example:
async with AsyncExitStack() as stack:
connections = [await stack.enter_async_context(get_connection())
for i in range(5)]
# All opened connections will automatically be released at the
# end of the async with statement, even if attempts to open a
# connection later in the list raise an exception.
"""
@staticmethod
def _create_async_exit_wrapper(cm, cm_exit):
return MethodType(cm_exit, cm)
@staticmethod
def _create_async_cb_wrapper(callback, *args, **kwds):
async def _exit_wrapper(exc_type, exc, tb):
await callback(*args, **kwds)
return _exit_wrapper
async def enter_async_context(self, cm):
"""Enters the supplied async context manager.
If successful, also pushes its __aexit__ method as a callback and
returns the result of the __aenter__ method.
"""
_cm_type = type(cm)
_exit = _cm_type.__aexit__
result = await _cm_type.__aenter__(cm)
self._push_async_cm_exit(cm, _exit)
return result
def push_async_exit(self, exit):
"""Registers a coroutine function with the standard __aexit__ method
signature.
Can suppress exceptions the same way __aexit__ method can.
Also accepts any object with an __aexit__ method (registering a call
to the method instead of the object itself).
"""
_cb_type = type(exit)
try:
exit_method = _cb_type.__aexit__
except AttributeError:
# Not an async context manager, so assume it's a coroutine function
self._push_exit_callback(exit, False)
else:
self._push_async_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def push_async_callback(self, callback, *args, **kwds):
"""Registers an arbitrary coroutine function and arguments.
Cannot suppress exceptions.
"""
_exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection.
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper, False)
return callback # Allow use as a decorator
async def aclose(self):
"""Immediately unwind the context stack."""
await self.__aexit__(None, None, None)
def _push_async_cm_exit(self, cm, cm_exit):
"""Helper to correctly register coroutine function to __aexit__
method."""
_exit_wrapper = self._create_async_exit_wrapper(cm, cm_exit)
self._push_exit_callback(_exit_wrapper, False)
async def __aenter__(self):
return self
async def __aexit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
is_sync, cb = self._exit_callbacks.pop()
try:
if is_sync:
cb_suppress = cb(*exc_details)
else:
cb_suppress = await cb(*exc_details)
if cb_suppress:
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
class nullcontext(AbstractContextManager):
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
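# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module): combining @contextmanager
# with ExitStack to manage a dynamic number of nested contexts.
#
#     from contextlib import contextmanager, ExitStack
#
#     @contextmanager
#     def tag(name):
#         print("<%s>" % name)
#         try:
#             yield
#         finally:
#             print("</%s>" % name)
#
#     with ExitStack() as stack:
#         for name in ("html", "body"):
#             stack.enter_context(tag(name))
#         print("content")
#     # exit callbacks run in LIFO order: </body> is printed before </html>
# ---------------------------------------------------------------------------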
| py | 1a58b6c6e6f106876df931ebe620e79f4785efd3 |
from tkinter import *
root=Tk()
root.title("CAR RENTAL RECEIPT")
root.geometry('700x800')
#Labels
g1=Label(root, text="CAR RENTAL RECEIPT", font="Calibri 18 bold")
l1=Label(root, text="Date: ")
e1=Entry(root,width=30, borderwidth=2)
l2=Label(root, text="Receipt #: ")
e2=Entry(root,width=30, borderwidth=2)
l3=Label(root, text="Rental Company Info", font="Calibri 12 bold")
l3_1=Label(root, text="Company: ")
e3_1=Entry(root,width=30, borderwidth=2)
l3_2=Label(root, text="Representative: ")
e3_2=Entry(root,width=30, borderwidth=2)
l3_3=Label(root, text="Location: ")
e3_3=Entry(root,width=30, borderwidth=2)
l3_4=Label(root, text="City/State/ZIP: ")
e3_4=Entry(root,width=30, borderwidth=2)
l3_5=Label(root, text="Phone: ")
e3_5=Entry(root,width=30, borderwidth=2)
l4=Label(root, text="Lessee Info", font="Calibri 12 bold")
l4_1=Label(root, text="License: ")
e4_1=Entry(root,width=30, borderwidth=2)
l4_2=Label(root, text="Representative: ")
e4_2=Entry(root,width=30, borderwidth=2)
l4_3=Label(root, text="Address: ")
e4_3=Entry(root,width=30, borderwidth=2)
l4_4=Label(root, text="City/State/ZIP: ")
e4_4=Entry(root,width=30, borderwidth=2)
l4_5=Label(root, text="Phone: ")
e4_5=Entry(root,width=30, borderwidth=2)
g2=Label(root, text="Vehicle Information", font="Calibri 18 bold")
l5_1=Label(root, text="VIN: ")
e5_1=Entry(root,width=30, borderwidth=2)
l5_2=Label(root, text="Make: ")
e5_2=Entry(root,width=30, borderwidth=2)
l5_3=Label(root, text="Year: ")
e5_3=Entry(root,width=30, borderwidth=2)
l5_4=Label(root, text="Color: ")
e5_4=Entry(root,width=30, borderwidth=2)
l6_1=Label(root, text="Registration: ")
e6_1=Entry(root,width=30, borderwidth=2)
l6_2=Label(root, text="Model: ")
e6_2=Entry(root,width=30, borderwidth=2)
l6_3=Label(root, text="Mileage: ")
e6_3=Entry(root,width=30, borderwidth=2)
h1=Label(root, text="VIN", font="Calibri 12 bold")
h1_1=Entry(root,width=14, borderwidth=2)
h1_2=Entry(root,width=14, borderwidth=2)
h1_3=Entry(root,width=14, borderwidth=2)
h2=Label(root, text="Cost/Day", font="Calibri 12 bold")
h2_1=Entry(root,width=12, borderwidth=2)
h2_2=Entry(root,width=12, borderwidth=2)
h2_3=Entry(root,width=12, borderwidth=2)
h3=Label(root, text="# of Days", font="Calibri 12 bold")
h3_1=Entry(root,width=19, borderwidth=2)
h3_2=Entry(root,width=19, borderwidth=2)
h3_3=Entry(root,width=19, borderwidth=2)
h4=Label(root, text="Additional Costs", font="Calibri 12 bold")
h4_1=Entry(root,width=18, borderwidth=2)
h4_2=Entry(root,width=18, borderwidth=2)
h4_3=Entry(root,width=18, borderwidth=2)
h4l1=Label(root, text="Subtotal: ")
h4l2=Label(root, text="Tax (%): ")
h4l3=Label(root, text="Total: ")
h4l4=Label(root, text="Amount paid: ")
h4e1=Entry(root,width=8, borderwidth=2)
h4e2=Entry(root,width=9, borderwidth=2)
h4e3=Entry(root,width=10, borderwidth=2)
h4e4=Entry(root,width=4, borderwidth=2)
h5=Label(root, text="Line Total", font="Calibri 12 bold")
h5_1=Entry(root,width=16, borderwidth=2)
h5_2=Entry(root,width=16, borderwidth=2)
h5_3=Entry(root,width=16, borderwidth=2)
h5_4=Entry(root,width=16, borderwidth=2)
h5_5=Entry(root,width=16, borderwidth=2)
h5_6=Entry(root,width=16, borderwidth=2)
h5_7=Entry(root,width=16, borderwidth=2)
xl1=Label(root, text="Payment Method: ")
ck1=Checkbutton(root, text='Cash. ', onvalue=1, offvalue=0)
ck2=Checkbutton(root, text='Check No.: ', onvalue=1, offvalue=0)
ent1=Entry(root,width=31, borderwidth=2)
ck3=Checkbutton(root, text='Credit No.: ', onvalue=1, offvalue=0)
ent3=Entry(root,width=41, borderwidth=2)
ck4=Checkbutton(root, text='Other.: ', onvalue=1, offvalue=0)
ent4=Entry(root,width=44, borderwidth=2)
lasl1=Label(root, text="Authorized Signature: ", font="Calibri 10 bold")
lasl2=Label(root, text="Representative Name: ", font="Calibri 10")
lase1=Entry(root,width=22, borderwidth=2)
lase2=Entry(root,width=20, borderwidth=2)
#Positioning
l1.place(x=10, y=45)
e1.place(x=50, y=45)
l2.place(x=10, y=75)
e2.place(x=73, y=75)
l3.place(x=10, y=110)
l3_1.place(x=10, y=150)
l3_2.place(x=10, y=180)
l3_3.place(x=10, y=210)
l3_4.place(x=10, y=240)
l3_5.place(x=10, y=270)
e3_1.place(x=110, y=150)
e3_2.place(x=110, y=180)
e3_3.place(x=110, y=210)
e3_4.place(x=110, y=240)
e3_5.place(x=110, y=270)
l4.place(x=320, y=110)
l4_1.place(x=320, y=150)
l4_2.place(x=320, y=180)
l4_3.place(x=320, y=210)
l4_4.place(x=320, y=240)
l4_5.place(x=320, y=270)
e4_1.place(x=420, y=150)
e4_2.place(x=420, y=180)
e4_3.place(x=420, y=210)
e4_4.place(x=420, y=240)
e4_5.place(x=420, y=270)
g1.place(x=240, y=0)
g2.place(x=240, y=300)
l5_1.place(x=10, y=360)
l5_2.place(x=10, y=390)
l5_3.place(x=10, y=420)
l5_4.place(x=10, y=450)
e5_1.place(x=60, y=360)
e5_2.place(x=60, y=390)
e5_3.place(x=60, y=420)
e5_4.place(x=60, y=450)
l6_1.place(x=320, y=360)
l6_2.place(x=320, y=390)
l6_3.place(x=320, y=420)
e6_1.place(x=420, y=360)
e6_2.place(x=420, y=390)
e6_3.place(x=420, y=420)
h1.place(x=70, y=490)
h2.place(x=160, y=490)
h3.place(x=290, y=490)
h4.place(x=400, y=490)
h5.place(x=560, y=490)
h1_1.place(x=40, y=520)
h1_2.place(x=40, y=545)
h1_3.place(x=40, y=570)
h2_1.place(x=150, y=520)
h2_2.place(x=150, y=545)
h2_3.place(x=150, y=570)
h3_1.place(x=260, y=520)
h3_2.place(x=260, y=545)
h3_3.place(x=260, y=570)
h4_1.place(x=400, y=520)
h4_2.place(x=400, y=545)
h4_3.place(x=400, y=570)
h4l1.place(x=400, y=595)
h4l2.place(x=400, y=620)
h4l3.place(x=400, y=645)
h4l4.place(x=400, y=670)
h4e1.place(x=460, y=595)
h4e2.place(x=454, y=620)
h4e3.place(x=447, y=645)
h4e4.place(x=483, y=670)
h5_1.place(x=540, y=520)
h5_2.place(x=540, y=545)
h5_3.place(x=540, y=570)
h5_4.place(x=540, y=595)
h5_5.place(x=540, y=620)
h5_6.place(x=540, y=645)
h5_7.place(x=540, y=670)
xl1.place(x=40, y=595)
ck1.place(x=40, y=620)
ck2.place(x=100, y=620)
ck3.place(x=40, y=645)
ck4.place(x=40, y=670)
ent1.place(x=188, y=623)
ent3.place(x=128, y=649)
ent4.place(x=110, y=675)
lasl1.place(x=380, y=710)
lasl2.place(x=390, y=735)
lase1.place(x=505, y=710)
lase2.place(x=518, y=735)
root.mainloop()
| py | 1a58b703d201a6129423d0c248f4ecbf3d362fb8 |
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_devstack
-------------
Throw errors if we do not actually detect the services we're supposed to.
"""
import os
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from openstack.tests.functional.cloud import base
class TestDevstack(base.BaseFunctionalTestCase):
scenarios = [
('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict(env='MAGNUM', service='container-infra')),
('neutron', dict(env='NEUTRON', service='network')),
('octavia', dict(env='OCTAVIA', service='load-balancer')),
('swift', dict(env='SWIFT', service='object-store')),
]
def test_has_service(self):
if os.environ.get(
'OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0') == '1':
self.assertTrue(self.user_cloud.has_service(self.service))
class TestKeystoneVersion(base.BaseFunctionalTestCase):
def test_keystone_version(self):
use_keystone_v2 = os.environ.get('OPENSTACKSDK_USE_KEYSTONE_V2', False)
if use_keystone_v2 and use_keystone_v2 != '0':
self.assertEqual('2.0', self.identity_version)
else:
self.assertEqual('3', self.identity_version)
| py | 1a58b878fd841c70963bdd0b602071fed6687112 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-16 14:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='postcommentmodel',
name='createTime',
field=models.FloatField(default=1497623356.988723, verbose_name='Comment create time'),
),
migrations.AlterField(
model_name='postmodel',
name='createTime',
field=models.FloatField(default=1497623356.9878757, verbose_name='Post create time'),
),
migrations.AlterField(
model_name='postmodel',
name='lastMessTime',
field=models.FloatField(default=1497623356.9879315, verbose_name='Last message time'),
),
]
| py | 1a58b87c5d2fa8f42d0bd1b6fa61414fa087b456 |
class utilidades:
def __init__(self, carne, nombre):
self.carne = carne
self.nombre = nombre
def retorno(self, carne, nombre):
    # The original file ends with an empty method body; a minimal completion
    # is assumed here: update the stored values and return them.
    self.carne = carne
    self.nombre = nombre
    return self.carne, self.nombre
| py | 1a58b9f7a3961893cdca006bae7d3dddcffd2979 |
# -*- coding: utf-8 -*-
from distutils.core import setup
from setuptools import find_packages
with open('.meta/packages') as reqs:
install_requires = reqs.read().split('\n')
setup(
name='rpihelper',
version='0.0.3',
author='Nikita Grishko',
author_email='[email protected]',
url='https://github.com/Gr1N/rpihelper',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
scripts=[
'bin/rqscheduletasks',
],
)
| py | 1a58ba0411158d5441049f2066374144fa2ff4a1 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyxmlescpos',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version='0.1.0',
description='Print XML-defined Receipts on ESC/POS Receipt Printers',
long_description=long_description,
# The project's main homepage.
url='https://github.com/fvdsn/py-xml-escpos',
download_url = 'https://github.com/fvdsn/py-xml-escpos/tarball/0.1.0',
# Author details
author='Frédéric van der Essen & Manuel F Martinez',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Printing',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='printing receipt xml escpos',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['pyusb', 'qrcode'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
# extras_require = {
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| py | 1a58bb018b9bc6ee37b64beeba91cf9e83b27934 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_get_operations_async.py
DESCRIPTION:
This sample demonstrates how to list/get all document model operations (succeeded, in-progress, failed)
associated with the Form Recognizer resource. Kinds of operations returned are "documentModelBuild",
"documentModelCompose", and "documentModelCopyTo". Note that operation information only persists for
24 hours. If the operation was successful, the document model can be accessed using get_model or list_models APIs.
USAGE:
python sample_get_operations_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
import asyncio
async def sample_get_operations_async():
# [START list_operations_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document_model_admin_client = DocumentModelAdministrationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
async with document_model_admin_client:
operations = document_model_admin_client.list_operations()
print("The following document model operations exist under my resource:")
async for operation in operations:
print("\nOperation ID: {}".format(operation.operation_id))
print("Operation kind: {}".format(operation.kind))
print("Operation status: {}".format(operation.status))
print("Operation percent completed: {}".format(operation.percent_completed))
print("Operation created on: {}".format(operation.created_on))
print("Operation last updated on: {}".format(operation.last_updated_on))
print("Resource location of successful operation: {}".format(operation.resource_location))
# [END list_operations_async]
# [START get_operation_async]
# Get an operation by ID
try:
first_operation = await operations.__anext__()
print("\nGetting operation info by ID: {}".format(first_operation.operation_id))
operation_info = await document_model_admin_client.get_operation(first_operation.operation_id)
if operation_info.status == "succeeded":
print("My {} operation is completed.".format(operation_info.kind))
result = operation_info.result
print("Model ID: {}".format(result.model_id))
elif operation_info.status == "failed":
print("My {} operation failed.".format(operation_info.kind))
error = operation_info.error
print("{}: {}".format(error.code, error.message))
else:
print("My operation status is {}".format(operation_info.status))
except StopAsyncIteration:
print("No operations found.")
# [END get_operation_async]
async def main():
await sample_get_operations_async()
if __name__ == '__main__':
asyncio.run(main())
| py | 1a58bc72b9bf36300abce9a8af8ae079cc05996f |
from .scheduler import Scheduler
from .simplescheduler import SimpleScheduler
from .threadedscheduler import ThreadedScheduler
| py | 1a58bd4a5fe02f510ac1ada32082afe10eb8a1ca |
import hashlib
import uuid
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.urls.base import reverse_lazy
from django.utils import timezone
from core.colors import random_color
from user_management.models import Organization
class ClientUser(models.Model):
"""
A user of the software.
Users are always anonymized and only represented by a hash
value, so privacy can be guaranteed.
Hashing will use the provided object's __repr__ method, so please
make sure that the __repr__ method outputs a value that is unique to
the user and unchanging.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
nickname = models.CharField(max_length=255, blank=True, null=True)
def __repr__(self):
return self.name
def __str__(self):
return self.name
@property
def short_name(self):
if self.nickname is None:
return self.name[:8]
return self.name[:8] + " " + self.nickname
@classmethod
def user_from_object(cls, user_object, organization: Organization = None, organization_id: str = None):
if organization:
user_hash = cls.hash_from_object(user_object, str(organization.id))
elif organization_id:
user_hash = cls.hash_from_object(user_object, organization_id)
else:
raise AttributeError("You need to specify an Organization")
instance, created = cls.objects.get_or_create(name=user_hash)
return instance
@classmethod
def hash_from_object(cls, hashable_object, organization_id: str):
string_to_hash = str(hashable_object) + str(organization_id)
return hashlib.sha256(settings.HASH_SALT.encode() + string_to_hash.encode()).hexdigest()
class App(models.Model):
"""
A collection of FunctionalityGroups.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=140)
slug = models.SlugField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
def __str__(self):
return "{}.{}".format(self.organization, self.slug)
def get_absolute_url(self):
return reverse_lazy("app-detail", kwargs={"app_id": self.id})
class Functionality(models.Model):
"""
A behaviour, functionality, or program option to be managed.
A Functionality contains one or more Flavor objects that represent individual
variations of one functionality. This is helpful when you want to A/B test multiple
incarnations of a functionality.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=140)
slug = models.SlugField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
app = models.ForeignKey(App, on_delete=models.CASCADE)
def __str__(self):
return "{}.{}".format(self.app, self.slug)
class Meta:
verbose_name_plural = "Functionalities"
@property
def slug_as_scorecase(self):
return self.slug.replace("-", "_")
@property
def number_of_users(self):
return Availability.objects.filter(flavor__functionality=self).count()
@property
def number_of_enabled_users(self):
return Availability.objects.filter(flavor__functionality=self, is_enabled=True).count()
def get_absolute_url(self):
return reverse_lazy("functionality-detail", kwargs={"pk": self.id})
def get_default_tag(self):
from tagging.models import Tag
return Tag.objects.get_or_create(name="Default", organization=self.app.organization)[0]
class Flavor(models.Model):
"""
A specific version of a functionality.
Add more then one Flavor to a Functionality to A/B test. One will be randomly
activated depending on its enable_probability.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=140)
slug = models.SlugField(max_length=100)
functionality = models.ForeignKey(Functionality, on_delete=models.CASCADE)
client_users = models.ManyToManyField(ClientUser, through="Availability")
color = models.CharField(max_length=6, default=random_color)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "{}.{}".format(self.functionality, self.slug)
@property
def number_of_users(self):
return self.availability_set.count()
@property
def number_of_enabled_users(self):
return self.availability_set.filter(is_enabled=True).count()
@property
def number_of_disabled_users(self):
return self.availability_set.filter(is_enabled=False).count()
@property
def single_width_percent(self):
try:
return float(self.number_of_enabled_users) / self.number_of_users * 100
except ZeroDivisionError:
return 1 * 100
@property
def width_percent(self):
try:
return float(self.number_of_enabled_users) / self.functionality.number_of_users * 100
except ZeroDivisionError:
return 1 * 100
def get_absolute_url(self):
return reverse_lazy("functionality-detail", kwargs={"pk": self.functionality.id})
class RolloutStrategy(models.Model):
"""
A description of how a feature should be rolled out, depending on a tag.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
functionality = models.ForeignKey(Functionality, on_delete=models.CASCADE)
possible_flavors = models.ManyToManyField(Flavor, blank=False)
tag = models.ForeignKey("tagging.Tag", on_delete=models.CASCADE, null=True, blank=True)
start_at = models.DateTimeField(default=timezone.now)
max_enabled_users = models.IntegerField(default=0)
priority = models.PositiveSmallIntegerField(default=0)
RECALL_FUNCTIONALITY = "recall"
PAUSE_ROLLOUT = "pause_rollout"
DEFINED_BY_RELEASES = "defined_by_releases"
ENABLE_GLOBALLY = "enable_globally"
STRATEGY_CHOICES = (
(RECALL_FUNCTIONALITY, "Recall"),
(PAUSE_ROLLOUT, "Roll Out Paused"),
(DEFINED_BY_RELEASES, "Release-Driven"),
(ENABLE_GLOBALLY, "Enabled Globally"),
)
strategy = models.CharField(max_length=50, choices=STRATEGY_CHOICES, default=DEFINED_BY_RELEASES)
class Meta:
ordering = ["start_at"]
unique_together = ("tag", "functionality")
def get_absolute_url(self):
return reverse_lazy("functionality-detail", kwargs={"pk": self.functionality.id})
def clean(self):
super().clean()
# make sure only the functionality's flavors are selected
for flavor in self.possible_flavors.all():
if flavor.functionality != self.functionality:
raise ValidationError({"possible_flavors": "Only Related Flavors can be selected"})
# make sure only the organization's tags are selected
if self.tag and self.functionality and self.tag.organization != self.functionality.app.organization:
raise ValidationError({"tag": "Only your organization's tags can be selected"})
class Availability(models.Model):
"""
A Flavor that is enabled for a specific user.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey(ClientUser, on_delete=models.CASCADE)
flavor = models.ForeignKey(Flavor, on_delete=models.CASCADE)
is_enabled = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "{}.{}".format(self.flavor, self.user)
class Meta:
verbose_name_plural = "Availabilities"
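# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file); `org` is assumed
# to be an existing user_management Organization instance.
#
#     app = App.objects.create(name="Web App", slug="web-app", organization=org)
#     search = Functionality.objects.create(name="New Search", slug="new-search", app=app)
#     Flavor.objects.create(name="Control", slug="control", functionality=search)
#     Flavor.objects.create(name="Variant A", slug="variant-a", functionality=search)
# ---------------------------------------------------------------------------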
| py | 1a58bdb2e904ff044bf4ec00bf8ddc92c386dd26 |
import numpy as np
from torch import nn
import torch
from encoder.params_model import *
from encoder.params_data import *
from encoder.data_objects.iemocap_dataset import emo_categories
class EmoEncoder(nn.Module):
def __init__(self, device):
super().__init__()
self.device = device
self.lstm = nn.LSTM(input_size=mel_n_channels,
hidden_size=model_hidden_size,
num_layers=model_num_layers,
batch_first=True).to(device)
self.linear = nn.Linear(in_features=model_hidden_size,
out_features=model_embedding_size).to(device)
self.relu = torch.nn.ReLU().to(device)
self.linear_cls = nn.Linear(in_features=model_embedding_size,
out_features=len(emo_categories)).to(device)
def forward(self, utterances, hidden_init=None):
"""
Computes the embeddings of a batch of utterance spectrograms.
:param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape
(batch_size, n_frames, n_channels)
:param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers,
batch_size, hidden_size). Will default to a tensor of zeros if None.
:return: the embeddings as a tensor of shape (batch_size, embedding_size)
"""
# Pass the input through the LSTM layers and retrieve all outputs, the final hidden state
# and the final cell state.
out, (hidden, cell) = self.lstm(utterances, hidden_init)
# We take only the hidden state of the last layer
embeds_raw = self.relu(self.linear(hidden[-1]))
# L2-normalize it
embeds = embeds_raw / (torch.norm(embeds_raw, dim=1, keepdim=True) + 1e-5)
pred = self.linear_cls(embeds)
return embeds, pred
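
# Usage sketch (illustrative only): embed a small batch of mel spectrograms.
# mel_n_channels and model_embedding_size are assumed to come from the starred
# params imports above; the batch and frame counts are placeholders.
def _example_embed_batch():
    device = torch.device("cpu")
    encoder = EmoEncoder(device)
    utterances = torch.randn(4, 160, mel_n_channels)  # (batch, n_frames, n_channels)
    embeds, logits = encoder(utterances)
    # embeds: (4, model_embedding_size), L2-normalized; logits: (4, len(emo_categories))
    return embeds, logits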
class StackedBiLSTMEmoEncoder(nn.Module):
def __init__(self, device):
super(StackedBiLSTMEmoEncoder, self).__init__()
self.device = device
self.lstm1 = nn.LSTM(input_size=mel_n_channels,
hidden_size=512,
bidirectional=True,
batch_first=True).to(device)
self.lstm2 = nn.LSTM(input_size=1024,
hidden_size=256,
bidirectional=True,
batch_first=True).to(device)
self.linear = nn.Linear(in_features=512,
out_features=512).to(device)
self.tanh = nn.Tanh().to(device)
self.linear_cls = nn.Linear(in_features=512,
out_features=len(emo_categories)).to(device)
def forward(self, utterances, hidden_init=None):
o, _ = self.lstm1(utterances, hidden_init)
o, (h, c) = self.lstm2(o)
# Take the hidden state of last layers and concatenate the two directions
h = torch.transpose(h[-2:], 0, 1)
h = h.reshape([h.shape[0], -1])
embeds = self.tanh(self.linear(h))
pred = self.linear_cls(embeds)
return embeds, pred
|
py
|
1a58bed4df2072bea30a837f2f6613409753b6e5
|
import torch
import random
import numpy as np
from collections import deque
from snake_gameai import SnakeGameAI, Direction, Point, BLOCK_SIZE
from model import Linear_QNet, QTrainer
from Helper import plot
MAX_MEMORY = 100_000
BATCH_SIZE = 1000
LR = 0.001
class Agent:
def __init__(self):
self.n_game = 0
self.epsilon = 0 # Randomness
self.gamma = 0.9 # discount rate
self.memory = deque(maxlen=MAX_MEMORY) # popleft()
self.model = Linear_QNet(11, 256, 3)
self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
# for n,p in self.model.named_parameters():
# print(p.device,'',n)
# self.model.to('cuda')
# for n,p in self.model.named_parameters():
# print(p.device,'',n)
# state (11 Values)
# [ danger straight, danger right, danger left,
#
# direction left, direction right,
# direction up, direction down
#
# food left,food right,
# food up, food down]
def get_state(self, game):
head = game.snake[0]
point_l = Point(head.x - BLOCK_SIZE, head.y)
point_r = Point(head.x + BLOCK_SIZE, head.y)
point_u = Point(head.x, head.y - BLOCK_SIZE)
point_d = Point(head.x, head.y + BLOCK_SIZE)
dir_l = game.direction == Direction.LEFT
dir_r = game.direction == Direction.RIGHT
dir_u = game.direction == Direction.UP
dir_d = game.direction == Direction.DOWN
state = [
# Danger Straight
(dir_u and game.is_collision(point_u)) or
(dir_d and game.is_collision(point_d)) or
(dir_l and game.is_collision(point_l)) or
(dir_r and game.is_collision(point_r)),
            # Danger right
            (dir_u and game.is_collision(point_r)) or
            (dir_d and game.is_collision(point_l)) or
            (dir_l and game.is_collision(point_u)) or
            (dir_r and game.is_collision(point_d)),
            # Danger Left
            (dir_d and game.is_collision(point_r)) or
            (dir_u and game.is_collision(point_l)) or
            (dir_r and game.is_collision(point_u)) or
            (dir_l and game.is_collision(point_d)),
# Move Direction
dir_l,
dir_r,
dir_u,
dir_d,
# Food Location
game.food.x < game.head.x, # food is in left
game.food.x > game.head.x, # food is in right
game.food.y < game.head.y, # food is up
game.food.y > game.head.y # food is down
]
return np.array(state, dtype=int)
def remember(self, state, action, reward, next_state, done):
        # deque automatically pops from the left once MAX_MEMORY is exceeded
self.memory.append((state, action, reward, next_state, done))
def train_long_memory(self):
        if len(self.memory) > BATCH_SIZE:
mini_sample = random.sample(self.memory, BATCH_SIZE)
else:
mini_sample = self.memory
states, actions, rewards, next_states, dones = zip(*mini_sample)
self.trainer.train_step(states, actions, rewards, next_states, dones)
def train_short_memory(self, state, action, reward, next_state, done):
self.trainer.train_step(state, action, reward, next_state, done)
# TODO: What is the role of epsilon in this method? Feel free to reference the OpenAI Gym RL tutorial from 02/09/22
"""
The role of epsilon is to introduce randomness, of choosing random instead of best action.
This helps our model explore more to collect more and to allow the AI to explore other random
options rather than the immediate best one, without heavily depending on the information it already has prior.
In this specific method, higher epsilon equals more chance of exploration and more chance of random choice.
"""
def get_action(self, state):
        # random moves: trade-off between exploration and exploitation
self.epsilon = 80 - self.n_game
final_move = [0, 0, 0]
if(random.randint(0, 200) < self.epsilon):
move = random.randint(0, 2)
final_move[move] = 1
else:
state0 = torch.tensor(state, dtype=torch.float).cpu()
prediction = self.model(state0).cpu() # prediction by model
move = torch.argmax(prediction).item()
final_move[move] = 1
return final_move
# TODO: Write a couple sentences describing the training process coded below.
"""
This is where the program does most of the work.
This code below initalizes all variables. Sets the agent class and and game class.
The the while loop that cycles over after every move that:
- grabs the state of the game
- grabs the player movement
- performs the move
- grabs the updated and updated state of the game
After the infinite while loop comes and if statement that IF the game is 'done,'
we train the long term memory, the game resets and prints its status, counts and updates total stats
and then repeats the loop all over again.
"""
def train():
plot_scores = []
plot_mean_scores = []
total_score = 0
record = 0
agent = Agent()
game = SnakeGameAI()
while True:
# Get Old state
state_old = agent.get_state(game)
# get move
final_move = agent.get_action(state_old)
# perform move and get new state
reward, done, score = game.play_step(final_move)
state_new = agent.get_state(game)
# train short memory
agent.train_short_memory(
state_old, final_move, reward, state_new, done)
# remember
agent.remember(state_old, final_move, reward, state_new, done)
if done:
# Train long memory,plot result
game.reset()
agent.n_game += 1
agent.train_long_memory()
            if score > record:  # new high score
                record = score
                agent.model.save()
print('Game:', agent.n_game, 'Score:', score, 'Record:', record)
plot_scores.append(score)
total_score += score
mean_score = total_score / agent.n_game
plot_mean_scores.append(mean_score)
plot(plot_scores, plot_mean_scores)
if(__name__ == "__main__"):
train()
# TODO: Write a brief paragraph on your thoughts about this implementation.
# Was there anything surprising, interesting, confusing, or clever? Does the code smell at all?
"""
I thought the code was very direct and simple.
It's very interesting to look at the script and dissect the mind of the program of how ti will perform and react.
I think overall, the implementtaion is really well thought out and well written regarding the implementation and view
of reinforcement learning. I'm sure there could be other technical improvements in regards to the coding aspect of it all,
but the code was great overall.
"""
|
py
|
1a58bedf7138a7e0a5edae868e4a47439e0adb4b
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from fcos_core.layers import smooth_l1_loss
from fcos_core.modeling.box_coder import BoxCoder
from fcos_core.modeling.matcher import Matcher
from fcos_core.structures.boxlist_ops import boxlist_iou
from fcos_core.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler
)
from fcos_core.modeling.utils import cat
class FastRCNNLossComputation(torch.nn.Module):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
def __init__(
self,
proposal_matcher,
fg_bg_sampler,
box_coder,
cls_agnostic_bbox_reg=False,
classification_loss_type='CE',
num_classes=81,
attribute_on=False,
boundingbox_loss_type='SL1',
cfg=None,
):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
super().__init__()
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
        self.attribute_on = attribute_on
        self.num_classes = num_classes
self.classification_loss_type = classification_loss_type
if self.classification_loss_type == 'CE':
self._classifier_loss = F.cross_entropy
elif self.classification_loss_type == 'BCE':
from qd.qd_pytorch import BCEWithLogitsNegLoss
self._classifier_loss = BCEWithLogitsNegLoss()
elif self.classification_loss_type.startswith('IBCE'):
param = map(float, self.classification_loss_type[4:].split('_'))
from qd.qd_pytorch import IBCEWithLogitsNegLoss
self._classifier_loss = IBCEWithLogitsNegLoss(*param)
elif self.classification_loss_type == 'MCEB':
from qd.qd_pytorch import MCEBLoss
self._classifier_loss = MCEBLoss()
elif self.classification_loss_type == 'tree':
tree_file = cfg.MODEL.ROI_BOX_HEAD.TREE_0_BKG
from mtorch.softmaxtree_loss import SoftmaxTreeWithLoss
self._classifier_loss = SoftmaxTreeWithLoss(
tree_file,
                ignore_label=-1,  # dummy value; this case should never happen
loss_weight=1,
valid_normalization=True,
)
self.copied_fields = ["labels"]
if self.attribute_on:
self.copied_fields.append("attributes")
assert boundingbox_loss_type == 'SL1'
def create_all_bkg_labels(self, num, device):
if self.classification_loss_type in ['CE', 'tree']:
return torch.zeros(num,
dtype=torch.float32,
device=device)
elif self.classification_loss_type in ['BCE'] or \
self.classification_loss_type.startswith('IBCE'):
return torch.zeros((num, self.num_classes),
dtype=torch.float32,
device=device)
else:
raise NotImplementedError(self.classification_loss_type)
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Fast RCNN only needs the "labels" field for selecting the targets
target = target.copy_with_fields(self.copied_fields)
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
if len(target) == 0:
dummy_bbox = torch.zeros((len(matched_idxs), 4),
dtype=torch.float32, device=matched_idxs.device)
from maskrcnn_benchmark.structures.bounding_box import BoxList
matched_targets = BoxList(dummy_bbox, target.size, target.mode)
matched_targets.add_field('labels', self.create_all_bkg_labels(
len(matched_idxs), matched_idxs.device))
matched_targets.add_field('tightness', torch.zeros(len(matched_idxs),
device=matched_idxs.device))
matched_targets.add_field(
'attributes',
torch.zeros((len(matched_idxs), 1),
device=matched_idxs.device))
else:
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
attributes = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, proposals_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
if self.attribute_on:
attributes_per_image = matched_targets.get_field("attributes")
attributes_per_image = attributes_per_image.to(dtype=torch.int64)
if len(targets_per_image) > 0:
# Label background (below the low threshold)
# attribute 0 is ignored in the loss
attributes_per_image[bg_inds,:] = 0
# Label ignore proposals (between low and high thresholds)
attributes_per_image[ignore_inds,:] = 0
# return attributes
attributes.append(attributes_per_image)
else:
attributes.append([])
#return labels, regression_targets
result = {
'labels': labels,
'regression_targets': regression_targets,
}
if self.attribute_on:
result['attributes'] = attributes
return result
def subsample(self, proposals, targets):
"""
This method performs the positive/negative sampling, and return
the sampled proposals.
Note: this function keeps a state.
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
prepare_result = self.prepare_targets(proposals, targets)
labels = prepare_result['labels']
regression_targets = prepare_result['regression_targets']
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
proposals = list(proposals)
# add corresponding label and regression_targets information to the bounding boxes
for i, (labels_per_image, regression_targets_per_image,
proposals_per_image) in enumerate(zip(
labels, regression_targets, proposals
)):
proposals_per_image.add_field("labels", labels_per_image)
proposals_per_image.add_field(
"regression_targets", regression_targets_per_image
)
if self.attribute_on:
# add attributes labels
attributes_per_image = prepare_result['attributes'][i]
proposals_per_image.add_field(
"attributes", attributes_per_image
)
        # distribute the sampled proposals, which were obtained on all feature maps
        # concatenated via the fg_bg_sampler, into individual feature map levels
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
zip(sampled_pos_inds, sampled_neg_inds)
):
img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img,
as_tuple=False).squeeze(1)
proposals_per_image = proposals[img_idx][img_sampled_inds]
proposals[img_idx] = proposals_per_image
self._proposals = proposals
return proposals
def forward(self, class_logits, box_regression):
"""
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list[Tensor])
box_regression (list[Tensor])
Returns:
classification_loss (Tensor)
box_loss (Tensor)
"""
class_logits = cat(class_logits, dim=0)
box_regression = cat(box_regression, dim=0)
device = class_logits.device
if not hasattr(self, "_proposals"):
raise RuntimeError("subsample needs to be called before")
proposals = self._proposals
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
regression_targets = cat(
[proposal.get_field("regression_targets") for proposal in proposals], dim=0
)
classification_loss = self._classifier_loss(class_logits, labels)
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
sampled_pos_inds_subset = torch.nonzero(labels > 0, as_tuple=False).squeeze(1)
labels_pos = labels[sampled_pos_inds_subset]
if self.cls_agnostic_bbox_reg:
map_inds = torch.tensor([4, 5, 6, 7], device=device)
else:
map_inds = 4 * labels_pos[:, None] + torch.tensor(
[0, 1, 2, 3], device=device)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds_subset[:, None], map_inds],
regression_targets[sampled_pos_inds_subset],
size_average=False,
beta=1,
)
box_loss = box_loss / labels.numel()
return classification_loss, box_loss
def make_roi_box_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
attribute_on = cfg.MODEL.ATTRIBUTE_ON
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
)
cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG
classification_loss_type = cfg.MODEL.ROI_BOX_HEAD.CLASSIFICATION_LOSS
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
loss_evaluator = FastRCNNLossComputation(
matcher,
fg_bg_sampler,
box_coder,
cls_agnostic_bbox_reg,
classification_loss_type,
num_classes,
attribute_on=attribute_on,
boundingbox_loss_type=cfg.MODEL.ROI_BOX_HEAD.BOUNDINGBOX_LOSS_TYPE,
cfg=cfg,
)
return loss_evaluator
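
# Usage sketch (illustrative only; box_head is a placeholder callable standing in
# for the ROI box head): the evaluator is stateful, so subsample() must be called
# before the loss forward pass.
def _example_loss_step(loss_evaluator, box_head, features, proposals, targets):
    # 1) stateful sampling of positive/negative proposals
    sampled = loss_evaluator.subsample(proposals, targets)
    # 2) box-head predictions on the sampled proposals
    class_logits, box_regression = box_head(features, sampled)
    # 3) the loss forward pass relies on the state stored by subsample()
    loss_classifier, loss_box_reg = loss_evaluator([class_logits], [box_regression])
    return loss_classifier, loss_box_reg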
|
py
|
1a58bf14f278628307ba469c1664385de9dcf958
|
import unittest
import sys
try:
import aula1_resp as aula1
except ImportError:
print('Erro: o arquivo aula1.py não foi encontrado')
sys.exit(1)
MAX_PRIMES = 10000
def primes_sieve(limit):
limitn = limit+1
not_prime = [False] * limitn
primes = []
for i in range(2, limitn):
if not_prime[i]:
continue
for f in range(i*2, limitn, i):
not_prime[f] = True
primes.append(i)
return primes
def fibonacci(n):
a, b = 0, 1
for i in range(n):
a, b = b, a+b
return a
def factorial(n):
for i in range(2, n):
n *= i
return n
class TesteAula1(unittest.TestCase):
@unittest.skipIf('is_prime' not in vars(aula1),
'Função "is_prime" não foi encontrada')
def test_is_prime(self):
primes = primes_sieve(MAX_PRIMES)
for i in range(1, MAX_PRIMES):
if aula1.is_prime(i):
self.assertIn(i, primes)
else:
self.assertNotIn(i, primes)
@unittest.skipIf('fibonacci' not in vars(aula1),
'Função "fibonacci" não foi encontrada')
def test_fibonacci(self):
for i in range(0, 30):
self.assertEqual(fibonacci(i), aula1.fibonacci(i))
@unittest.skipIf('factorial' not in vars(aula1),
'Função "factorial" não foi encontrada')
def test_factorial(self):
for i in range(1, 70):
self.assertEqual(factorial(i), aula1.factorial(i))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
py
|
1a58bf5255ef456da068e73d7f095a9ce5d4f08c
|
# encoding: utf-8
from __future__ import unicode_literals, absolute_import
import os
import sys
import locale
from itertools import chain
from six import iterkeys, iteritems
from six.moves.configparser import ConfigParser
from .autocomplete import SIMPLE as default_completion, ALL_MODES
class Struct(object):
"""Simple class for instantiating objects we can add arbitrary attributes
to and use for various arbitrary things."""
def getpreferredencoding():
"""Get the user's preferred encoding."""
return locale.getpreferredencoding() or sys.getdefaultencoding()
def can_encode(c):
try:
c.encode(getpreferredencoding())
return True
except UnicodeEncodeError:
return False
def supports_box_chars():
"""Check if the encoding supports Unicode box characters."""
return all(map(can_encode, "│─└┘┌┐"))
def get_config_home():
"""Returns the base directory for bpython's configuration files."""
xdg_config_home = os.environ.get("XDG_CONFIG_HOME", "~/.config")
return os.path.join(xdg_config_home, "bpython")
def default_config_path():
"""Returns bpython's default configuration file path."""
return os.path.join(get_config_home(), "config")
def fill_config_with_default_values(config, default_values):
for section in iterkeys(default_values):
if not config.has_section(section):
config.add_section(section)
for (opt, val) in iteritems(default_values[section]):
if not config.has_option(section, opt):
config.set(section, opt, "%s" % (val,))
def loadini(struct, configfile):
"""Loads .ini configuration file and stores its values in struct"""
config_path = os.path.expanduser(configfile)
config = ConfigParser()
defaults = {
"general": {
"arg_spec": True,
"auto_display_list": True,
"autocomplete_mode": default_completion,
"color_scheme": "default",
"complete_magic_methods": True,
"dedent_after": 1,
"default_autoreload": False,
"editor": os.environ.get("VISUAL", os.environ.get("EDITOR", "vi")),
"flush_output": True,
"highlight_show_source": True,
"hist_duplicates": True,
"hist_file": "~/.pythonhist",
"hist_length": 1000,
"paste_time": 0.02,
"pastebin_confirm": True,
"pastebin_expiry": "1week",
"pastebin_helper": "",
"pastebin_url": "https://bpaste.net",
"save_append_py": False,
"single_undo_time": 1.0,
"syntax": True,
"tab_length": 4,
"unicode_box": True,
},
"keyboard": {
"backspace": "C-h",
"beginning_of_line": "C-a",
"clear_line": "C-u",
"clear_screen": "C-l",
"clear_word": "C-w",
"copy_clipboard": "F10",
"cut_to_buffer": "C-k",
"delete": "C-d",
"down_one_line": "C-n",
"edit_config": "F3",
"edit_current_block": "C-x",
"end_of_line": "C-e",
"exit": "",
"external_editor": "F7",
"help": "F1",
"incremental_search": "M-s",
"last_output": "F9",
"left": "C-b",
"pastebin": "F8",
"redo": "C-g",
"reimport": "F6",
"reverse_incremental_search": "M-r",
"right": "C-f",
"save": "C-s",
"search": "C-o",
"show_source": "F2",
"suspend": "C-z",
"toggle_file_watch": "F5",
"transpose_chars": "C-t",
"undo": "C-r",
"up_one_line": "C-p",
"yank_from_buffer": "C-y",
},
"cli": {"suggestion_width": 0.8, "trim_prompts": False,},
"curtsies": {"list_above": False, "right_arrow_completion": True,},
}
default_keys_to_commands = dict(
(value, key) for (key, value) in iteritems(defaults["keyboard"])
)
fill_config_with_default_values(config, defaults)
try:
if not config.read(config_path):
# No config file. If the user has it in the old place then complain
if os.path.isfile(os.path.expanduser("~/.bpython.ini")):
sys.stderr.write(
"Error: It seems that you have a config file at "
"~/.bpython.ini. Please move your config file to "
"%s\n" % default_config_path()
)
sys.exit(1)
except UnicodeDecodeError as e:
sys.stderr.write(
"Error: Unable to parse config file at '{}' due to an "
"encoding issue. Please make sure to fix the encoding "
"of the file or remove it and then try again.\n".format(config_path)
)
sys.exit(1)
def get_key_no_doublebind(command):
default_commands_to_keys = defaults["keyboard"]
requested_key = config.get("keyboard", command)
try:
default_command = default_keys_to_commands[requested_key]
if default_commands_to_keys[default_command] == config.get(
"keyboard", default_command
):
setattr(struct, "%s_key" % default_command, "")
except KeyError:
pass
return requested_key
struct.config_path = config_path
struct.dedent_after = config.getint("general", "dedent_after")
struct.tab_length = config.getint("general", "tab_length")
struct.auto_display_list = config.getboolean("general", "auto_display_list")
struct.syntax = config.getboolean("general", "syntax")
struct.arg_spec = config.getboolean("general", "arg_spec")
struct.paste_time = config.getfloat("general", "paste_time")
struct.single_undo_time = config.getfloat("general", "single_undo_time")
struct.highlight_show_source = config.getboolean(
"general", "highlight_show_source"
)
struct.hist_file = config.get("general", "hist_file")
struct.editor = config.get("general", "editor")
struct.hist_length = config.getint("general", "hist_length")
struct.hist_duplicates = config.getboolean("general", "hist_duplicates")
struct.flush_output = config.getboolean("general", "flush_output")
struct.default_autoreload = config.getboolean(
"general", "default_autoreload"
)
struct.pastebin_key = get_key_no_doublebind("pastebin")
struct.copy_clipboard_key = get_key_no_doublebind("copy_clipboard")
struct.save_key = get_key_no_doublebind("save")
struct.search_key = get_key_no_doublebind("search")
struct.show_source_key = get_key_no_doublebind("show_source")
struct.suspend_key = get_key_no_doublebind("suspend")
struct.toggle_file_watch_key = get_key_no_doublebind("toggle_file_watch")
struct.undo_key = get_key_no_doublebind("undo")
struct.redo_key = get_key_no_doublebind("redo")
struct.reimport_key = get_key_no_doublebind("reimport")
struct.reverse_incremental_search_key = get_key_no_doublebind(
"reverse_incremental_search"
)
struct.incremental_search_key = get_key_no_doublebind("incremental_search")
struct.up_one_line_key = get_key_no_doublebind("up_one_line")
struct.down_one_line_key = get_key_no_doublebind("down_one_line")
struct.cut_to_buffer_key = get_key_no_doublebind("cut_to_buffer")
struct.yank_from_buffer_key = get_key_no_doublebind("yank_from_buffer")
struct.clear_word_key = get_key_no_doublebind("clear_word")
struct.backspace_key = get_key_no_doublebind("backspace")
struct.clear_line_key = get_key_no_doublebind("clear_line")
struct.clear_screen_key = get_key_no_doublebind("clear_screen")
struct.delete_key = get_key_no_doublebind("delete")
struct.left_key = get_key_no_doublebind("left")
struct.right_key = get_key_no_doublebind("right")
struct.end_of_line_key = get_key_no_doublebind("end_of_line")
struct.beginning_of_line_key = get_key_no_doublebind("beginning_of_line")
struct.transpose_chars_key = get_key_no_doublebind("transpose_chars")
struct.exit_key = get_key_no_doublebind("exit")
struct.last_output_key = get_key_no_doublebind("last_output")
struct.edit_config_key = get_key_no_doublebind("edit_config")
struct.edit_current_block_key = get_key_no_doublebind("edit_current_block")
struct.external_editor_key = get_key_no_doublebind("external_editor")
struct.help_key = get_key_no_doublebind("help")
struct.pastebin_confirm = config.getboolean("general", "pastebin_confirm")
struct.pastebin_url = config.get("general", "pastebin_url")
struct.pastebin_expiry = config.get("general", "pastebin_expiry")
struct.pastebin_helper = config.get("general", "pastebin_helper")
struct.cli_suggestion_width = config.getfloat("cli", "suggestion_width")
struct.cli_trim_prompts = config.getboolean("cli", "trim_prompts")
struct.complete_magic_methods = config.getboolean(
"general", "complete_magic_methods"
)
struct.autocomplete_mode = config.get("general", "autocomplete_mode")
struct.save_append_py = config.getboolean("general", "save_append_py")
struct.curtsies_list_above = config.getboolean("curtsies", "list_above")
struct.curtsies_right_arrow_completion = config.getboolean(
"curtsies", "right_arrow_completion"
)
color_scheme_name = config.get("general", "color_scheme")
default_colors = {
"keyword": "y",
"name": "c",
"comment": "b",
"string": "m",
"error": "r",
"number": "G",
"operator": "Y",
"punctuation": "y",
"token": "C",
"background": "d",
"output": "w",
"main": "c",
"paren": "R",
"prompt": "c",
"prompt_more": "g",
"right_arrow_suggestion": "K",
}
if color_scheme_name == "default":
struct.color_scheme = default_colors
else:
struct.color_scheme = dict()
theme_filename = color_scheme_name + ".theme"
path = os.path.expanduser(
os.path.join(get_config_home(), theme_filename)
)
try:
load_theme(struct, path, struct.color_scheme, default_colors)
except EnvironmentError:
sys.stderr.write(
"Could not load theme '%s'.\n" % (color_scheme_name,)
)
sys.exit(1)
# expand path of history file
struct.hist_file = os.path.expanduser(struct.hist_file)
# verify completion mode
if struct.autocomplete_mode not in ALL_MODES:
struct.autocomplete_mode = default_completion
# set box drawing characters
if config.getboolean("general", "unicode_box") and supports_box_chars():
struct.left_border = "│"
struct.right_border = "│"
struct.top_border = "─"
struct.bottom_border = "─"
struct.left_bottom_corner = "└"
struct.right_bottom_corner = "┘"
struct.left_top_corner = "┌"
struct.right_top_corner = "┐"
else:
struct.left_border = "|"
struct.right_border = "|"
struct.top_border = "-"
struct.bottom_border = "-"
struct.left_bottom_corner = "+"
struct.right_bottom_corner = "+"
struct.left_top_corner = "+"
struct.right_top_corner = "+"
def load_theme(struct, path, colors, default_colors):
theme = ConfigParser()
with open(path, "r") as f:
theme.readfp(f)
for k, v in chain(theme.items("syntax"), theme.items("interface")):
if theme.has_option("syntax", k):
colors[k] = theme.get("syntax", k)
else:
colors[k] = theme.get("interface", k)
# Check against default theme to see if all values are defined
for k, v in iteritems(default_colors):
if k not in colors:
colors[k] = v
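
# Usage sketch (illustrative only): populate a bare Struct from the default
# config path; missing options fall back to the defaults defined in loadini.
def _example_load_default_config():
    struct = Struct()
    loadini(struct, default_config_path())
    return struct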
|
py
|
1a58c031fc8c5ee8ba873c2c70c11b732b9c2afc
|
#!/usr/bin/env python
# This will try to import setuptools. If not here, it will reach for the embedded
# ez_setup (or the ez_setup package). If none, it fails with a message
import sys
from codecs import open
try:
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
except ImportError:
try:
import ez_setup
ez_setup.use_setuptools()
except ImportError:
raise ImportError('MoviePy could not be installed, probably because'
' neither setuptools nor ez_setup are installed on this computer.'
'\nInstall ez_setup ([sudo] pip install ez_setup) and try again.')
class PyTest(TestCommand):
"""Handle test execution from setup."""
user_options = [('pytest-args=', 'a', "Arguments to pass into pytest")]
def initialize_options(self):
"""Initialize the PyTest options."""
TestCommand.initialize_options(self)
self.pytest_args = ""
def finalize_options(self):
"""Finalize the PyTest options."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Run the PyTest testing suite."""
try:
import pytest
except ImportError:
raise ImportError('Running tests requires additional dependencies.'
'\nPlease run (pip install moviepy[test])')
errno = pytest.main(self.pytest_args.split(" "))
sys.exit(errno)
cmdclass = {'test': PyTest} # Define custom commands.
if 'build_docs' in sys.argv:
try:
from sphinx.setup_command import BuildDoc
except ImportError:
        raise ImportError('Running the documentation builds has additional'
' dependencies. Please run (pip install moviepy[docs])')
cmdclass['build_docs'] = BuildDoc
__version__ = None # Explicitly set version to quieten static code checkers.
exec(open('moviepy/version.py').read()) # loads __version__
# Define the requirements for specific execution needs.
requires = [
'decorator>=4.0.2,<5.0',
"imageio>=2.5,<3.0; python_version>='3.4'",
"imageio>=2.0,<2.5; python_version<'3.4'",
"imageio_ffmpeg>=0.2.0; python_version>='3.4'",
'tqdm>=4.11.2,<5.0',
'numpy',
'requests>=2.8.1,<3.0',
'proglog<=1.0.0'
]
optional_reqs = [
"opencv-python>=3.0,<4.0; python_version!='2.7'",
"scikit-image>=0.13.0,<1.0; python_version>='3.4'",
"scikit-learn; python_version>='3.4'",
"scipy>=0.19.0,<1.0; python_version!='3.3'",
"matplotlib>=2.0.0,<3.0; python_version>='3.4'",
"youtube_dl"
]
doc_reqs = [
"pygame>=1.9.3,<2.0; python_version!='3.3'",
'numpydoc>=0.6.0,<1.0',
'sphinx_rtd_theme>=0.1.10b0,<1.0',
'Sphinx>=1.5.2,<2.0',
]
test_reqs = [
'coverage<5.0',
'coveralls>=1.1,<2.0',
'pytest-cov>=2.5.1,<3.0',
'pytest>=3.0.0,<4.0',
'requests>=2.8.1,<3.0'
]
extra_reqs = {
"optional": optional_reqs,
"doc": doc_reqs,
"test": test_reqs
}
# Load the README.
with open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
setup(
name='moviepy',
version=__version__,
author='Zulko 2017',
description='Video editing with Python',
long_description=readme,
url='https://zulko.github.io/moviepy/',
license='MIT License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Multimedia',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Capture',
'Topic :: Multimedia :: Video :: Conversion',
],
keywords='video editing audio compositing ffmpeg',
packages=find_packages(exclude=['docs', 'tests']),
cmdclass=cmdclass,
command_options={
'build_docs': {
'build_dir': ('setup.py', './docs/build'),
'config_dir': ('setup.py', './docs'),
'version': ('setup.py', __version__.rsplit('.', 2)[0]),
'release': ('setup.py', __version__)}},
tests_require=test_reqs,
install_requires=requires,
extras_require=extra_reqs,
)
|
py
|
1a58c1d0cd9e9f298798998aa8632bd18ab5d993
|
import os
import random
from collections import namedtuple
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
import h5py
from lilanet.datasets.transforms import Compose, RandomHorizontalFlip, Normalize
class DENSE(data.Dataset):
"""`DENSE LiDAR`_ Dataset.
Args:
root (string): Root directory of the ``lidar_2d`` and ``ImageSet`` folder.
split (string, optional): Select the split to use, ``train``, ``val`` or ``all``
transform (callable, optional): A function/transform that takes in distance, reflectivity
and target tensors and returns a transformed version.
"""
    #TODO: Take a look at this class section
Class = namedtuple('Class', ['name', 'id', 'color'])
classes = [
Class('unknown', 0, (0, 0, 0)),
Class('car', 1, (0, 0, 142)),
Class('pedestrian', 2, (220, 20, 60)),
Class('cyclist', 3, (119, 11, 32)),
]
def __init__(self, root, split='train', transform=None):
self.root = os.path.expanduser(root)
self.lidar_path = os.path.join(self.root, 'lidar_2d')
self.split = os.path.join(self.root, '{}_01'.format(split))
self.transform = transform
self.lidar = []
if split not in ['train', 'val', 'all']:
raise ValueError('Invalid split! Use split="train", split="val" or split="all"')
self.lidar = [os.path.join(r,file) for r,d,f in os.walk(self.split) for file in f]
def __getitem__(self, index):
with h5py.File(self.lidar[index], "r", driver='core') as hdf5:
# for channel in self.channels:
distance_1 = hdf5.get('distance_m_1')[()]
reflectivity_1 = hdf5.get('intensity_1')[()]
label_1 = hdf5.get('labels_1')[()]
#Label transformation is necessary to have contiguous labeling
label_dict= {0:0, 100:1, 101:2, 102:3}
label_1 = np.vectorize(label_dict.get)(label_1)
distance = torch.as_tensor(distance_1.astype(np.float32, copy=False)).contiguous()
reflectivity = torch.as_tensor(reflectivity_1.astype(np.float32, copy=False)).contiguous()
label = torch.as_tensor(label_1.astype(np.float32, copy=False)).contiguous()
# distance = torch.as_tensor(distance_1.astype(np.float32, copy=False))
# reflectivity = torch.as_tensor(reflectivity_1.astype(np.float32, copy=False))
# label = torch.as_tensor(label_1.astype(np.float32, copy=False))
# print("label: '{}'".format(label))
if self.transform:
distance, reflectivity, label = self.transform(distance, reflectivity, label)
return distance, reflectivity, label
def __len__(self):
return len(self.lidar)
@staticmethod
def num_classes():
return len(DENSE.classes)
@staticmethod
def mean():
return [0.21, 12.12]
@staticmethod
def std():
return [0.16, 12.32]
@staticmethod
def class_weights():
return torch.tensor([1 / 15.0, 1.0, 10.0, 10.0])
@staticmethod
def get_colormap():
cmap = torch.zeros([256, 3], dtype=torch.uint8)
for cls in DENSE.classes:
cmap[cls.id, :] = torch.tensor(cls.color, dtype=torch.uint8)
return cmap
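
# Usage sketch (illustrative only; the root path is a placeholder): wrap the
# dataset in a DataLoader. Assumes the lidar_2d HDF5 frames live under
# <root>/train_01 as expected by __init__ above.
def _example_train_loader(root='../../data/DENSE', batch_size=4):
    dataset = DENSE(root, split='train', transform=None)
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=2)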
if __name__ == '__main__':
import matplotlib.pyplot as plt
joint_transforms = Compose([
RandomHorizontalFlip(),
Normalize(mean=DENSE.mean(), std=DENSE.std())
])
def _normalize(x):
return (x - x.min()) / (x.max() - x.min())
def visualize_seg(label_map, one_hot=False):
if one_hot:
label_map = np.argmax(label_map, axis=-1)
out = np.zeros((label_map.shape[0], label_map.shape[1], 3))
for l in range(1, DENSE.num_classes()):
mask = label_map == l
out[mask, 0] = np.array(DENSE.classes[l].color[1])
out[mask, 1] = np.array(DENSE.classes[l].color[0])
out[mask, 2] = np.array(DENSE.classes[l].color[2])
return out
dataset = DENSE('../../data/DENSE', transform=joint_transforms)
distance, reflectivity, label = random.choice(dataset)
print('Distance size: ', distance.size())
print('Reflectivity size: ', reflectivity.size())
print('Label size: ', label.size())
distance_map = Image.fromarray((255 * _normalize(distance.numpy())).astype(np.uint8))
reflectivity_map = Image.fromarray((255 * _normalize(reflectivity.numpy())).astype(np.uint8))
label_map = Image.fromarray((255 * visualize_seg(label.numpy())).astype(np.uint8))
blend_map = Image.blend(distance_map.convert('RGBA'), label_map.convert('RGBA'), alpha=0.4)
plt.figure(figsize=(10, 5))
plt.subplot(221)
plt.title("Distance")
plt.imshow(distance_map)
plt.subplot(222)
plt.title("Reflectivity")
plt.imshow(reflectivity_map)
plt.subplot(223)
plt.title("Label")
plt.imshow(label_map)
plt.subplot(224)
plt.title("Result")
plt.imshow(blend_map)
plt.show()
|
py
|
1a58c255bfc6e3bfca9078c6b8cf714c85295b39
|
import enum
class TransactionType(enum.Enum):
OPTIONS = 'options'
FOREX = 'forex'
DEPOSIT_WITHDRAW = 'deposit-withdraw'
BUY_SELL = 'buy-sell'
DIVIDEND = 'dividend'
INTEREST = 'interest'
FOREIGN_TAX = 'foreign-tax'
class TransactionsDetailsType(enum.Enum):
DIVIDEND = 'DIVIDEND'
BUY = 'BUY'
SELL = 'SELL'
WITHDRAW = 'WITHDRAW'
DEPOSIT = 'DEPOSIT'
UNKNOWN = 'UNKNOWN'
class ChannelType(enum.Enum):
ACCOUNTS = 'accounts'
QUOTES = 'quotes'
ORDERDEPTHS = 'orderdepths'
TRADES = 'trades'
BROKERTRADESUMMARY = 'brokertradesummary'
POSITIONS = 'positions'
ORDERS = 'orders'
DEALS = 'deals'
class TimePeriod(enum.Enum):
TODAY = 'TODAY'
ONE_WEEK = 'ONE_WEEK'
ONE_MONTH = 'ONE_MONTH'
THREE_MONTHS = 'THREE_MONTHS'
THIS_YEAR = 'THIS_YEAR'
ONE_YEAR = 'ONE_YEAR'
FIVE_YEARS = 'FIVE_YEARS'
class ListType(enum.Enum):
HIGHEST_RATED_FUNDS = 'HIGHEST_RATED_FUNDS'
LOWEST_FEE_INDEX_FUNDS = 'LOWEST_FEE_INDEX_FUNDS'
BEST_DEVELOPMENT_FUNDS_LAST_THREE_MONTHS = 'BEST_DEVELOPMENT_FUNDS_LAST_THREE_MONTHS'
MOST_OWNED_FUNDS = 'MOST_OWNED_FUNDS'
class InstrumentType(enum.Enum):
STOCK = 'stock'
FUND = 'fund'
BOND = 'bond'
OPTION = 'option'
FUTURE_FORWARD = 'future_forward'
CERTIFICATE = 'certificate'
WARRANT = 'warrant'
EXCHANGE_TRADED_FUND = 'exchange_traded_fund'
INDEX = 'index'
PREMIUM_BOND = 'premium_bond'
SUBSCRIPTION_OPTION = 'subscription_option'
EQUITY_LINKED_BOND = 'equity_linked_bond'
CONVERTIBLE = 'convertible'
ANY = ''
class OrderType(enum.Enum):
BUY = 'BUY'
SELL = 'SELL'
class HttpMethod(enum.Enum):
POST = 1
GET = 2
PUT = 3
DELETE = 4
class Route(enum.Enum):
ACCOUNT_OVERVIEW_PATH = '/_mobile/account/{}/overview'
ACCOUNTS_POSITIONS_PATH = '/_cqbe/ff/overview/positions'
AUTHENTICATION_PATH = '/_api/authentication/sessions/usercredentials'
CHARTDATA_PATH = '/_mobile/chart/orderbook/{}?timePeriod={}'
DEALS_AND_ORDERS_PATH = '/_mobile/account/dealsandorders'
INSIGHTS_PATH = '/_cqbe/insights/?timePeriod={}&accountIds={}'
INSPIRATION_LIST_PATH = '/_mobile/marketing/inspirationlist/{}'
INSTRUMENT_PATH = '/_mobile/market/{}/{}'
INSTRUMENT_SEARCH_PATH = '/_mobile/market/search/{}?query={}'
MONTHLY_SAVINGS_CREATE_PATH = '/_api/transfer/monthly-savings/{}'
MONTHLY_SAVINGS_PATH = '/_mobile/transfer/monthly-savings/{}'
MONTHLY_SAVINGS_PAUSE_PATH = '/_api/transfer/monthly-savings/{}/{}/pause'
MONTHLY_SAVINGS_REMOVE_PATH = '/_api/transfer/monthly-savings/{}/{}/'
MONTHLY_SAVINGS_RESUME_PATH = '/_api/transfer/monthly-savings/{}/{}/resume'
NOTE_PATH = '/_api/contract-notes/documents/{}/{}/note.pdf'
ORDER_DELETE_PATH = '/_api/order?accountId={}&orderId={}'
ORDER_GET_PATH = '/_mobile/order/{}?accountId={}&orderId={}'
ORDER_PLACE_PATH = '/_api/order'
ORDER_PLACE_PATH_BUY_FUND = '/_api/fund-guide/fund-order-page/buy'
ORDER_PLACE_PATH_SELL_FUND = '/_api/fund-guide/fund-order-page/sell'
ORDER_EDIT_PATH = '/_api/order/{}/{}'
ORDERBOOK_LIST_PATH = '/_mobile/market/orderbooklist/{}'
ORDERBOOK_PATH = '/_mobile/order/{}?orderbookId={}'
OVERVIEW_PATH = '/_mobile/account/overview'
POSITIONS_PATH = '/_mobile/account/positions'
TOTP_PATH = '/_api/authentication/sessions/totp'
TRANSACTIONS_PATH = '/_mobile/account/transactions/{}'
TRANSACTIONS_DETAILS_PATH = '/_api/transactions'
WATCHLISTS_ADD_DELETE_PATH = '/_api/usercontent/watchlist/{}/orderbooks/{}'
WATCHLISTS_PATH = '/_mobile/usercontent/watchlist'
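
# Usage sketch (illustrative only; the orderbook id is a placeholder): Route
# members are templates that are filled in with str.format before a request.
def _example_chartdata_path(orderbook_id='5361'):
    return Route.CHARTDATA_PATH.value.format(orderbook_id, TimePeriod.ONE_MONTH.value)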
|
py
|
1a58c2bd306947b92b12679c4ea4b3c91b52eb2f
|
import pytest
import torch
from src.models.main import TrainOREvaluate
from src.models.model import MyAwesomeModel
def test_weight_change():
init_weights, step_weights = TrainOREvaluate(single_step=True).weights
assert not torch.all(torch.eq(init_weights, step_weights))
def test_forward_raise():
with pytest.raises(ValueError):
model = MyAwesomeModel()
model.forward(torch.rand(1, 1, 28, 27))
|
py
|
1a58c2d35ae959f5733ae0fb04be3a809fdd917d
|
from mapping.tridiag.get_tridiag_solver import get_tridiag, get_tridiag_from_diag, get_tridiag_from_special_sparse
|
py
|
1a58c39346d554f4e6e84ff4df69d422c4b75c7c
|
# Generated by Django 3.0.6 on 2020-05-15 00:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('vi_lomas_changes', '0014_auto_20200506_0230'),
]
operations = [
migrations.RemoveField(
model_name='raster',
name='extent_geom',
),
]
|
py
|
1a58c4e16afd1515875d955fb455ecdfa4926a39
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j6997cv93d6adl5%6d274b#^je8@ut4q0dhyd_x9gx-gbo@q@-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'game.apps.GameConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'game1',
'USER': 'djangouser',
'PASSWORD': 'Password.New20',
'HOST': 'localhost',
'PORT': '3306',
'TEST': {
'NAME': 'test_game1',
},
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
py
|
1a58c6d027d9cc75605bf8bfb1ce3abfa3d3995b
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
# pylint: disable=line-too-long, too-many-locals, too-many-statements
def load_command_table(self, _):
from ._client_factory import (
cf_alert_rules, cf_metrics, cf_metric_def, cf_alert_rule_incidents, cf_log_profiles, cf_autoscale,
cf_diagnostics, cf_activity_log, cf_action_groups, cf_activity_log_alerts, cf_event_categories)
from ._exception_handler import monitor_exception_handler, missing_resource_handler
from .transformers import (action_group_list_table)
action_group_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.action_groups_operations#ActionGroupsOperations.{}',
client_factory=cf_action_groups)
action_group_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.action_groups#{}',
client_factory=cf_action_groups)
activity_log_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.activity_log#{}',
client_factory=cf_activity_log)
activity_log_alerts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.activity_log_alerts_operations#ActivityLogAlertsOperations.{}',
client_factory=cf_activity_log_alerts)
activity_log_alerts_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.activity_log_alerts#{}',
client_factory=cf_activity_log_alerts)
alert_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.alert_rules_operations#AlertRulesOperations.{}',
client_factory=cf_alert_rules)
alert_rule_incidents_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.alert_rule_incidents_operations#AlertRuleIncidentsOperations.{}',
client_factory=cf_alert_rule_incidents)
autoscale_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.autoscale_settings_operations#AutoscaleSettingsOperations.{}',
client_factory=cf_autoscale)
autoscale_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.autoscale_settings#{}',
client_factory=cf_autoscale)
diagnostics_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.diagnostic_settings_operations#DiagnosticSettingsOperations.{}',
client_factory=cf_diagnostics)
diagnostics_categories_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.diagnostic_settings_category_operations#DiagnosticSettingsCategoryOperations.{}',
client_factory=cf_diagnostics)
diagnostics_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.diagnostics_settings#{}',
client_factory=cf_diagnostics)
log_profiles_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.log_profiles_operations#LogProfilesOperations.{}',
client_factory=cf_log_profiles)
metric_operations_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.metrics_operations#MetricsOperations.{}',
client_factory=cf_metrics)
alert_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.metric_alert#{}',
client_factory=cf_alert_rules)
metric_definitions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.metric_definitions_operations#MetricDefinitionsOperations.{}',
client_factory=cf_metric_def)
with self.command_group('monitor action-group', action_group_sdk, custom_command_type=action_group_custom) as g:
g.command('show', 'get', table_transformer=action_group_list_table)
g.command('create', 'create_or_update', table_transformer=action_group_list_table)
g.command('delete', 'delete')
g.command('enable-receiver', 'enable_receiver', table_transformer=action_group_list_table, exception_handler=monitor_exception_handler)
g.custom_command('list', 'list_action_groups', table_transformer=action_group_list_table)
g.generic_update_command('update', custom_func_name='update_action_groups', setter_arg_name='action_group',
table_transformer=action_group_list_table, exception_handler=monitor_exception_handler)
with self.command_group('monitor activity-log', activity_log_custom) as g:
g.command('list', 'list_activity_log')
g.command('list-categories', 'list', operations_tmpl='azure.mgmt.monitor.operations.event_categories_operations#EventCategoriesOperations.{}', client_factory=cf_event_categories)
with self.command_group('monitor activity-log alert', activity_log_alerts_sdk, custom_command_type=activity_log_alerts_custom) as g:
g.custom_command('list', 'list_activity_logs_alert')
g.custom_command('create', 'create', exception_handler=monitor_exception_handler)
g.command('show', 'get', exception_handler=missing_resource_handler)
g.command('delete', 'delete', exception_handler=missing_resource_handler)
g.generic_update_command('update', custom_func_name='update', setter_arg_name='activity_log_alert', exception_handler=monitor_exception_handler)
g.custom_command('action-group add', 'add_action_group', exception_handler=monitor_exception_handler)
g.custom_command('action-group remove', 'remove_action_group', exception_handler=monitor_exception_handler)
g.custom_command('scope add', 'add_scope', exception_handler=monitor_exception_handler)
g.custom_command('scope remove', 'remove_scope', exception_handler=monitor_exception_handler)
with self.command_group('monitor alert', alert_sdk, custom_command_type=alert_custom) as g:
g.custom_command('create', 'create_metric_rule')
g.command('delete', 'delete')
g.command('show', 'get')
g.command('list', 'list_by_resource_group')
g.command('show-incident', 'get', command_type=alert_rule_incidents_sdk)
g.command('list-incidents', 'list_by_alert_rule', command_type=alert_rule_incidents_sdk)
g.generic_update_command('update', custom_func_name='update_metric_rule', exception_handler=monitor_exception_handler)
with self.command_group('monitor autoscale-settings', autoscale_sdk, custom_command_type=autoscale_custom) as g:
g.command('create', 'create_or_update')
g.command('delete', 'delete')
g.command('show', 'get')
g.command('list', 'list_by_resource_group')
g.custom_command('get-parameters-template', 'scaffold_autoscale_settings_parameters')
g.generic_update_command('update', exception_handler=monitor_exception_handler)
with self.command_group('monitor diagnostic-settings', diagnostics_sdk, custom_command_type=diagnostics_custom) as g:
from .validators import validate_diagnostic_settings
g.custom_command('create', 'create_diagnostics_settings', validator=validate_diagnostic_settings)
g.command('show', 'get')
g.command('list', 'list')
g.command('delete', 'delete')
g.generic_update_command('update', exception_handler=monitor_exception_handler)
with self.command_group('monitor diagnostic-settings categories', diagnostics_categories_sdk) as g:
g.command('show', 'get')
g.command('list', 'list')
with self.command_group('monitor log-profiles', log_profiles_sdk) as g:
g.command('create', 'create_or_update')
g.command('delete', 'delete')
g.command('show', 'get')
g.command('list', 'list')
g.generic_update_command('update', exception_handler=monitor_exception_handler)
with self.command_group('monitor metrics') as g:
from .transformers import metrics_table, metrics_definitions_table
g.command('list', 'list', command_type=metric_operations_sdk, table_transformer=metrics_table)
g.command('list-definitions', 'list', command_type=metric_definitions_sdk, table_transformer=metrics_definitions_table)
|
py
|
1a58c7e12820daf233fc3452807d1807e77a6490
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nlp.nhnet.decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.nhnet import configs
from official.nlp.nhnet import decoder
from official.nlp.nhnet import utils
def _create_cache(batch_size, init_decode_length, num_heads, head_size):
return {
"key":
tf.zeros([batch_size, init_decode_length, num_heads, head_size],
dtype=tf.float32),
"value":
tf.zeros([batch_size, init_decode_length, num_heads, head_size],
dtype=tf.float32)
}
class DecoderTest(tf.test.TestCase):
def setUp(self):
super(DecoderTest, self).setUp()
self._config = utils.get_test_params()
def test_transformer_decoder(self):
decoder_block = decoder.TransformerDecoder(
num_hidden_layers=self._config.num_hidden_layers,
hidden_size=self._config.hidden_size,
num_attention_heads=self._config.num_attention_heads,
intermediate_size=self._config.intermediate_size,
intermediate_activation=self._config.hidden_act,
hidden_dropout_prob=self._config.hidden_dropout_prob,
attention_probs_dropout_prob=self._config.attention_probs_dropout_prob,
initializer_range=self._config.initializer_range)
decoder_block.build(None)
self.assertEqual(len(decoder_block.layers), self._config.num_hidden_layers)
def test_decoder_block_with_cache(self):
decoder_block = decoder.TransformerDecoderBlock(
hidden_size=self._config.hidden_size,
num_attention_heads=self._config.num_attention_heads,
intermediate_size=self._config.intermediate_size,
intermediate_activation=self._config.hidden_act,
hidden_dropout_prob=self._config.hidden_dropout_prob,
attention_probs_dropout_prob=self._config.attention_probs_dropout_prob,
initializer_range=self._config.initializer_range)
# Forward path.
dummy_tensor = tf.zeros([2, 4, self._config.hidden_size], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_tensor, dummy_mask, dummy_mask]
cache = _create_cache(
2, 0, self._config.num_attention_heads,
self._config.hidden_size // self._config.num_attention_heads)
output, cache = decoder_block(inputs, cache)
self.assertEqual(output.shape, (2, 4, self._config.hidden_size))
self.assertEqual(cache["value"].shape, (2, 4, 2, 8))
def test_bert_decoder(self):
seq_length = 10
encoder_input_ids = tf.keras.layers.Input(
shape=(seq_length,), name="encoder_input_ids", dtype=tf.int32)
target_ids = tf.keras.layers.Input(
shape=(seq_length,), name="target_ids", dtype=tf.int32)
encoder_outputs = tf.keras.layers.Input(
shape=(seq_length, self._config.hidden_size),
name="all_encoder_outputs",
dtype=tf.float32)
embedding_lookup = layers.OnDeviceEmbedding(
vocab_size=self._config.vocab_size,
embedding_width=self._config.hidden_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=self._config.initializer_range),
name="word_embeddings")
cross_attention_bias = decoder.AttentionBias(bias_type="single_cross")(
encoder_input_ids)
self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")(
target_ids)
inputs = dict(
attention_bias=cross_attention_bias,
self_attention_bias=self_attention_bias,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs)
decoder_layer = decoder.Decoder(self._config, embedding_lookup)
outputs = decoder_layer(inputs)
model_inputs = dict(
encoder_input_ids=encoder_input_ids,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs)
model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name="test")
self.assertLen(decoder_layer.trainable_weights, 30)
# Forward path.
fake_inputs = {
"encoder_input_ids": np.zeros((2, 10), dtype=np.int32),
"target_ids": np.zeros((2, 10), dtype=np.int32),
"all_encoder_outputs": np.zeros((2, 10, 16), dtype=np.float32),
}
output_tensor = model(fake_inputs)
self.assertEqual(output_tensor.shape, (2, 10, 16))
def test_multi_doc_decoder(self):
self._config = utils.get_test_params(cls=configs.NHNetConfig)
seq_length = 10
num_docs = 5
encoder_input_ids = tf.keras.layers.Input(
shape=(num_docs, seq_length), name="encoder_input_ids", dtype=tf.int32)
target_ids = tf.keras.layers.Input(
shape=(seq_length,), name="target_ids", dtype=tf.int32)
encoder_outputs = tf.keras.layers.Input(
shape=(num_docs, seq_length, self._config.hidden_size),
name="all_encoder_outputs",
dtype=tf.float32)
embedding_lookup = layers.OnDeviceEmbedding(
vocab_size=self._config.vocab_size,
embedding_width=self._config.hidden_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=self._config.initializer_range),
name="word_embeddings")
doc_attention_probs = tf.keras.layers.Input(
shape=(self._config.num_decoder_attn_heads, seq_length, num_docs),
name="doc_attention_probs",
dtype=tf.float32)
cross_attention_bias = decoder.AttentionBias(bias_type="multi_cross")(
encoder_input_ids)
self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")(
target_ids)
inputs = dict(
attention_bias=cross_attention_bias,
self_attention_bias=self_attention_bias,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs,
doc_attention_probs=doc_attention_probs)
decoder_layer = decoder.Decoder(self._config, embedding_lookup)
outputs = decoder_layer(inputs)
model_inputs = dict(
encoder_input_ids=encoder_input_ids,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs,
doc_attention_probs=doc_attention_probs)
model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name="test")
self.assertLen(decoder_layer.trainable_weights, 30)
# Forward path.
fake_inputs = {
"encoder_input_ids":
np.zeros((2, num_docs, seq_length), dtype=np.int32),
"target_ids":
np.zeros((2, seq_length), dtype=np.int32),
"all_encoder_outputs":
np.zeros((2, num_docs, seq_length, 16), dtype=np.float32),
"doc_attention_probs":
np.zeros(
(2, self._config.num_decoder_attn_heads, seq_length, num_docs),
dtype=np.float32)
}
output_tensor = model(fake_inputs)
self.assertEqual(output_tensor.shape, (2, seq_length, 16))
if __name__ == "__main__":
tf.test.main()
|
py
|
1a58c7fb3f8517e549618f6da24ab0fb4c3e46f3
|
import numpy as np
import cv2
from os.path import *
import math
# trs, let's assume width is always wider than height
def video_to_npy(infile, outfile=None, width=None, height=None, squarecrop=None, fps=None, mode='rgb', maxlength=None, use_cache=False):
global vcache
if use_cache and outfile is not None and 'vcache' in globals():
if outfile in vcache: return vcache[outfile]
else:
vcache = dict()
# has this video already been saved before?
if outfile and isfile(outfile):
frames = np.load(outfile)
if use_cache: vcache[outfile] = frames
# just return this preloaded video
return frames
print('reading fresh video from %s' % infile)
vidcap = cv2.VideoCapture(infile)
success, image = vidcap.read()
frames = []
count = 0
if not success:
raise ValueError('Could not read the video file!')
while success:
frames.append( image[...,::-1] if mode == 'rgb' else image )
count += 1
success,image = vidcap.read()
if fps:
span = int(vidcap.get(cv2.CAP_PROP_FPS) / fps)
frames = frames[0::span]
if width or height:
width = width if width else int(height / frames[0].shape[0] * frames[0].shape[1])
height = height if height else int(width / frames[0].shape[1] * frames[0].shape[0])
frames = [ cv2.resize(frame, (width, height)) for frame in frames ]
if squarecrop:
tl = int((width/2)-(height/2))
# note that x,y is the wrong way around i.e. it's
# F x Y x X x C
frames = [ frame[ 0:height, tl:(tl+height)] for frame in frames ]
    # trs-renamed this from "cropat" as it's a more intuitive name
if maxlength:
frames = frames[0:maxlength*fps]
frames = np.array(frames)
if outfile:
np.save(outfile, frames)
return frames
def resize_video(video, video_size=(100,100)):
"""
Resize video content
"""
width, height = video_size
width = width if width else int(height / video[0].shape[0] * video[0].shape[1])
height = height if height else int(width / video[0].shape[1] * video[0].shape[0])
video = np.array([ cv2.resize(frame, (width, height)) for frame in video ])
return video
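# Farneback dense optical flow between two consecutive BGR frames; returns an HxWx2 flow field.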
def dense_optical_flow(frame1, frame2):
f1 = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
f2 = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
return cv2.calcOpticalFlowFarneback(f1, f2, None, 0.5, 3, 15, 3, 5, 1.2, 0)
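# Visualize a flow field as a color image: hue encodes flow direction, value encodes normalized magnitude.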
def flow_to_hsv(frame1, flow):
hsvImg = np.zeros_like(frame1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsvImg[..., 0] = 0.5 * ang * 180 / np.pi
hsvImg[..., 1] = 255
hsvImg[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
return cv2.cvtColor(hsvImg, cv2.COLOR_HSV2BGR)
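# Crude global-motion compensation: subtract the mean flow vector, keeping originally-zero pixels at zero.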
def naive_stabilization(f):
vec = np.average(f,axis=(0,1))
mask = f==0
f = f-vec
f[mask]=0
return f
def flow_to_polar(f):
return cv2.cartToPolar(f[..., 0], f[..., 1])
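# Minimal usage sketch (file names are illustrative):
# frames = video_to_npy("clip.mp4", outfile="clip.npy", width=256, fps=10, squarecrop=True)
# flow = dense_optical_flow(frames[0], frames[1])
# vis = flow_to_hsv(frames[0], flow)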
|
py
|
1a58c87e650f1d7c4f9f2253b068cb35d8eac22e
|
#!/usr/bin/env python
# Fit proper motion and parallax using ra/dec/mjd data
# Most of this code was taken from here:
# https://github.com/ctheissen/WISE_Parallaxes/blob/master/WISE_Parallax.py
import numpy as np
from astropy.table import Table, vstack, join
import matplotlib.pyplot as plt
from astropy import units as u
from scipy.optimize import curve_fit, minimize
from astropy.time import Time
import astropy.coordinates as coords
from dlnpyutils import utils as dln, coords as dcoords
# Set some constants
d2a = 3600.
d2ma = 3600000.
d2y = 1/365.25
def astrometryfunc(x, Delta1, Delta2, PMra, PMdec, pi):
""" Compute proper motion and parallax model for a set of ra/dec/mjd values."""
# x: input list of central RA and DEC positions and array of MJDs
# Delta1: initial dRA position
# Delta2: initial dDEC position
# PMra: proper motion in RA (arcsec/yr)
# PMdec: proper motion in DEC (arcsec/yr)
# pi: parallax (arcsec)
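    # Model: dRA(t)  = Delta1 + PMra*t  + pi*Fac1(t)
    #        dDEC(t) = Delta2 + PMdec*t + pi*Fac2(t),  with t in years since the first epoch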
ra0, dec0, mjds = x
n = len(mjds)
years = (mjds - mjds[0])*d2y
ras = np.zeros(n,np.float64)+ra0
decs = np.zeros(n,np.float64)+dec0
bary = coords.get_body_barycentric('earth', Time(mjds, format='mjd'))
# Parallax factors
Fac1 = (bary.x * np.sin(ras*np.pi/180.) - bary.y * np.cos(ras*np.pi/180.) )
Fac2 = bary.x * np.cos(ras*np.pi/180.) * np.sin(decs*np.pi/180.) + \
bary.y * np.sin(ras*np.pi/180.) * np.sin(decs*np.pi/180.) - \
bary.z * np.cos(decs*np.pi/180.)
RAsend = Delta1 + PMra * years + pi * Fac1.value
DECsend = Delta2 + PMdec * years + pi * Fac2.value
return np.concatenate( [RAsend, DECsend]).flatten()
def fit(cat):
""" Fit proper motion and parallax to ra/dec/mjd data in a table."""
mjd = cat['mjd']
ra = cat['ra']
raerr = cat['raerr']
dec = cat['dec']
decerr = cat['decerr']
# Compute relative positions
cenra = np.mean(ra)
cendec = np.mean(dec)
lon,lat = dcoords.rotsphcen(ra,dec,cenra,cendec,gnomic=True)
lon *= d2a
lat *= d2a
# Fit proper motion and parallax
pars, cov = curve_fit(astrometryfunc, [ra, dec, mjd] ,
np.concatenate( [lon,lat] ).flatten(),
sigma=np.concatenate( [ raerr, decerr ] ).flatten() )
return pars,cov
def plotfit(cat,pars,cov,savefig=None):
""" Plot a figure of the data and the proper motion/parallax fit."""
plt.rcParams.update({'font.size': 12})
# Compute relative positions
cenra = np.mean(cat['ra'])
cendec = np.mean(cat['dec'])
lon,lat = dcoords.rotsphcen(cat['ra'],cat['dec'],cenra,cendec,gnomic=True)
lon *= d2a
lat *= d2a
# Array of MJDs for model curve
mjd = np.linspace(np.min(cat['mjd']),np.max(cat['mjd']),100)
out = astrometryfunc([cenra,cendec,mjd],pars[0],pars[1],pars[2],pars[3],pars[4])
ll = out[0:100]
bb = out[100:]
# Plot the model and data
plt.plot(ll,bb)
plt.errorbar(lon,lat,xerr=cat['raerr'],yerr=cat['decerr'],fmt='o',color='black',
markersize=5,ecolor='lightgray',elinewidth=2,linestyle='none',capsize=0)
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDEC (arcsec)')
xr = dln.minmax(np.concatenate((lon,ll)))
xr = [xr[0]-0.05*dln.valrange(xr),xr[1]+0.05*dln.valrange(xr)]
yr = dln.minmax(np.concatenate((lat,bb)))
yr = [yr[0]-0.05*dln.valrange(yr),yr[1]+0.05*dln.valrange(yr)]
plt.xlim(xr)
plt.ylim(yr)
perr = np.sqrt(np.diag(cov))
plt.annotate(r'$\mu_\alpha$ = %5.3f $\pm$ %5.3f mas/yr' % (pars[2]*1e3,perr[2]*1e3) + '\n' +
r'$\mu_\delta$ = %5.3f $\pm$ %5.3f mas/yr' % (pars[3]*1e3,perr[3]*1e3) + '\n' +
r'$\pi$ = %5.3f $\pm$ %5.3f mas' % (pars[4]*1e3,perr[4]*1e3),
xy=(xr[0]+0.05*dln.valrange(xr),yr[1]-0.20*dln.valrange(yr)),ha='left')
if savefig is not None:
plt.savefig(savefig)
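# Typical workflow (illustrative; 'cat' must provide ra, dec, raerr, decerr and mjd columns):
# pars, cov = fit(cat)
# plotfit(cat, pars, cov, savefig='parallax_fit.png')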
|
py
|
1a58c8a746301b8e814357d026abc32a704c0eb2
|
# Time: O(n)
# Space: O(1)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
if self:
return "{}".format(self.val)
else:
return None
class Solution(object):
# @param head, a ListNode
# @return a list node
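    # Floyd's tortoise-and-hare: advance fast by two and slow by one until they meet
    # (a cycle exists); then restart fast from head and step both by one -- they meet
    # again at the node where the cycle begins.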
def detectCycle(self, head):
fast, slow = head, head
while fast and fast.next:
fast, slow = fast.next.next, slow.next
if fast is slow:
fast = head
while fast is not slow:
fast, slow = fast.next, slow.next
return fast
return None
|
py
|
1a58c8e0eedff8bed9d5383ceb7af7b9fdd5b250
|
from sqlbag import S
from schemainspect import get_inspector
CREATE = """
DROP SCHEMA IF EXISTS it CASCADE;
CREATE SCHEMA it;
CREATE FUNCTION it.key_func(jsonb) RETURNS int AS $$
SELECT jsonb_array_length($1);
$$ LANGUAGE SQL IMMUTABLE;
CREATE FUNCTION it.part_func(jsonb) RETURNS boolean AS $$
SELECT jsonb_typeof($1) = 'array';
$$ LANGUAGE SQL IMMUTABLE;
CREATE TABLE it.foo(a bigserial, b jsonb);
CREATE UNIQUE INDEX fun_partial_index ON it.foo (it.key_func(b))
WHERE it.part_func(b);
CREATE INDEX brin_index ON it.foo USING BRIN (a);
"""
def test_indexes(db):
with S(db) as s:
s.execute(CREATE)
i1 = get_inspector(s, schema="it")
# Recreate schema.
        # Function OIDs will be changed
s.execute(CREATE)
i2 = get_inspector(s, schema="it")
assert i1.indexes == i2.indexes
CREATE_CONST = """
create table t(id uuid primary key, x bigint);
"""
def test_constraints(db):
with S(db) as s:
s.execute(CREATE_CONST)
i = get_inspector(s)
constraints_keys = list(i.constraints.keys())
assert constraints_keys == ['"public"."t"."t_pkey"']
indexes_keys = list(i.indexes.keys())
assert indexes_keys == ['"public"."t_pkey"']
|
py
|
1a58c8e7224933af823f6ad723ad33ffdfbc394b
|
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas.api.types import (
infer_dtype,
is_object_dtype,
is_string_dtype,
)
from pandas.tests.extension.base.base import BaseExtensionTests
class BaseDtypeTests(BaseExtensionTests):
"""Base class for ExtensionDtype classes"""
def test_name(self, dtype):
assert isinstance(dtype.name, str)
def test_kind(self, dtype):
valid = set("biufcmMOSUV")
assert dtype.kind in valid
def test_construct_from_string_own_name(self, dtype):
result = dtype.construct_from_string(dtype.name)
assert type(result) is type(dtype)
# check OK as classmethod
result = type(dtype).construct_from_string(dtype.name)
assert type(result) is type(dtype)
def test_is_dtype_from_name(self, dtype):
result = type(dtype).is_dtype(dtype.name)
assert result is True
def test_is_dtype_unboxes_dtype(self, data, dtype):
assert dtype.is_dtype(data) is True
def test_is_dtype_from_self(self, dtype):
result = type(dtype).is_dtype(dtype)
assert result is True
def test_is_dtype_other_input(self, dtype):
assert dtype.is_dtype([1, 2, 3]) is False
def test_is_not_string_type(self, dtype):
return not is_string_dtype(dtype)
def test_is_not_object_type(self, dtype):
return not is_object_dtype(dtype)
def test_eq_with_str(self, dtype):
assert dtype == dtype.name
assert dtype != dtype.name + "-suffix"
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype("object")
def test_eq_with_self(self, dtype):
assert dtype == dtype
assert dtype != object()
def test_array_type(self, data, dtype):
assert dtype.construct_array_type() is type(data)
def test_check_dtype(self, data):
dtype = data.dtype
# check equivalency for using .dtypes
df = pd.DataFrame(
{"A": pd.Series(data, dtype=dtype), "B": data, "C": "foo", "D": 1}
)
# TODO(numpy-1.20): This warnings filter and if block can be removed
# once we require numpy>=1.20
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
result = df.dtypes == str(dtype)
# NumPy>=1.20.0, but not pandas.compat.numpy till there
# is a wheel available with this change.
try:
new_numpy_behavior = np.dtype("int64") != "Int64"
except TypeError:
new_numpy_behavior = True
if dtype.name == "Int64" and not new_numpy_behavior:
expected = pd.Series([True, True, False, True], index=list("ABCD"))
else:
expected = pd.Series([True, True, False, False], index=list("ABCD"))
self.assert_series_equal(result, expected)
expected = pd.Series([True, True, False, False], index=list("ABCD"))
result = df.dtypes.apply(str) == str(dtype)
self.assert_series_equal(result, expected)
def test_hashable(self, dtype):
hash(dtype) # no error
def test_str(self, dtype):
assert str(dtype) == dtype.name
def test_eq(self, dtype):
assert dtype == dtype.name
assert dtype != "anonther_type"
def test_construct_from_string(self, dtype):
dtype_instance = type(dtype).construct_from_string(dtype.name)
assert isinstance(dtype_instance, type(dtype))
def test_construct_from_string_another_type_raises(self, dtype):
msg = f"Cannot construct a '{type(dtype).__name__}' from 'another_type'"
with pytest.raises(TypeError, match=msg):
type(dtype).construct_from_string("another_type")
def test_construct_from_string_wrong_type_raises(self, dtype):
with pytest.raises(
TypeError,
match="'construct_from_string' expects a string, got <class 'int'>",
):
type(dtype).construct_from_string(0)
def test_get_common_dtype(self, dtype):
# in practice we will not typically call this with a 1-length list
# (we shortcut to just use that dtype as the common dtype), but
# still testing as good practice to have this working (and it is the
# only case we can test in general)
assert dtype._get_common_dtype([dtype]) == dtype
@pytest.mark.parametrize("skipna", [True, False])
def test_infer_dtype(self, data, data_missing, skipna):
# only testing that this works without raising an error
res = infer_dtype(data, skipna=skipna)
assert isinstance(res, str)
res = infer_dtype(data_missing, skipna=skipna)
assert isinstance(res, str)
|
py
|
1a58c91b9cf613d1b4eb4585754d905648a4f28b
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import PALLY1TestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
return b2a_hex(b).decode('ascii')
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'})
assert_equal(rsp, expect)
class MiningTest(PALLY1TestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0]
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], 'regtest')
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblockweight'], 0)
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
assert_equal(mining_info['pooledtx'], 0)
# Mine a block to leave initial block download
node.generate(1)
tmpl = node.getblocktemplate()
self.log.info("getblocktemplate: Test capability advertised")
assert 'proposal' in tmpl['capabilities']
assert 'coinbasetxn' not in tmpl
coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1)
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
self.log.info("getblocktemplate: Test valid block")
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].vin[0].prevout.hash += 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
self.log.info("getblocktemplate: Test truncated final transaction")
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx.append(bad_block.vtx[0])
assert_template(node, bad_block, 'bad-txns-duplicate')
self.log.info("getblocktemplate: Test invalid transaction")
bad_block = copy.deepcopy(block)
bad_tx = copy.deepcopy(bad_block.vtx[0])
bad_tx.vin[0].prevout.hash = 255
bad_tx.rehash()
bad_block.vtx.append(bad_tx)
assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
self.log.info("getblocktemplate: Test nonfinal transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].nLockTime = 2 ** 32 - 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-txns-nonfinal')
self.log.info("getblocktemplate: Test bad tx count")
# The tx count is immediately after the block header
TX_COUNT_OFFSET = 80
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
bad_block_sn[TX_COUNT_OFFSET] += 1
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
bad_block.nBits = 469762303 # impossible in the real world
assert_template(node, bad_block, 'bad-diffbits')
self.log.info("getblocktemplate: Test bad merkle root")
bad_block = copy.deepcopy(block)
bad_block.hashMerkleRoot += 1
assert_template(node, bad_block, 'bad-txnmrklroot', False)
self.log.info("getblocktemplate: Test bad timestamps")
bad_block = copy.deepcopy(block)
bad_block.nTime = 2 ** 31 - 1
assert_template(node, bad_block, 'time-too-new')
bad_block.nTime = 0
assert_template(node, bad_block, 'time-too-old')
self.log.info("getblocktemplate: Test not best block")
bad_block = copy.deepcopy(block)
bad_block.hashPrevBlock = 123
assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
MiningTest().main()
|
py
|
1a58c91bbfca57276b768dfed4de455edee6d38a
|
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.pools.ip_pools.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Allocations(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.pools.ip_pools.allocations'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _AllocationsStub)
def list(self,
pool_id,
):
"""
Returns information about which addresses have been allocated from a
specified IP address pool.
:type pool_id: :class:`str`
:param pool_id: IP pool ID (required)
:rtype: :class:`com.vmware.nsx.model_client.AllocationIpAddressListResult`
:return: com.vmware.nsx.model.AllocationIpAddressListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'pool_id': pool_id,
})
class _AllocationsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for list operation
list_input_type = type.StructType('operation-input', {
'pool_id': type.StringType(),
})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/pools/ip-pools/{pool-id}/allocations',
path_variables={
'pool_id': 'pool-id',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'AllocationIpAddressListResult'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'list': list_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.pools.ip_pools.allocations',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'Allocations': Allocations,
}
|
py
|
1a58cae48c6afa2fb00ca374a70d7d394abfc9a4
|
from django.apps import AppConfig
class KinksConfig(AppConfig):
name = 'kinks'
|
py
|
1a58cbb4638146e5caa44937219a137ece230682
|
from netatmobeat import BaseTest
import os
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Netatmobeat normally
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
netatmobeat_proc = self.start_beat()
self.wait_until(lambda: self.log_contains("netatmobeat is running"))
exit_code = netatmobeat_proc.kill_and_wait()
assert exit_code == 0
|
py
|
1a58cc3bdd33528a36dd77fea439c6a78944741a
|
#!/usr/bin/env python3
from dataclasses import dataclass, field
from typing import List, Type
from ml.rl.models.actor import GaussianFullyConnectedActor
from ml.rl.models.base import ModelBase
from ml.rl.net_builder.continuous_actor_net_builder import ContinuousActorNetBuilder
from ml.rl.parameters import NormalizationData, param_hash
from ml.rl.preprocessing.identify_types import CONTINUOUS_ACTION
from ml.rl.preprocessing.normalization import get_num_output_features
@dataclass(frozen=True)
class GaussianFullyConnectedConfig:
__hash__ = param_hash
sizes: List[int] = field(default_factory=lambda: [128, 64])
activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
use_batch_norm: bool = False
use_layer_norm: bool = False
class GaussianFullyConnected(ContinuousActorNetBuilder):
def __init__(self, config: GaussianFullyConnectedConfig):
super().__init__()
assert len(config.sizes) == len(config.activations), (
f"Must have the same numbers of sizes and activations; got: "
f"{config.sizes}, {config.activations}"
)
self.config = config
@classmethod
def config_type(cls) -> Type:
return GaussianFullyConnectedConfig
@property
def default_action_preprocessing(self) -> str:
return CONTINUOUS_ACTION
def build_actor(
self,
state_normalization_data: NormalizationData,
action_normalization_data: NormalizationData,
) -> ModelBase:
state_dim = get_num_output_features(
state_normalization_data.dense_normalization_parameters
)
action_dim = get_num_output_features(
action_normalization_data.dense_normalization_parameters
)
return GaussianFullyConnectedActor(
state_dim=state_dim,
action_dim=action_dim,
sizes=self.config.sizes,
activations=self.config.activations,
use_batch_norm=self.config.use_batch_norm,
use_layer_norm=self.config.use_layer_norm,
)
|
py
|
1a58cc65347ddc4be9f73b1dbe3d82a73f8a4132
|
from nose.tools import ok_, eq_
# These are nose shorthands:
#   ok_(expr)  -> assert expr
#   eq_(a, b)  -> assert_equals(a, b)
def test_case01():
ok_(2+2 == 4, msg="Test Case Failure")
def test_case02():
eq_(2+2, 4, msg="Test Case Failure")
def test_case03():
ok_(2+2 == 5, msg="Test Case Failure")
def test_case04():
eq_(2+2, 5, msg="Test Case Failure")
|
py
|
1a58cfe76d0bf752911b44b69ef352a513ca4d7f
|
import unittest
from scripts.caesar import shift_character
class TestCaesarCipher(unittest.TestCase):
def test_shift_character(self):
self.assertEqual(shift_character("a", 1), "B")
self.assertEqual(shift_character("a", 2), "C")
self.assertEqual(shift_character("a", 3), "D")
self.assertEqual(shift_character("z", 1), "A")
self.assertEqual(shift_character("Z", 2), "B")
self.assertEqual(shift_character("1", 2), "1")
self.assertEqual(shift_character(",", 2), ",")
self.assertEqual(shift_character(".", 2), ".")
self.assertEqual(shift_character(";", 2), ";")
self.assertEqual(shift_character(":", 2), ":")
self.assertEqual(shift_character("!", 2), "!")
self.assertEqual(shift_character("(", 2), "(")
self.assertEqual(shift_character(")", 2), ")")
self.assertEqual(shift_character("'", 2), "'")
self.assertEqual(shift_character('"', 2), '"')
self.assertEqual(shift_character("?", 2), "?")
self.assertEqual(shift_character("-", 2), "-")
with self.assertRaisesRegexp(Exception, "Illegal input"):
shift_character("$", 1)
with self.assertRaisesRegexp(Exception, "too long"):
shift_character("ab", 1)
|
py
|
1a58d04a00cbe43c10115313476d7bf7b931188c
|
"""
power_meter_hardware.py
"__"
"""
__author__ = "Prakash Manandhar, and Sophie Yang"
__copyright__ = "Copyright 2021, Hydration Team"
__credits__ = ["Prakash Manandhar, and Sophie Yang"]
__license__ = "Internal"
__version__ = "1.0.0"
__maintainer__ = "Sophie Yang"
__email__ = "[email protected]"
__status__ = "Production"
from time import sleep # this lets us have a time delay
import time
from abc import ABC, abstractmethod # https://docs.python.org/3/library/abc.html
import numpy
import threading
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
from pymodbus.client.sync import ModbusSerialClient
from pymodbus.payload import BinaryPayloadDecoder
class AbstractPowerMeter(ABC):
@abstractmethod
# returns a timestamped power reading
def get_active_power_W(self):
pass
@abstractmethod
def get_current_mA(self):
pass
class MockPowerMeterSensor(AbstractPowerMeter):
def get_active_power_W(self):
return [time.time(), -2000.0]
def get_current_mA(self):
return [time.time(), -999.0]
class PowerMeterThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.stopped = True
self.sensor_readings = {
"time_s": 0.0,
"active_power_W": 0.0,
"current_mA": 0.0,
}
self.client = ModbusSerialClient(port=config.get('PowerMeter', 'port'), method='rtu', baudrate=config.getint('PowerMeter', 'baudrate'))
def run(self):
self.stopped = False
address = config.getint("PowerMeter", "address")
count = config.getint("PowerMeter", "count")
sampling_time = config.getfloat("PowerMeter", "SamplingTime")
while not self.stopped:
loop_start = time.time()
result = self.client.read_holding_registers(address, count, unit=1)
decoder = BinaryPayloadDecoder.fromRegisters(result.registers,
wordorder = '>', byteorder = '>')
current_mA = decoder.decode_32bit_float()
power_W = decoder.decode_32bit_float()
self.sensor_readings["time_s"] = loop_start
self.sensor_readings["active_power_W"] = power_W
self.sensor_readings["current_mA"] = current_mA
loop_end = time.time()
delta_time = loop_end - loop_start
if (delta_time < sampling_time):
time.sleep(sampling_time - delta_time)
def stop(self):
self.stopped = True
class FileWriterThread(threading.Thread):
def __init__(self, power_meter_thread):
threading.Thread.__init__(self)
self.power_meter_thread = power_meter_thread
self.stopped = True
def run(self):
self.stopped = False
time_start_s = time.time()
fp = open(f"power_meter_{time_start_s}.csv", "w")
keys = self.power_meter_thread.sensor_readings.keys()
for k in keys:
fp.write(f"{k},")
fp.write("\n")
sampling_time = config.getfloat("PowerMeter", "SamplingTime")
while not self.stopped: #read sensor continuously
loop_start = time.time()
for k in keys:
fp.write(f"{self.power_meter_thread.sensor_readings[k]},")
fp.write("\n")
loop_start_int = (int(loop_start))%10
if loop_start_int == 0:
print(f"[t (s), Power (W)] = {self.power_meter_thread.sensor_readings['time_s']}, "\
f"{self.power_meter_thread.sensor_readings['active_power_W']}")
loop_end = time.time()
delta_time = loop_end - loop_start
if (delta_time < sampling_time):
time.sleep(sampling_time - delta_time)
fp.close()
def stop(self):
self.stopped = True
class PowerMeter(AbstractPowerMeter):
def __init__(self):
self.power_meter_thread = PowerMeterThread()
self.file_writer_thread = FileWriterThread(self.power_meter_thread)
self.power_meter_thread.start()
self.file_writer_thread.start()
def get_active_power_W(self):
return [self.power_meter_thread.sensor_readings["time_s"],
self.power_meter_thread.sensor_readings["active_power_W"]]
def get_current_mA(self):
return [self.power_meter_thread.sensor_readings["time_s"],
self.power_meter_thread.sensor_readings["current_mA"]]
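# Typical usage (assumes a config.ini with a [PowerMeter] section describing the serial device):
# meter = PowerMeter()
# timestamp_s, power_W = meter.get_active_power_W()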
|
py
|
1a58d0875c0608d8680144cc2ba969dc774ff8e1
|
from django.urls import path, re_path
from . import views
app_name = 'courses'
urlpatterns = [
path('', views.course_list, name='courses'),
re_path(r'(?P<course_pk>\d+)/(?P<step_pk>\d+)$', views.step_detail, name='step'),
re_path(r'(?P<pk>\d+)/$', views.course_detail, name='detail'),
]
|
py
|
1a58d0e4e3d144d487e8bec5d3468b8dbb3c679d
|
#!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import os
import sys
import imp
import base64
import re
import json
import platform
import shutil
import time
import traceback
import datetime
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as Util
from redhatPatching import redhatPatching
class OraclePatching(redhatPatching):
def __init__(self, hutil):
super(OraclePatching,self).__init__(hutil)
|
py
|
1a58d122228921cf8b48c52177abcb8e72870674
|
import os
import sys
from datetime import datetime
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from db.db import db
# TODO: define the User model
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(30), nullable=False)
username = db.Column(db.String(30), nullable=False)
psd = db.Column(db.String(30), nullable=False)
money = db.Column(db.Float, nullable=False, default=0)
create_time = db.Column(db.DATETIME, default=datetime.now)
|
py
|
1a58d18861046ade3587c4f48b9795108fd32034
|
# Disable debbuging logs (to get rid of cuda warnings)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
import matplotlib.patches as mpatches
plt.rcParams['figure.figsize'] = (10, 6)
if not tf.__version__ == '2.2.0':
print(tf.__version__)
raise ValueError('please upgrade to TensorFlow 2.2.0, or restart your Kernel (Kernel->Restart & Clear Output)')
'''
arange returns evenly spaced values within a given interval (between 0 and 5)
using 0.1 steps
'''
X = np.arange(0.0, 5.0, 0.1) #Independent
a = 1 # Slope
b = 0 # Intercept
Y = a * X + b #Dependent
# Graphical interface
plt.plot(X, Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
|
py
|
1a58d1a235be2efa36790fbb834a3cdfa2170276
|
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator, URLValidator
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import gettext
from cms.utils.page import get_all_pages_from_path
from cms.utils.urlutils import admin_reverse, relative_url_regex
def validate_relative_url(value):
RegexValidator(regex=relative_url_regex)(value)
def validate_url(value):
try:
# Validate relative urls first
validate_relative_url(value)
except ValidationError:
# Fallback to absolute urls
URLValidator()(value)
def validate_url_uniqueness(site, path, language, exclude_page=None):
""" Checks for conflicting urls
"""
if '/' in path:
validate_url(path)
path = path.strip('/')
pages = get_all_pages_from_path(site, path, language)
pages = pages.select_related('publisher_public')
if exclude_page:
pages = pages.exclude(pk=exclude_page.pk)
if exclude_page.publisher_public_id:
pages = pages.exclude(pk=exclude_page.publisher_public_id)
try:
conflict_page = pages[0]
except IndexError:
return True
if conflict_page.publisher_is_draft:
page_id = conflict_page.pk
else:
# rare case where draft points to one url
# and live points to another which conflicts.
# Use the draft ID because public page is not editable.
page_id = conflict_page.publisher_public_id
if conflict_page.is_page_type:
change_url = admin_reverse('cms_pagetype_change', args=[page_id])
else:
change_url = admin_reverse('cms_page_change', args=[page_id])
conflict_url = '<a href="%(change_url)s" target="_blank">%(page_title)s</a>' % {
'change_url': change_url,
'page_title': force_text(conflict_page),
}
if exclude_page:
message = gettext('Page %(conflict_page)s has the same url \'%(url)s\' as current page "%(instance)s".')
else:
message = gettext('Page %(conflict_page)s has the same url \'%(url)s\' as current page.')
message = message % {'conflict_page': conflict_url, 'url': path, 'instance': exclude_page}
raise ValidationError(mark_safe(message))
|
py
|
1a58d257c6e947efecd5aada41ca9a0abedf92b8
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
from ax.models.base import Model
from ax.models.model_utils import tunable_feature_indices
from ax.models.random.base import RandomModel
from ax.models.types import TConfig
from ax.utils.common.docutils import copy_doc
from ax.utils.common.typeutils import not_none
from torch.quasirandom import SobolEngine
class SobolGenerator(RandomModel):
"""This class specifies the generation algorithm for a Sobol generator.
As Sobol does not make use of a model, it does not implement
the fit or predict methods.
Attributes:
deduplicate: If true, a single instantiation of the generator will not
return the same point twice.
init_position: The initial state of the Sobol generator.
Starts at 0 by default.
scramble: If True, permutes the parameter values among
the elements of the Sobol sequence. Default is True.
seed: An optional seed value for scrambling.
"""
engine: Optional[SobolEngine] = None
def __init__(
self,
seed: Optional[int] = None,
deduplicate: bool = False,
init_position: int = 0,
scramble: bool = True,
generated_points: Optional[np.ndarray] = None,
fallback_to_sample_polytope: bool = False,
) -> None:
super().__init__(
deduplicate=deduplicate, seed=seed, generated_points=generated_points
)
self.init_position = init_position
self.scramble = scramble
# Initialize engine on gen.
self._engine = None
self.fallback_to_sample_polytope = fallback_to_sample_polytope
def init_engine(self, n_tunable_features: int) -> SobolEngine:
"""Initialize singleton SobolEngine, only on gen.
Args:
n_tunable_features: The number of features which can be
searched over.
Returns:
SobolEngine, which can generate Sobol points.
"""
if not self._engine:
self._engine = SobolEngine(
dimension=n_tunable_features, scramble=self.scramble, seed=self.seed
).fast_forward(self.init_position)
return self._engine
@property
def engine(self) -> Optional[SobolEngine]:
"""Return a singleton SobolEngine."""
return self._engine
def gen(
self,
n: int,
bounds: List[Tuple[float, float]],
linear_constraints: Optional[Tuple[np.ndarray, np.ndarray]] = None,
fixed_features: Optional[Dict[int, float]] = None,
model_gen_options: Optional[TConfig] = None,
rounding_func: Optional[Callable[[np.ndarray], np.ndarray]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate new candidates.
Args:
n: Number of candidates to generate.
bounds: A list of (lower, upper) tuples for each column of X.
linear_constraints: A tuple of (A, b). For k linear constraints on
d-dimensional x, A is (k x d) and b is (k x 1) such that
A x <= b.
fixed_features: A map {feature_index: value} for features that
should be fixed to a particular value during generation.
rounding_func: A function that rounds an optimization result
appropriately (e.g., according to `round-trip` transformations)
but *unused here*.
Returns:
2-element tuple containing
- (n x d) array of generated points.
- Uniform weights, an n-array of ones for each point.
"""
tf_indices = tunable_feature_indices(
bounds=bounds, fixed_features=fixed_features
)
if len(tf_indices) > 0:
self.init_engine(len(tf_indices))
points, weights = super().gen(
n=n,
bounds=bounds,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
model_gen_options=model_gen_options,
rounding_func=rounding_func,
)
if self.engine:
self.init_position = not_none(self.engine).num_generated
return (points, weights)
@copy_doc(Model._get_state)
def _get_state(self) -> Dict[str, Any]:
state = super()._get_state()
state.update({"init_position": self.init_position})
return state
@copy_doc(RandomModel._gen_unconstrained)
def _gen_unconstrained(
self,
n: int,
d: int,
tunable_feature_indices: np.ndarray,
fixed_features: Optional[Dict[int, float]] = None,
) -> np.ndarray:
if len(tunable_feature_indices) == 0:
# Search space is entirely fixed, should return the only avail. point.
fixed_features = fixed_features or {}
# pyre-fixme[7]: Expected `ndarray` but got `Tuple[typing.Any, typing.Any]`.
return (
np.tile(np.array([list(not_none(fixed_features).values())]), (n, 1)),
np.ones(n),
)
return super()._gen_unconstrained(
n=n,
d=d,
tunable_feature_indices=tunable_feature_indices,
fixed_features=fixed_features,
)
def _gen_samples(self, n: int, tunable_d: int) -> np.ndarray:
"""Generate n samples.
        tunable_d is ignored, as it is specified at engine initialization.
        Args:
            n: Number of samples to generate.
            tunable_d: Dimension of the tunable features (unused here).
"""
if self.engine is None:
raise ValueError( # pragma: no cover
"Sobol Engine must be initialized before candidate generation."
)
return not_none(self.engine).draw(n, dtype=torch.double).numpy()
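# Minimal usage sketch (bounds are illustrative):
# gen = SobolGenerator(seed=0)
# points, weights = gen.gen(n=8, bounds=[(0.0, 1.0), (0.0, 1.0)])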
|
py
|
1a58d26208b79fef1fc511f297e0ac23b844e3d7
|
from django.contrib import admin
from pages.models import Expert, Meeting
class MeetingAdmin(admin.ModelAdmin):
list_display = ("user", "objective")
admin.site.register(Expert)
admin.site.register(Meeting, MeetingAdmin)
|
py
|
1a58d287f604ad83957b320d058e4d38a0b75ef9
|
#!/usr/bin/env python
import struct
from serial import Serial, PARITY_NONE
from umodbus.client.serial import rtu
from umodbus.functions import function_code_to_function_map, ModbusFunction
from umodbus.exceptions import IllegalDataAddressError, IllegalDataValueError
# This code is the great work of:
# https://github.com/greentangerine/ME3000
# Thank you so much @greentangerine for sharing your work and letting us all build on it
class ME3000:
# some constants from the Passive protocol
STANDBY=0x0100
DISCHARGE=0x0101
CHARGE=0x0102
AUTO=0x0103
STANDBY_VAL=0x5555
ME_HOLDING=0x0200
NUM_HOLDING=69
# specific holding registers
ME_STATE=0x0200
BATTPCT=0x0210
ME_INPUT=0x10B0
NUM_INPUT=13
INV_STATES = ("WAIT", "CHECK CHARGE", "CHARGE", "CHECK DISCHARGE",
"DISCHARGE", "EPS", "FAULT", "PERM FAULT")
port_id = None
slave_id = None
serial_port = None
def __init__(self, port, slave):
self.port_id = port
self.slave_id = slave
self.serial_port = self.get_serial_port()
def connect(self):
        if self.serial_port is None:
            # (Re)open the port only when it is not already open.
            self.serial_port = self.get_serial_port()
def get_serial_port(self):
""" Return serial.Serial instance, ready to use for RS485."""
port = Serial(port=self.port_id, baudrate=9600, parity=PARITY_NONE,
stopbits=1, bytesize=8, timeout=1)
return port
def disconnect(self):
if self.serial_port is not None:
self.close_serial_port()
def close_serial_port(self):
self.serial_port.close()
self.serial_port = None
def set_auto(self):
""" Switch inverter to AUTO."""
ret_status = True
message = write_passive_register(slave_id=self.slave_id,
address=self.AUTO,
value=0)
try:
response = rtu.send_message(message, self.serial_port)
except:
ret_status = False
response = 0
return ret_status, response
def set_standby(self):
""" Switch inverter to STANDBY."""
ret_status = True
message = write_passive_register(slave_id=self.slave_id,
address=self.STANDBY,
value=self.STANDBY_VAL)
try:
response = rtu.send_message(message, self.serial_port)
except:
ret_status = False
response = 0
return ret_status, response
def set_charge(self, charge=3000):
""" Set charge value."""
ret_status = True
message = write_passive_register(slave_id=self.slave_id,
address=self.CHARGE,
value=charge)
try:
response = rtu.send_message(message, self.serial_port)
except:
ret_status = False
response = 0
return ret_status, response
def set_discharge(self, discharge=3000):
""" Set discharge value."""
ret_status = True
message = write_passive_register(slave_id=self.slave_id,
address=self.DISCHARGE,
value=discharge)
try:
response = rtu.send_message(message, self.serial_port)
except:
ret_status = False
response = 0
return ret_status, response
def read_holding(self):
""" Read all the holding registers from inverter."""
ret_status = True
message = rtu.read_holding_registers(slave_id=self.slave_id,
starting_address=self.ME_HOLDING,
quantity=self.NUM_HOLDING)
try:
response = rtu.send_message(message, self.serial_port)
except:
ret_status = False
response = 0
return ret_status, response
def read_input(self):
""" Read the inverter's input registers."""
ret_status = True
message = rtu.read_input_registers(slave_id=self.slave_id,
starting_address=self.ME_INPUT,
quantity=self.NUM_INPUT)
try:
response = rtu.send_message(message, self.serial_port)
except:
ret_status = False
response = 0
return ret_status, response
def get_inverter_state(self):
""" Return the inverter state."""
ret_status = True
message = rtu.read_holding_registers(slave_id=self.slave_id,
starting_address=self.ME_STATE,
quantity=1)
try:
response = rtu.send_message(message, self.serial_port)
except:
ret_status = False
response = [-1]
return ret_status, response[0], self.INV_STATES[response[0]]
def get_battery_percentage(self):
""" Return the current charge percentage of the batteries."""
ret_status = True
message = rtu.read_holding_registers(slave_id=self.slave_id,
starting_address=self.BATTPCT,
quantity=1)
try:
response = rtu.send_message(message, self.serial_port)
except:
ret_status = False
response = [-1]
return ret_status, response[0]
def write_passive_register(slave_id, address, value):
""" Return ADU for Modbus extended function code 66: Write Passive Register.
:param slave_id: Number of slave.
:return: Byte array with ADU.
"""
function = WritePassiveRegister()
function._address = address
function._value = value
return rtu._create_request_adu(slave_id, function.request_pdu)
# SoFar ME30000 Passive Mode
WRITE_PASSIVE_REGISTER = 66
class WritePassiveRegister(ModbusFunction):
""" Implement SoFar Modbus function code 66.
This function code is used to write a single holding register in a
remote device.
The Request PDU specifies the address of the register to
be written.
    The response consists of the slave id, function code, byte count
and status bytes.
The request PDU with function code 66 must be 5 bytes:
================ ===============
Field Length (bytes)
================ ===============
Function code 1
Address 2
Value 2
================ ===============
The PDU can unpacked to this:
..
    Note: the backslashes in the bytes below are escaped using an extra back
slash. Without escaping the bytes aren't printed correctly in the HTML
output of this docs.
To work with the bytes in Python you need to remove the escape sequences.
`b'\\x01\\x00d` -> `b\x01\x00d`
.. code-block:: python
>>> struct.unpack('>BHh', b'\\x42\\x00d\\x00\\x03')
        (66, 100, 3)
    The response PDU is a two byte status value.
================ ===============
Field Length (bytes)
================ ===============
Function code 1
Byte Count 1
Status Value 2
================ ===============
"""
function_code = WRITE_PASSIVE_REGISTER
_address = None
_count = 2
_value = None
data = None
@property
def value(self):
return self._value
@value.setter
def value(self, value):
""" Value to be written on register.
:param value: An integer.
:raises: IllegalDataValueError when value isn't in range.
"""
try:
struct.pack('>h', value)
except struct.error:
raise IllegalDataValueError
self._value = value
@property
def request_pdu(self):
""" Build request PDU to write single register.
:return: Byte array of 5 bytes with PDU.
"""
if None in [self._address, self._value]:
# TODO Raise proper exception.
raise Exception
return struct.pack('>BHh', self.function_code,
self._address, self._value)
@staticmethod
def create_from_request_pdu(pdu):
""" Create instance from request PDU.
:param pdu: A request PDU.
"""
_, address, value = \
struct.unpack('>BHh', pdu)
instance = WritePassiveRegister()
instance._address = address
instance._value = value
return instance
@property
def expected_response_pdu_size(self):
""" Return number of bytes expected for response PDU.
:return: number of bytes.
"""
return 4
def create_response_pdu(self):
fmt = '>BBH'
ret_val = struct.pack(fmt,
self.function_code, self._count, self.data)
return ret_val
@staticmethod
def create_from_response_pdu(resp_pdu):
""" Create instance from response PDU.
:param resp_pdu: Byte array with request PDU.
:return: Instance of :class:`WritePassiveRegister`.
"""
write_passive_register = WritePassiveRegister()
quantity, value = struct.unpack('>BH', resp_pdu[1:4])
write_passive_register._address = quantity
write_passive_register.data = value
return write_passive_register
def execute(self, slave_id, route_map):
""" Execute the Modbus function registered for a route.
:param slave_id: Slave id.
        :param route_map: Instance of modbus.route.Map.
"""
endpoint = route_map.match(slave_id, self.function_code, self._address)
try:
endpoint(slave_id=slave_id,
address=self._address,
value=self._value,
function_code=self.function_code)
# route_map.match() returns None if no match is found. Calling None
# results in TypeError.
except TypeError:
raise IllegalDataAddressError()
function_code_to_function_map[WRITE_PASSIVE_REGISTER] = WritePassiveRegister
|
py
|
1a58d3169bd241ce24945a476b634cc01b9340a8
|
"""helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('main_app.urls')),
]
|
py
|
1a58d3a6cb0e541f2a96376006f8369719b22ca6
|
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
"""
hbmqtt_pub - MQTT 3.1.1 publisher
Usage:
hbmqtt_pub --version
hbmqtt_pub (-h | --help)
hbmqtt_pub --url BROKER_URL -t TOPIC (-f FILE | -l | -m MESSAGE | -n | -s) [-c CONFIG_FILE] [-i CLIENT_ID] [-q | --qos QOS] [-d] [-k KEEP_ALIVE] [--clean-session] [--ca-file CAFILE] [--ca-path CAPATH] [--ca-data CADATA] [ --will-topic WILL_TOPIC [--will-message WILL_MESSAGE] [--will-qos WILL_QOS] [--will-retain] ] [--extra-headers HEADER] [-r]
Options:
-h --help Show this screen.
--version Show version.
    --url BROKER_URL Broker connection URL (must conform to the MQTT URI scheme, see https://github.com/mqtt/mqtt.github.io/wiki/URI-Scheme)
-c CONFIG_FILE Broker configuration file (YAML format)
-i CLIENT_ID Id to use as client ID.
-q | --qos QOS Quality of service to use for the message, from 0, 1 and 2. Defaults to 0.
-r Set retain flag on connect
-t TOPIC Message topic
-m MESSAGE Message data to send
-f FILE Read file by line and publish message for each line
-s Read from stdin and publish message for each line
-k KEEP_ALIVE Keep alive timeout in second
--clean-session Clean session on connect (defaults to False)
    --ca-file CAFILE CA file
    --ca-path CAPATH CA Path
--ca-data CADATA CA data
--will-topic WILL_TOPIC
--will-message WILL_MESSAGE
--will-qos WILL_QOS
--will-retain
--extra-headers EXTRA_HEADERS JSON object with key-value pairs of additional headers for websocket connections
-d Enable debug messages
"""
import sys
import logging
import asyncio
import os
import json
import hbmqtt
from hbmqtt.client import MQTTClient, ConnectException
from docopt import docopt
from hbmqtt.utils import read_yaml_config
logger = logging.getLogger(__name__)
def _gen_client_id():
import os
import socket
pid = os.getpid()
hostname = socket.gethostname()
return "hbmqtt_pub/%d-%s" % (pid, hostname)
def _get_qos(arguments):
try:
return int(arguments["--qos"][0])
except:
return None
def _get_extra_headers(arguments):
try:
return json.loads(arguments["--extra-headers"])
except:
return {}
def _get_message(arguments):
if arguments["-n"]:
yield b""
if arguments["-m"]:
yield arguments["-m"].encode(encoding="utf-8")
if arguments["-f"]:
try:
with open(arguments["-f"], "r") as f:
for line in f:
yield line.encode(encoding="utf-8")
except:
logger.error("Failed to read file '%s'" % arguments["-f"])
if arguments["-l"]:
import sys
for line in sys.stdin:
if line:
yield line.encode(encoding="utf-8")
if arguments["-s"]:
import sys
message = bytearray()
for line in sys.stdin:
message.extend(line.encode(encoding="utf-8"))
yield message
async def do_pub(client, arguments):
running_tasks = []
try:
logger.info("%s Connecting to broker" % client.client_id)
await client.connect(
uri=arguments["--url"],
cleansession=arguments["--clean-session"],
cafile=arguments["--ca-file"],
capath=arguments["--ca-path"],
cadata=arguments["--ca-data"],
extra_headers=_get_extra_headers(arguments),
)
qos = _get_qos(arguments)
topic = arguments["-t"]
retain = arguments["-r"]
for message in _get_message(arguments):
logger.info("%s Publishing to '%s'" % (client.client_id, topic))
task = asyncio.ensure_future(client.publish(topic, message, qos, retain))
running_tasks.append(task)
if running_tasks:
await asyncio.wait(running_tasks)
await client.disconnect()
logger.info("%s Disconnected from broker" % client.client_id)
except KeyboardInterrupt:
await client.disconnect()
logger.info("%s Disconnected from broker" % client.client_id)
except ConnectException as ce:
logger.fatal("connection to '%s' failed: %r" % (arguments["--url"], ce))
except asyncio.CancelledError:
logger.fatal("Publish canceled due to previous error")
def main(*args, **kwargs):
if sys.version_info[:2] < (3, 6):
logger.fatal("Error: Python 3.6+ is required")
sys.exit(-1)
arguments = docopt(__doc__, version=hbmqtt.__version__)
# print(arguments)
formatter = "[%(asctime)s] :: %(levelname)s - %(message)s"
if arguments["-d"]:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format=formatter)
if arguments["-c"]:
config = read_yaml_config(arguments["-c"])
else:
config = read_yaml_config(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "default_client.yaml"
)
)
logger.debug("Using default configuration")
loop = asyncio.get_event_loop()
client_id = arguments.get("-i", None)
if not client_id:
client_id = _gen_client_id()
if arguments["-k"]:
config["keep_alive"] = int(arguments["-k"])
if (
arguments["--will-topic"]
and arguments["--will-message"]
and arguments["--will-qos"]
):
config["will"] = dict()
config["will"]["topic"] = arguments["--will-topic"]
config["will"]["message"] = arguments["--will-message"].encode("utf-8")
config["will"]["qos"] = int(arguments["--will-qos"])
config["will"]["retain"] = arguments["--will-retain"]
client = MQTTClient(client_id=client_id, config=config, loop=loop)
loop.run_until_complete(do_pub(client, arguments))
loop.close()
if __name__ == "__main__":
main()
|
py
|
1a58d5313c38bf12e6f5db86fd0bf740a60feeab
|
# Caesar Encryption
import sys
if (__name__ == "__main__"):
def readFile (path):
file = open(path, "r")
lineList = []
for line in file:
lineList.append(line)
#print(lineList)
return lineList
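    # Shift every character of each line (except the trailing newline) by x positions,
    # wrapping the result back into the 0-255 byte range.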
def encrypt (lines,x):
encrypted = []
for line in lines:
for idx in range(0,len(line)-1):
num = ord(line[idx])+x
while(num < 0):
num += 256
while(num > 255):
num -= 256
char = chr(num)
encrypted.append(char)
encrypted.append("\n")
return encrypted
def writeFile(path, encrypted):
file = open(path, "w")
for char in encrypted:
file.write(char) #print(char)
        file.close()
if(len(sys.argv) < 4):
print("INPUT ERROR")
print("try:\npython caesar.py \"number\" \"path\" \"-e\\-d\"")
else:
num = int(sys.argv[1])
path = sys.argv[2]
crypt = sys.argv[3]
lines = readFile(path)
if(crypt == "-d"):
num = num * (-1)
encrypted = encrypt(lines, num)
elif(crypt == "-e"):
encrypted = encrypt(lines, num)
writeFile(path, encrypted)
|
py
|
1a58d5c82eddb2aac383010682ed0d4262f6c2a7
|
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, TYPE_CHECKING
from loopchain.components import SingletonMetaClass
if TYPE_CHECKING:
from loopchain.peer import PeerInnerStub
from loopchain.channel.channel_inner_service import ChannelInnerStub, \
ChannelTxReceiverInnerStub, ChannelTxCreatorInnerStub
from loopchain.scoreservice import IconScoreInnerStub
class StubCollection(metaclass=SingletonMetaClass):
def __init__(self):
self.amqp_target = None
self.amqp_key = None
self.peer_stub: PeerInnerStub = None
self.channel_stubs: Dict[str, ChannelInnerStub] = {}
self.channel_tx_creator_stubs: Dict[str, ChannelTxCreatorInnerStub] = {}
self.channel_tx_receiver_stubs: Dict[str, ChannelTxReceiverInnerStub] = {}
self.icon_score_stubs: Dict[str, IconScoreInnerStub] = {}
async def create_peer_stub(self):
from loopchain import configure as conf
from loopchain.peer import PeerInnerStub
queue_name = conf.PEER_QUEUE_NAME_FORMAT.format(amqp_key=self.amqp_key)
self.peer_stub = PeerInnerStub(self.amqp_target, queue_name, conf.AMQP_USERNAME, conf.AMQP_PASSWORD)
await self.peer_stub.connect(conf.AMQP_CONNECTION_ATTEMPTS, conf.AMQP_RETRY_DELAY)
return self.peer_stub
async def create_channel_stub(self, channel_name):
from loopchain import configure as conf
from loopchain.channel.channel_inner_service import ChannelInnerStub
queue_name = conf.CHANNEL_QUEUE_NAME_FORMAT.format(
channel_name=channel_name, amqp_key=self.amqp_key)
stub = ChannelInnerStub(self.amqp_target, queue_name, conf.AMQP_USERNAME, conf.AMQP_PASSWORD)
await stub.connect(conf.AMQP_CONNECTION_ATTEMPTS, conf.AMQP_RETRY_DELAY)
self.channel_stubs[channel_name] = stub
logging.debug(f"Channel : {channel_name}, Queue : {queue_name}")
return stub
async def create_channel_tx_creator_stub(self, channel_name):
from loopchain import configure as conf
from loopchain.channel.channel_inner_service import ChannelTxCreatorInnerStub
queue_name = conf.CHANNEL_TX_CREATOR_QUEUE_NAME_FORMAT.format(channel_name=channel_name, amqp_key=self.amqp_key)
stub = ChannelTxCreatorInnerStub(self.amqp_target, queue_name)
await stub.connect()
self.channel_tx_creator_stubs[channel_name] = stub
logging.debug(f"Channel : {channel_name}, Queue : {queue_name}")
return stub
async def create_channel_tx_receiver_stub(self, channel_name):
from loopchain import configure as conf
from loopchain.channel.channel_inner_service import ChannelTxReceiverInnerStub
queue_name = conf.CHANNEL_TX_RECEIVER_QUEUE_NAME_FORMAT.format(
channel_name=channel_name, amqp_key=self.amqp_key)
stub = ChannelTxReceiverInnerStub(self.amqp_target, queue_name, conf.AMQP_USERNAME, conf.AMQP_PASSWORD)
await stub.connect(conf.AMQP_CONNECTION_ATTEMPTS, conf.AMQP_RETRY_DELAY)
self.channel_tx_receiver_stubs[channel_name] = stub
logging.debug(f"Channel : {channel_name}, Queue : {queue_name}")
return stub
async def create_icon_score_stub(self, channel_name):
from loopchain import configure as conf
from loopchain.scoreservice import IconScoreInnerStub
queue_name = conf.ICON_SCORE_QUEUE_NAME_FORMAT.format(
channel_name=channel_name, amqp_key=self.amqp_key
)
stub = IconScoreInnerStub(self.amqp_target, queue_name, conf.AMQP_USERNAME, conf.AMQP_PASSWORD)
await stub.connect(conf.AMQP_CONNECTION_ATTEMPTS, conf.AMQP_RETRY_DELAY)
self.icon_score_stubs[channel_name] = stub
return stub
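# A minimal usage sketch, not part of loopchain itself; the AMQP target and key
# values below are hypothetical. StubCollection is a singleton (SingletonMetaClass),
# so every importer shares the same instance once it has been configured:
#   import asyncio
#
#   async def bootstrap():
#       stubs = StubCollection()
#       stubs.amqp_target = "localhost"
#       stubs.amqp_key = "7100"
#       await stubs.create_peer_stub()
#       await stubs.create_channel_stub("icon_dex")
#
#   asyncio.get_event_loop().run_until_complete(bootstrap())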
|
py
|
1a58d8049828b56ade32e01f0709baf48cdf76bd
|
# _*_ coding: utf-8 _*_
#
# Package: src.core.model
__all__ = [
"car",
"customer",
"employee",
"entity",
"rental",
"user"
]
|
py
|
1a58dac9634102e9d2fe4611afbd81eaa687010c
|
import operator
from functools import cached_property
import kafka
from confluent_kafka.admin import AdminClient, ConfigResource
from esque.config import Config
from esque.controller.topic_controller import TopicController
from esque.helpers import ensure_kafka_future_done, unpack_confluent_config
class Cluster:
def __init__(self):
self._config = Config.get_instance()
self.__topic_controller = None
@cached_property
def kafka_python_client(self) -> kafka.KafkaAdminClient:
return kafka.KafkaAdminClient(**self._config.create_kafka_python_config())
@cached_property
def confluent_client(self) -> AdminClient:
return AdminClient({"topic.metadata.refresh.interval.ms": "250", **self._config.create_confluent_config()})
@property
def topic_controller(self) -> TopicController:
if self.__topic_controller is None:
self.__topic_controller = TopicController(self, self._config)
return self.__topic_controller
@property
def bootstrap_servers(self):
return self._config.bootstrap_servers
def get_metadata(self):
return self.confluent_client.list_topics(timeout=1)
@property
def brokers(self):
metadata = self.confluent_client.list_topics(timeout=1)
return sorted(
[{"id": broker.id, "host": broker.host, "port": broker.port} for broker in metadata.brokers.values()],
key=operator.itemgetter("id"),
)
def retrieve_config(self, config_type: ConfigResource.Type, id):
requested_resources = [ConfigResource(config_type, str(id))]
futures = self.confluent_client.describe_configs(requested_resources)
((old_resource, future),) = futures.items()
future = ensure_kafka_future_done(future)
result = future.result()
return unpack_confluent_config(result)
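# Usage sketch (assumes a reachable Kafka cluster already configured through
# esque's Config singleton; broker id 1 is hypothetical):
#   cluster = Cluster()
#   print(cluster.bootstrap_servers)
#   print(cluster.brokers)                                          # [{"id": ..., "host": ..., "port": ...}, ...]
#   print(cluster.retrieve_config(ConfigResource.Type.BROKER, 1))   # unpacked broker config entries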
|
py
|
1a58dcfb1f91c169fc3da03fed303f103f5d44b7
|
import pdb
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from sklearn.preprocessing import OneHotEncoder, StandardScaler
def _quantization_binning(data, num_bins=10):
qtls = np.arange(0.0, 1.0 + 1 / num_bins, 1 / num_bins)
bin_edges = np.quantile(data, qtls, axis=0) # (num_bins + 1, num_features)
bin_widths = np.diff(bin_edges, axis=0)
bin_centers = bin_edges[:-1] + bin_widths / 2 # ()
return bin_edges, bin_centers, bin_widths
def _quantize(inputs, bin_edges, num_bins=10):
quant_inputs = np.zeros(inputs.shape[0])
for i, x in enumerate(inputs):
quant_inputs[i] = np.digitize(x, bin_edges)
quant_inputs = quant_inputs.clip(1, num_bins) - 1 # Clip edges
return quant_inputs
def _one_hot(a, num_bins=10):
return np.squeeze(np.eye(num_bins)[a.reshape(-1).astype(np.int32)])
def DataQuantize(X, bin_edges=None, num_bins=10):
'''
Quantize: the first 5 entries are continuous, and the rest are binary
'''
X_ = []
for i in range(5):
if bin_edges is not None:
Xi_q = _quantize(X[:, i], bin_edges, num_bins)
else:
bin_edges, bin_centers, bin_widths = _quantization_binning(X[:, i], num_bins)
Xi_q = _quantize(X[:, i], bin_edges, num_bins)
Xi_q = _one_hot(Xi_q, num_bins)
X_.append(Xi_q)
for i in range(5, len(X[0])):
if i == 39: # gender attribute
continue
Xi_q = _one_hot(X[:, i], num_bins=2)
X_.append(Xi_q)
return np.concatenate(X_,1), bin_edges
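# Illustration of the quantization helpers on synthetic 1-D data (shapes and
# values are made up; this is not part of the original pipeline):
#   x = np.random.randn(100)
#   bin_edges, bin_centers, bin_widths = _quantization_binning(x, num_bins=10)
#   q = _quantize(x, bin_edges)         # integer bin index in [0, 9] per sample
#   onehot = _one_hot(q, num_bins=10)   # shape (100, 10), one column per bin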
def get_adult_data():
'''
We borrow the code from https://github.com/IBM/sensitive-subspace-robustness
Preprocess the adult data set by removing some features and putting the adult data into a BinaryLabelDataset
You need to download the adult dataset (both the adult.data and adult.test files) from https://archive.ics.uci.edu/ml/datasets/Adult
'''
headers = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-stataus', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'y']
train = pd.read_csv('adult/adult.data', header = None)
test = pd.read_csv('adult/adult.test', header = None)
df = pd.concat([train, test], ignore_index=True)
df.columns = headers
df['y'] = df['y'].replace({' <=50K.': 0, ' >50K.': 1, ' >50K': 1, ' <=50K': 0 })
df = df.drop(df[(df[headers[-2]] == ' ?') | (df[headers[6]] == ' ?')].index)
df = pd.get_dummies(df, columns=[headers[1], headers[5], headers[6], headers[7], headers[9], headers[8], 'native-country'])
delete_these = ['race_ Amer-Indian-Eskimo','race_ Asian-Pac-Islander','race_ Black','race_ Other', 'sex_ Female']
delete_these += ['native-country_ Cambodia', 'native-country_ Canada', 'native-country_ China', 'native-country_ Columbia', 'native-country_ Cuba', 'native-country_ Dominican-Republic', 'native-country_ Ecuador', 'native-country_ El-Salvador', 'native-country_ England', 'native-country_ France', 'native-country_ Germany', 'native-country_ Greece', 'native-country_ Guatemala', 'native-country_ Haiti', 'native-country_ Holand-Netherlands', 'native-country_ Honduras', 'native-country_ Hong', 'native-country_ Hungary', 'native-country_ India', 'native-country_ Iran', 'native-country_ Ireland', 'native-country_ Italy', 'native-country_ Jamaica', 'native-country_ Japan', 'native-country_ Laos', 'native-country_ Mexico', 'native-country_ Nicaragua', 'native-country_ Outlying-US(Guam-USVI-etc)', 'native-country_ Peru', 'native-country_ Philippines', 'native-country_ Poland', 'native-country_ Portugal', 'native-country_ Puerto-Rico', 'native-country_ Scotland', 'native-country_ South', 'native-country_ Taiwan', 'native-country_ Thailand', 'native-country_ Trinadad&Tobago', 'native-country_ United-States', 'native-country_ Vietnam', 'native-country_ Yugoslavia']
delete_these += ['fnlwgt', 'education']
df.drop(delete_these, axis=1, inplace=True)
return BinaryLabelDataset(df = df, label_names = ['y'], protected_attribute_names = ['sex_ Male', 'race_ White'])
def preprocess_adult_data(seed = 0):
'''
Description: This code (1) standardizes the continuous features, (2) one-hot encodes the categorical features, (3) splits into a train (80%) and test set (20%), (4) based on this data, creates another copy where gender is deleted as a predictive feature and the feature we predict is gender (used by SenSR when learning the sensitive directions)
Input: seed: the seed used to split data into train/test
'''
# Get the dataset and split into train and test
dataset_orig = get_adult_data()
# we will standardize the continuous features
continous_features = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
continous_features_indices = [dataset_orig.feature_names.index(feat) for feat in continous_features]
# get an 80%/20% train/test split
dataset_orig_train, dataset_orig_test = dataset_orig.split([0.8], shuffle=True, seed = seed)
SS = StandardScaler().fit(dataset_orig_train.features[:, continous_features_indices])
dataset_orig_train.features[:, continous_features_indices] = SS.transform(dataset_orig_train.features[:, continous_features_indices])
dataset_orig_test.features[:, continous_features_indices] = SS.transform(dataset_orig_test.features[:, continous_features_indices])
X_train = dataset_orig_train.features
X_test = dataset_orig_test.features
y_train = dataset_orig_train.labels
y_test = dataset_orig_test.labels
X_val = X_train[:len(X_test)]
y_val = y_train[:len(X_test)]
X_train = X_train[len(X_test):]
y_train = y_train[len(X_test):]
# gender id = 39
A_train = X_train[:,39]
A_val = X_val[:,39]
A_test = X_test[:,39]
X_train, bin_edges = DataQuantize(X_train)
X_val, _ = DataQuantize(X_val, bin_edges)
X_test, _ = DataQuantize(X_test, bin_edges)
return X_train, X_val, X_test, y_train, y_val, y_test, A_train, A_val, A_test
|
py
|
1a58de39ff0572ec7087269e22f00e24811e703e
|
'''
@Author: hua
@Date: 2019-12-03 14:44:23
@description:
@LastEditors: hua
@LastEditTime: 2019-12-03 15:18:00
'''
from app.Models.Admin import Admin
from sqlalchemy import event
import time
@event.listens_for(Admin, "before_insert")
def admin_before_insert(mapper, connection, target):
target.add_time = int(time.time())
target.update_time = int(time.time())
@event.listens_for(Admin, "before_update")
def admin_before_update(mapper, connection, target):
target.update_time = int(time.time())
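# Effect sketch (assumes an Admin instance and an active SQLAlchemy session;
# the constructor arguments are illustrative):
#   admin = Admin(name="root")
#   session.add(admin)
#   session.commit()      # before_insert fires -> add_time = update_time = current epoch seconds
#   admin.name = "admin"
#   session.commit()      # before_update fires -> update_time refreshed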
|
py
|
1a58df596b6b75074a5e09054809b26c82930f77
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from coredb.query_managers.manager import BaseQueryManager
from polyaxon.pql.builder import BoolCondition, SearchCondition, ValueCondition
from polyaxon.pql.parser import parse_search_operation, parse_value_operation
class ArtifactQueryManager(BaseQueryManager):
NAME = "artifact"
FIELDS_ORDERING = ("name", "kind", "path", "is_input")
FIELDS_USE_UUID = {"run"}
FIELDS_PROXY = {
"id": "name",
"name": "artifact__name",
"kind": "artifact__kind",
"path": "artifact__path",
"state": "artifact__state",
}
CHECK_ALIVE = False
PARSERS_BY_FIELD = {
# Name
"name": parse_search_operation,
# Kind
"kind": parse_value_operation,
# Path
"path": parse_value_operation,
# State
"state": parse_value_operation,
# Is input
"is_input": parse_value_operation,
# Run
"run": parse_value_operation,
}
CONDITIONS_BY_FIELD = {
# Name
"name": SearchCondition,
# Kind
"kind": ValueCondition,
# Path
"path": ValueCondition,
# State
"state": ValueCondition,
# Is input
"is_input": BoolCondition,
# Run
"run": ValueCondition,
}
|
py
|
1a58e50570154071bf520b07705a25b2918c3e69
|
# Generated by Django 3.1.7 on 2021-05-07 11:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('booking', '0012_auto_20210506_0650'),
]
operations = [
migrations.AlterModelOptions(
name='ticket',
options={'ordering': ['-pk', 'expected_activation_date']},
),
]
|
py
|
1a58e6199f8f4d13f7753779d461b125279d95eb
|
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM'])
Monomer('Ligand', ['Receptor'])
Monomer('C6pro', ['C3A'])
Monomer('ParpU', ['C3A'])
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('BidM', ['BaxM'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('Xiap', ['SmacC', 'C3A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C3ub')
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C3pro', ['C8A'])
Monomer('SmacM', ['BaxA'])
Monomer('SmacC', ['Xiap'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('C6pro_0', 100.0)
Parameter('ParpU_0', 1000000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('C8A_0', 0.0)
Parameter('Xiap_0', 188250.0)
Parameter('Receptor_0', 100.0)
Parameter('C3ub_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('SmacM_0', 100000.0)
Parameter('SmacC_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('BaxA_obs', BaxA())
Observable('Ligand_obs', Ligand())
Observable('C6pro_obs', C6pro())
Observable('ParpU_obs', ParpU())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('BidM_obs', BidM())
Observable('BaxM_obs', BaxM())
Observable('C8A_obs', C8A())
Observable('Xiap_obs', Xiap())
Observable('Receptor_obs', Receptor())
Observable('C3ub_obs', C3ub())
Observable('Fadd_obs', Fadd())
Observable('C3pro_obs', C3pro())
Observable('SmacM_obs', SmacM())
Observable('SmacC_obs', SmacC())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), BaxA_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(Xiap(SmacC=None, C3A=None), Xiap_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C3ub(), C3ub_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
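# Simulation sketch (not part of the exported model; assumes PySB's SciPy
# integrator is available and uses an arbitrary time span):
#   import numpy as np
#   from pysb.simulator import ScipyOdeSimulator
#   tspan = np.linspace(0, 20000, 101)
#   traj = ScipyOdeSimulator(model, tspan=tspan).run()
#   print(traj.observables['ParpC_obs'][-1])   # cleaved PARP at the final time point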
|
py
|
1a58e647168c3fabdd9e4c5cd827a1f09abe49fe
|
import unittest
from cnc.gcode import *
class TestGCode(unittest.TestCase):
def setUp(self):
self.default = Coordinates(-7, 8, 9, -10)
def tearDown(self):
pass
def test_constructor(self):
# GCode shouldn't be created with the constructor directly, but since it
# is used internally, let's check it.
self.assertRaises(TypeError, GCode)
gc = GCode({"X": "1", "Y": "-2", "Z": "0", "E": 99, "G": "1"})
self.assertEqual(gc.coordinates(self.default, 1).x, 1.0)
self.assertEqual(gc.coordinates(self.default, 1).y, -2.0)
self.assertEqual(gc.coordinates(self.default, 1).z, 0.0)
self.assertEqual(gc.coordinates(self.default, 1).e, 99.0)
def test_has(self):
gc = GCode.parse_line("g1X2Y3z4E5F50")
self.assertTrue(gc.has("G"))
self.assertTrue(gc.has("X"))
self.assertTrue(gc.has("Y"))
self.assertTrue(gc.has("Z"))
self.assertTrue(gc.has("E"))
self.assertTrue(gc.has("F"))
def test_parser(self):
gc = GCode.parse_line("G1X2Y-3Z4E1.5")
self.assertEqual(gc.command(), "G1")
self.assertEqual(gc.coordinates(self.default, 1).x, 2.0)
self.assertEqual(gc.coordinates(self.default, 1).y, -3.0)
self.assertEqual(gc.coordinates(self.default, 1).z, 4.0)
self.assertEqual(gc.coordinates(self.default, 1).e, 1.5)
gc = GCode.parse_line("")
self.assertIsNone(gc)
def test_defaults(self):
# defaults are the values that should be returned if the corresponding
# value doesn't exist in the gcode.
default = Coordinates(11, -12, 14, -10)
gc = GCode.parse_line("G1")
self.assertEqual(gc.coordinates(default, 1).x, 11.0)
self.assertEqual(gc.coordinates(default, 1).y, -12.0)
self.assertEqual(gc.coordinates(default, 1).z, 14.0)
self.assertEqual(gc.coordinates(default, 1).e, -10.0)
def test_commands(self):
gc = GCode({"G": "1"})
self.assertEqual(gc.command(), "G1")
gc = GCode.parse_line("M99")
self.assertEqual(gc.command(), "M99")
def test_case_sensitivity(self):
gc = GCode.parse_line("m111")
self.assertEqual(gc.command(), "M111")
gc = GCode.parse_line("g2X3y-4Z5e6")
self.assertEqual(gc.command(), "G2")
self.assertEqual(gc.coordinates(self.default, 1).x, 3.0)
self.assertEqual(gc.coordinates(self.default, 1).y, -4.0)
self.assertEqual(gc.coordinates(self.default, 1).z, 5.0)
self.assertEqual(gc.coordinates(self.default, 1).e, 6.0)
def test_has_coordinates(self):
gc = GCode.parse_line("X2Y-3Z4")
self.assertTrue(gc.has_coordinates())
gc = GCode.parse_line("G1")
self.assertFalse(gc.has_coordinates())
gc = GCode.parse_line("X1")
self.assertTrue(gc.has_coordinates())
gc = GCode.parse_line("Y1")
self.assertTrue(gc.has_coordinates())
gc = GCode.parse_line("Z1")
self.assertTrue(gc.has_coordinates())
gc = GCode.parse_line("E1")
self.assertTrue(gc.has_coordinates())
def test_radius(self):
gc = GCode.parse_line("G2I1J2K3")
self.assertEqual(gc.radius(self.default, 1).x, 1)
self.assertEqual(gc.radius(self.default, 1).y, 2)
self.assertEqual(gc.radius(self.default, 1).z, 3)
gc = GCode.parse_line("G3")
self.assertEqual(gc.radius(self.default, 1).x, self.default.x)
self.assertEqual(gc.radius(self.default, 1).y, self.default.y)
self.assertEqual(gc.radius(self.default, 1).z, self.default.z)
def test_multiply(self):
# getting coordinates can scale the values by the specified multiplier.
gc = GCode.parse_line("X2 Y-3 Z4 E5")
self.assertEqual(gc.coordinates(self.default, 25.4).x, 50.8)
self.assertEqual(gc.coordinates(self.default, 2).y, -6)
self.assertEqual(gc.coordinates(self.default, 0).y, 0)
self.assertEqual(gc.coordinates(self.default, 5).e, 25)
def test_whitespaces(self):
gc = GCode.parse_line("X1 Y2")
self.assertEqual(gc.coordinates(self.default, 1).x, 1.0)
self.assertEqual(gc.coordinates(self.default, 1).y, 2.0)
gc = GCode.parse_line("X 3 Y4")
self.assertEqual(gc.coordinates(self.default, 1).x, 3.0)
self.assertEqual(gc.coordinates(self.default, 1).y, 4.0)
gc = GCode.parse_line("X 5 Y\t 6")
self.assertEqual(gc.coordinates(self.default, 1).x, 5.0)
self.assertEqual(gc.coordinates(self.default, 1).y, 6.0)
gc = GCode.parse_line(" \tX\t\t \t\t7\t ")
self.assertEqual(gc.coordinates(self.default, 1).x, 7.0)
def test_errors(self):
self.assertRaises(GCodeException, GCode.parse_line, "X1X1")
self.assertRaises(GCodeException, GCode.parse_line, "X1+Y1")
self.assertRaises(GCodeException, GCode.parse_line, "X1-Y1")
self.assertRaises(GCodeException, GCode.parse_line, "~Y1")
self.assertRaises(GCodeException, GCode.parse_line, "Y")
self.assertRaises(GCodeException, GCode.parse_line, "abracadabra")
self.assertRaises(GCodeException, GCode.parse_line, "G1M1")
self.assertRaises(GCodeException, GCode.parse_line, "x 1 y 1 z 1 X 1")
def test_comments(self):
self.assertIsNone(GCode.parse_line("; some text"))
self.assertIsNone(GCode.parse_line(" \t \t ; some text"))
self.assertIsNone(GCode.parse_line("(another comment)"))
gc = GCode.parse_line("X2.5 ; end of line comment")
self.assertEqual(gc.coordinates(self.default, 1).x, 2.5)
gc = GCode.parse_line("X2 Y(inline comment)7")
self.assertEqual(gc.coordinates(self.default, 1).x, 2.0)
self.assertEqual(gc.coordinates(self.default, 1).y, 7.0)
gc = GCode.parse_line("X2 Y(inline comment)3 \t(one more comment) "
"\tz4 ; multi comment test")
self.assertEqual(gc.coordinates(self.default, 1).x, 2.0)
self.assertEqual(gc.coordinates(self.default, 1).y, 3.0)
self.assertEqual(gc.coordinates(self.default, 1).z, 4.0)
if __name__ == '__main__':
unittest.main()
|
py
|
1a58e68915f23662dede86518f3bcf785c75cbf4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-17 16:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracker', '0007_auto_20160317_1504'),
]
operations = [
migrations.RemoveField(
model_name='tracker',
name='L1',
),
migrations.AddField(
model_name='tracker',
name='endpt',
field=models.CharField(choices=[(b'0', b'Data goes to both Cavatica and cBioPortal'), (b'1', b'Data goes to cBioPortal only'), (b'2', b'Data goes to Cavatica only')], default=b'0', max_length=254),
),
migrations.AddField(
model_name='tracker',
name='level',
field=models.CharField(choices=[(b'0', b'L1 Data: FASTQ'), (b'1', b'L2 Data: VCF, BAM'), (b'2', b'L3 Data: Processed data')], default=b'0', max_length=230),
),
migrations.AlterField(
model_name='tracker',
name='group',
field=models.CharField(choices=[(b'0', b'PRIVATE: requester access only'), (b'1', b'CBTTC'), (b'2', b'SU2C'), (b'3', b'PNOC'), (b'9', b'PUBLIC')], default=b'0', help_text=b'PNOC, CBTTC, SU2C, PUBLIC', max_length=245),
),
]
|
py
|
1a58e6c679a9d74fa851348409d44eeec8add291
|
#! /usr/bin/env python3
from nexus import settings,job,run_project,obj
from nexus import generate_physical_system
from nexus import generate_pyscf
settings(
results = '',
sleep = 3,
machine = 'ws16',
)
system = generate_physical_system(
units = 'A',
axes = '''1.785 1.785 0.000
0.000 1.785 1.785
1.785 0.000 1.785''',
elem_pos = '''
C 0.0000 0.0000 0.0000
C 0.8925 0.8925 0.8925
''',
kgrid = (1,1,1),
kshift = (0,0,0),
C = 4,
)
scf = generate_pyscf(
identifier = 'scf', # log output goes to scf.out
path = 'diamond_pp_dft_gamma', # directory to run in
job = job(serial=True,threads=16),# pyscf must run w/o mpi
template = './dft_template.py', # pyscf template file
system = system,
cell = obj( # used to make Cell() inputs
basis = 'bfd-vdz',
ecp = 'bfd',
drop_exponent = 0.1,
verbose = 5,
),
)
run_project()
|
py
|
1a58e7860ba9469db535a1ed97a1f857efe2fae3
|
import dsd, os
path = 'ADItotal\\'
lista = os.listdir(path)
dsd.limpar_arquivo('ADItotal(sem_andamentos).txt')
dsd.limpar_arquivo('ADItotal(andamentos).txt')
dsd.limpar_arquivo('excluidos.txt')
partes_total = []
dados_csv = []
andamentos_csv = []
lista_excluidos = []
dsd.limpar_arquivo('ADItotalpartes.txt')
dsd.write_csv_header('ADItotalpartes.txt', 'nome, tipo, processo')
contador=0
excluidos = 0
for item in lista[0:]:
gravar_processo = True
contador = contador +1
nome_arquivo = path+item
processo = item.replace('.txt','')
# load the data from the file
html = 'NA'
html = dsd.carregar_arquivo(nome_arquivo)
html = html.replace(',',';')
html = html.replace('\n','')
html = html.replace(' ',' ')
# extract the parties
partes_string = dsd.extrair(html,'partes>>>>', '<div id="partes-resumidas">')
partes = dsd.extrair_partes(partes_string)
lista_das_partes = []
lista_das_partes = dsd.listar_partes(partes_string, item.replace('.txt',''))
for y in lista_das_partes:
dsd.write_csv_line('ADItotalpartes.txt', y)
# extract the case events (andamentos)
andamentos = dsd.extrair(html,'andamentos>>>>', 'pauta>>>>')
andamentos = dsd.extrair_andamentos(andamentos)
# extract the elements from the page source
codigofonte =dsd.extrair(html,'fonte>>>>', 'partes>>>>')
eletronico_fisico =dsd.extrair(codigofonte,'bg-primary">','</span>')
sigilo =dsd.extrair(codigofonte,'bg-success">','</span>')
nome_processo =dsd.extrair(codigofonte,'-processo" value="','">')
numerounico = dsd.extrair(codigofonte,'-rotulo">','</div>')
numerounico = dsd.extrair(numerounico,': ', '')
relator = dsd.extrair(codigofonte,'Relator:','</div>')
relator = relator.strip(' ')
relator = relator.replace('MIN. ','')
relator = dsd.remover_acentos(relator)
redator_acordao = dsd.extrair(codigofonte,'>Redator do acórdão:','</div>')
redator_acordao = dsd.remover_acentos(redator_acordao)
redator_acordao = redator_acordao.replace('MIN. ','')
redator_acordao = redator_acordao.strip(' ')
redator_acordao = redator_acordao.replace ('MINISTRO ','')
relator_ultimo_incidente = dsd.extrair(codigofonte,
'Relator do último incidente:'
,'</div>')
relator_ultimo_incidente = relator_ultimo_incidente.replace ('MIN. ','')
relator_ultimo_incidente = relator_ultimo_incidente.replace ('MINISTRO ','')
relator_ultimo_incidente = relator_ultimo_incidente.strip(' ')
relator_ultimo_incidente = dsd.remover_acentos(relator_ultimo_incidente)
ultimoincidente = dsd.extrair(relator_ultimo_incidente,"(",'')
relator_ultimo_incidente = dsd.extrair(relator_ultimo_incidente,'','(')
ultimoincidente = ultimoincidente.replace(')','')
ultimoincidente = ultimoincidente.strip(' ')
# extract the elements from the information (informações) tab
informacoes = dsd.extrair(html,'informacoes>>>>', '>>>>')
assuntos = dsd.extrair(informacoes, '<ul style="list-style:none;">', '</ul>')
assuntos = dsd.limpar(assuntos)
assuntos = dsd.extrair(assuntos,'<li>','')
assuntos = assuntos.replace('</li>','')
assuntos = dsd.limpar(assuntos)
protocolo_data = dsd.extrair(informacoes, '<div class="col-md-5 processo-detalhes-bold m-l-0">', '</div>')
protocolo_data = protocolo_data.strip(' ')
orgaodeorigem = dsd.extrair(informacoes, '''Órgão de Origem:
</div>
<div class="col-md-5 processo-detalhes">''', '</div>')
numerodeorigem = dsd.extrair(informacoes, '''Número de Origem:
</div>
<div class="col-md-5 processo-detalhes">''', '</div>')
origem = dsd.extrair(informacoes, '''Origem:
</div>
<div class="col-md-5 processo-detalhes">''', '</div>')
procedencia = dsd.extrair(informacoes, '''<span id="descricao-procedencia">''', '</span>')
procedencia = procedencia.replace(' ','')
procedencia = dsd.extrair(procedencia, '', ' -')
cc = 'NA'
# extract the CC fields
if 'ADI' in nome_processo or 'ADPF' in nome_processo or 'ADC' in nome_processo or 'ADO' in nome_processo:
cc = dsd.extrair(html, 'cc>>>','')
# extract the incident field
incidentecc = dsd.extrair (cc,
'verProcessoAndamento.asp?incidente=',
'">')
# extract the class + preliminary injunction (liminar) + number fields
cln = 'NA'
cln = dsd.extrair(cc,
'<div><h3><strong>',
'</strong>')
dsd.limpar_cln(cln)
cln = cln.upper()
# extract the number
numerocc = 'NA'
numerocc = dsd.extrair (cln, ' - ', '')
numerocc = dsd.limpar_numero(numerocc)
# extract the injunction and the class
if 'LIMINAR' in cln:
liminarcc = 'sim'
classecc = dsd.extrair(cln, '', ' (MED')
else:
liminarcc = 'não'
classecc = dsd.extrair(cln, '', ' - ')
dsd.limpar_classe(classecc)
classecc.upper()
classecc = classecc.replace('ACAO DIRETA DE INCONSTITUCIONALIDADE','ADI')
classecc = classecc.replace('AÇÃO DIRETA DE INCONSTITUCIONALIDADE','ADI')
classecc = classecc.replace('ARGUIÇÃO DE DESCUMPRIMENTO DE PRECEITO FUNDAMENTAL','ADPF')
# field definition: origin
origemcc = 'NA'
origemcc = dsd.extrair(cc,'Origem:</td><td><strong>','</strong>')
procedencia = procedencia.replace('***', dsd.limpa_estado(origemcc).replace('/', ''))
## field definition: entry date
entradacc = dsd.extrair(cc,'Entrada no STF:</td><td><strong>','</strong>')
entradacc = dsd.substituir_data(entradacc)
## field definition: rapporteur (relator)
relatorcc = dsd.extrair(cc,'Relator:</td><td><strong>','</strong>')
relatorcc = relatorcc.replace('MINISTRO ','')
relatorcc = relatorcc.replace('MINISTRA ','')
relatorcc = dsd.remover_acentos(relatorcc)
## field definition: distribution
distribuicaocc = dsd.extrair(cc,'Distribuído:</td><td><strong>','</strong>')
distribuicaocc = dsd.substituir_data(distribuicaocc)
distribuicaocc = distribuicaocc.replace('-','/')
## field definition: petitioner (requerente)
requerentecc = dsd.extrair(cc,'Requerente: <strong>','</strong>')
requerentecc = requerentecc.replace(' ',' ')
requerentecc = requerentecc.replace(' ;',';')
requerentecc = requerentecc.replace('; ',';')
requerentecc = requerentecc.replace('( CF','(CF')
if '(CF' in requerentecc:
requerentesplit = requerentecc.split('(CF')
requerentecc = requerentesplit[0]
requerentecc = requerentecc.strip()
requerentetipo = requerentesplit[1]
requerentetipo = dsd.extrair(requerentetipo, ';','')
requerentetipo = requerentetipo.replace(')','')
requerentetipocc = requerentetipo.replace('0','')
requerentetipocc = requerentetipocc.replace(' 2','')
else:
requerentesplit = 'NA'
requerentetipocc = 'NA'
## field definition: respondent (requerido)
requeridocc = dsd.extrair(cc,
'Requerido :<strong>',
'</strong>')
## field definition: challenged legal provision
dispositivoquestionadocc = dsd.extrair(cc,
'Dispositivo Legal Questionado</b></strong><br /><pre>',
'</pre>')
dispositivoquestionadocc = dsd.limpar(dispositivoquestionadocc)
## field definition: injunction result
resultadoliminarcc = dsd.extrair(cc,
'Resultado da Liminar</b></strong><br /><br />',
'<br />')
### injunction result filter
# filters
resultadoliminarcc = resultadoliminarcc.replace('Aguardadno','Aguardando')  # normalize a misspelling found in the source data
resultadoliminarcc = resultadoliminarcc.replace('Decisão Monocrática - "Ad referendum"','Deferida')
resultadoliminarcc = resultadoliminarcc.replace('Monicrática','Monocrática')
resultadoliminarcc = resultadoliminarcc.replace('Monoacrática','Monocrática')
resultadoliminarcc = resultadoliminarcc.replace('Monocrático','Monocrática')
resultadoliminarcc = resultadoliminarcc.replace('Decisão Monocrática Deferida -','Deferida')
resultadoliminarcc = resultadoliminarcc.replace('"','')
resultadoliminarcc = resultadoliminarcc.replace('Decisão Monocrática - ','')
resultadoliminarcc = resultadoliminarcc.replace('liminar deferida','Deferida')
resultadoliminarcc = resultadoliminarcc.upper()
resultadoliminarcc = resultadoliminarcc.replace('PREJUDICADO','PREJUDICADA')
resultadoliminarcc = resultadoliminarcc.replace('PROCEDENTE','DEFERIDA')
resultadoliminarcc = resultadoliminarcc.replace('AD REFERENDUM','')
resultadoliminarcc = resultadoliminarcc.replace('PROCEDENTE','DEFERIDA')
## field definition: final result
resultadofinalcc = dsd.extrair(cc,
'Resultado Final</b></strong><br /><br />',
'<br />')
## field definition: final monocratic (single-justice) decision
if 'Decisão Monocrática Final</b></strong><br /><pre>' in cc:
decisaomonofinal = dsd.extrair(cc,
'Decisão Monocrática Final</b></strong><br /><pre>',
'</pre>')
decisaomonofinalcc = dsd.limpar(decisaomonofinal)
else:
decisaomonofinalcc = 'NA'
## field definition: constitutional grounds
if 'Fundamentação Constitucional</b></strong><br /><pre>' in cc:
fundamentocc = dsd.extrair(cc,
'Fundamentação Constitucional</b></strong><br /><pre>',
'</pre>')
fundamentocc = dsd.limpar(fundamentocc)
else:
fundamentocc = 'NA'
## field definition: indexing
if 'Indexação</b></strong><br /><pre>' in cc:
indexacaocc = dsd.extrair(cc,
'Indexação</b></strong><br /><pre>',
'</pre>')
indexacaocc = dsd.limpar(indexacaocc)
else:
indexacaocc = 'NA'
else:
gravar_processo = False
# build the extracted-data variable as a list of fields
dados = [processo, incidentecc, requerentecc,
requerentetipocc, requeridocc, len(lista_das_partes), lista_das_partes ,len(andamentos),
andamentos[:9], eletronico_fisico, sigilo,
numerounico, relatorcc, relator, redator_acordao, ultimoincidente,
relator_ultimo_incidente, assuntos, procedencia, protocolo_data,
distribuicaocc, orgaodeorigem,
numerodeorigem, origem,
liminarcc, dispositivoquestionadocc, resultadoliminarcc, resultadofinalcc,
decisaomonofinalcc, fundamentocc, indexacaocc]
# insert the contents of the list above here, replacing [] with ''
campos = '''processo, incidentecc, requerentecc,
requerentetipocc, requeridocc, len(partes),partes,len(andamentos),
andamentos[:9], eletronico_fisico, sigilo,
numerounico, relatorcc, relator, redator_acordao, ultimoincidente,
relator_ultimo_incidente, assuntos, procedencia, protocolo_data,
distribuicaocc, orgaodeorigem,
numerodeorigem, origem,
liminarcc, dispositivoquestionadocc, resultadoliminarcc, resultadofinalcc,
decisaomonofinalcc, fundamentocc, indexacaocc'''
campos = campos.replace('\n','')
campos = campos.replace(' ','')
dados2 = [processo, len(andamentos), len(str(andamentos)), andamentos]
campos2 = 'processo, len(andamentos), len(str(andamentos)), andamentos'
dsd.write_csv_header('ADItotal(sem_andamentos).txt',campos)
dsd.write_csv_header('excluidos.txt','processos excluídos')
dsd.write_csv_header('ADItotal(andamentos).txt',campos2)
# write out in batches of 500
if andamentos == []:
andamentos = ['SEM ANDAMENTOS CADASTRADOS']
if (gravar_processo == False or
nome_processo == 'NA' or
len(lista_das_partes) == 0 or
'IMPOSSIBILIDADE DE PROCESSAMENTO' in andamentos[0] or
'REAUTUADO' in andamentos[0] or
'CANCELAMENTO DE AUTUACAO' in andamentos[0]):
lista_excluidos.append(processo)
excluidos = excluidos + 1
else:
dados_csv.append(dados)
andamentos_csv.append(dados2)
print(nome_processo)
dsd.write_csv_lines('ADItotal(sem_andamentos).txt',dados_csv)
dsd.write_csv_lines('ADItotal(andamentos).txt',andamentos_csv)
dsd.write_csv_lines('excluidos.txt',lista_excluidos)
print ('Gravados arquivos ADItotal(sem_andamentos).txt e ADItotal(andamentos).txt')
print (f'Excluídos {excluidos} processos')
|
py
|
1a58e97f71312d4b807a81395c788d73388308b6
|
import pytest
from Cryptodome.PublicKey import RSA
from django.urls import reverse
from oidc_provider.models import RESPONSE_TYPE_CHOICES, RSAKey, UserConsent
from oidc_apis.factories import ApiFactory, ApiScopeFactory
from users.factories import OIDCClientFactory, UserFactory
from users.views import TunnistamoOidcAuthorizeView
@pytest.mark.parametrize('with_trailing_slash', (True, False))
@pytest.mark.django_db
def test_tunnistamo_authorize_view_is_used(client, with_trailing_slash):
response = client.get('/openid/authorize{}'.format('/' if with_trailing_slash else ''))
assert response.resolver_match.func.__name__ == TunnistamoOidcAuthorizeView.as_view().__name__
@pytest.mark.parametrize('ui_locales, expected_text', (
(None, 'Sähköposti'),
('', 'Sähköposti'),
('bogus', 'Sähköposti'),
('en', 'Email'),
('fi en', 'Sähköposti'),
('bogus en fi', 'Email'),
))
@pytest.mark.django_db
def test_tunnistamo_authorize_view_language(client, ui_locales, expected_text):
oidc_client = OIDCClientFactory(require_consent=True)
user = UserFactory()
client.force_login(user)
url = reverse('authorize')
data = {
'client_id': oidc_client.client_id,
'redirect_uri': oidc_client.redirect_uris[0],
'response_type': 'code',
'scope': 'email',
}
if ui_locales is not None:
data['ui_locales'] = ui_locales
response = client.get(url, data)
assert expected_text in response.content.decode('utf-8')
@pytest.mark.django_db
def test_api_scopes_are_shown_in_and_returned_from_consent_screen(client):
oidc_client = OIDCClientFactory(require_consent=True)
user = UserFactory()
client.force_login(user)
api = ApiFactory(required_scopes=['github_username'])
api_scope = ApiScopeFactory(api=api)
response = client.get(reverse('authorize'), {
'client_id': oidc_client.client_id,
'redirect_uri': oidc_client.redirect_uris[0],
'scope': api_scope.identifier,
'response_type': 'code',
})
assert response.status_code == 200
content = response.content.decode('utf-8')
expected_scope = '{} github_username'.format(api_scope.identifier)
assert '<input name="scope" type="hidden" value="{}" />'.format(expected_scope) in content
assert api_scope.name in content
assert api_scope.description in content
@pytest.mark.parametrize('api_scope_in_request', (False, True))
@pytest.mark.django_db
def test_api_scopes_are_added_to_user_consent_after_authorization(client, api_scope_in_request):
oidc_client = OIDCClientFactory(require_consent=True)
user = UserFactory()
client.force_login(user)
api = ApiFactory(required_scopes=['github_username'])
api_scope = ApiScopeFactory(api=api)
response = client.post(reverse('authorize'), {
'client_id': oidc_client.client_id,
'redirect_uri': oidc_client.redirect_uris[0],
'scope': '{} github_username'.format(api_scope.identifier) if api_scope_in_request else api_scope.identifier,
'response_type': 'code',
'allow': True,
})
assert response.status_code == 302
user_consent = UserConsent.objects.get(user=user, client=oidc_client)
assert 'github_username' in user_consent.scope
@pytest.mark.parametrize('create_client', (False, True))
@pytest.mark.django_db
def test_original_client_id_is_saved_to_the_session(
client,
loginmethod_factory,
oidcclient_factory,
create_client,
):
"""Test that the original client id is saved to the session
This is an implementation detail test, but we don't have a better way to test
this right now. Proper testing would need end-to-end tests with e.g. Selenium."""
oidc_client = None
if create_client:
oidc_client = oidcclient_factory(
client_id="test_client",
redirect_uris=['https://tunnistamo.test/redirect_uri'],
response_types=["id_token"]
)
url = reverse('authorize')
data = {
'client_id': 'test_client',
'response_type': 'id_token',
'redirect_uri': 'https://tunnistamo.test/redirect_uri',
'scope': 'openid',
'response_mode': 'form_post',
'nonce': 'abcdefg'
}
client.get(url, data)
if oidc_client:
session_client_id = client.session.get("oidc_authorize_original_client_id")
assert session_client_id == oidc_client.client_id
else:
assert "oidc_authorize_original_client_id" not in client.session
@pytest.mark.django_db
@pytest.mark.parametrize('with_pkce', (True, False))
@pytest.mark.parametrize('response_type', [key for key, val in RESPONSE_TYPE_CHOICES])
def test_public_clients_ability_to_skip_consent(
client,
user,
oidcclient_factory,
with_pkce,
response_type,
):
key = RSA.generate(1024)
rsakey = RSAKey(key=key.exportKey('PEM').decode('utf8'))
rsakey.save()
oidc_client = oidcclient_factory(
client_type='public',
require_consent=False,
response_types=[key for key, val in RESPONSE_TYPE_CHOICES],
redirect_uris=['https://example.com/callback'],
)
client.force_login(user)
url = reverse('authorize')
data = {
'client_id': oidc_client.client_id,
'redirect_uri': oidc_client.redirect_uris[0],
'scope': 'openid profile',
'response_type': response_type,
'nonce': 'testnonce',
}
if with_pkce:
data.update({
# The code challenge value doesn't matter as only its existence is checked
# in the authorize endpoint. The value would be verified in the token endpoint.
'code_challenge': 'abcdefg',
'code_challenge_method': 'S256'
})
response = client.get(url, data)
# Consent skip should happen when using implicit flow, or code flow with pkce.
should_redirect_to_client_map = {
('code', True): True,
('code', False): False,
('id_token', True): True,
('id_token', False): True,
('id_token token', True): True,
('id_token token', False): True,
('code token', True): True,
('code token', False): False,
('code id_token', True): True,
('code id_token', False): False,
('code id_token token', True): True,
('code id_token token', False): False,
}
if should_redirect_to_client_map[(response_type, with_pkce)]:
assert response.status_code == 302
assert response['Location'].startswith(oidc_client.redirect_uris[0])
assert 'error' not in response['Location']
else:
assert response.status_code == 200
assert 'name="allow" type="submit"' in response.content.decode('utf-8')
|
py
|
1a58e9cd302698322742d260b397d2fbee2e8755
|
import logging
import traceback
import peewee
from flask import request
from east.exceptions import *
from app import app, db
print('TESTING')
class DummyLogger:
def log(self, *args):
pass
def error(self, *args):
pass
logger = DummyLogger()
# logger = logging.getLogger(__name__)
@app.errorhandler(BaseAPIException)
def handle_api_errors(e):
logger.error('API Exception <%s>:: %s', e.name, e.description)
db.rollback()
return e.make_response()
@app.errorhandler(peewee.DoesNotExist)
def handle_peewee_doesnotexist(e):
logger.error('DoesNotExist: %s' % e)
db.rollback()
return DoesNotExistError(str(e)).make_response()
@app.errorhandler(404)
def handle_404_error(e):
logger.error(str(e))
return APIRouteDoesNotExist().make_response()
@app.errorhandler(405)
def handle_405_error(e):
logger.error(str(e))
return APIMethodNotAllowed('Requested route does not support this method [%s].' % request.method).make_response()
@app.errorhandler(Exception)
def handle_generic_exception(e):
logger.error('Generic <%s>:: %s', e.__class__.__name__, e)
logger.error(traceback.format_exc())
db.rollback()
return BaseAPIException(e.__class__.__name__, str(e)).make_response()
|
py
|
1a58eac1550cf7acc5fdde8f70d2b24a7231037b
|
import numpy as np
from pyscf import gto, scf
from kspies import wy
mol = gto.M(atom = 'N 0 0 0 ; N 1.1 0 0',
basis = 'cc-pVDZ')
mf = scf.RHF(mol).run()
dm_tar = mf.make_rdm1()
PBS = gto.expand_etbs([(0, 13, 2**-4 , 2),
(1, 3 , 2**-2 , 2)])
mw = wy.RWY(mol, dm_tar, pbas=PBS)
#Note that for this designed-to-be ill-conditioned problem,
#Hessian-based optimization algorithms are problematic.
mw.method = 'bfgs'
mw.tol = 2e-7
mw.run()
mw.info()
Ws_fin = mw.Ws
etas = [ 2.**(-a) for a in np.linspace(5., 27., 45) ]
v = np.zeros(len(etas))
W = np.zeros(len(etas))
for i, eta in enumerate(etas):
mw.reg=eta
mw.run()
v[i] = mw.Dvb()
W[i] = mw.Ws
mw.info()
import matplotlib.pyplot as plt
fig,ax = plt.subplots(2)
ax[0].scatter(np.log10(Ws_fin-W), np.log10(v))
ax[1].scatter(np.log10(etas), v*etas/(Ws_fin-W))
plt.tight_layout()
#plt.savefig('L_curves.pdf', format='pdf')
#plt.savefig('L_curves.eps', format='eps')
plt.show()
|
py
|
1a58eac85481796e2e7c1eb243a563fc59964441
|
from flask import Flask, render_template
app = Flask(__name__, template_folder='', static_folder='')
@app.route('/')
def result():
return render_template('compare_ops.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
|
py
|
1a58ec2d9494e45c8993f65ee066a76990c2cca9
|
#!/usr/bin/env python
# Construct a command that will create a texture, appending console
# output to the file "out.txt".
def omaketx_command (infile, outfile, extraargs="",
options="", output_cmd="-otex",
showinfo=True, showinfo_extra="",
silent=False, concat=True) :
command = (oiio_app("oiiotool")
+ " " + make_relpath(infile,tmpdir)
+ " " + extraargs
+ " " + output_cmd + options + " " + make_relpath(outfile,tmpdir) )
if not silent :
command += " >> out.txt"
if concat:
command += " ;\n"
if showinfo:
command += info_command (outfile, extraargs=showinfo_extra, safematch=1)
return command
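# For example (paths and tmpdir handling are illustrative), a call such as
# omaketx_command("checker.tif", "checker-uint16.tx", "-d uint16") expands to
# roughly:
#   oiiotool ./checker.tif -d uint16 -otex ./checker-uint16.tx >> out.txt ;
# followed by the info_command output for checker-uint16.tx.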
# location of oiio-images directory
oiio_images = OIIO_TESTSUITE_IMAGEDIR
# Just for simplicity, make a checkerboard with a solid alpha
command += oiiotool (" --pattern checker 128x128 4 --ch R,G,B,=1.0"
+ " -d uint8 -o " + make_relpath("checker.tif") )
# Basic test - recreate the grid texture
command += omaketx_command (oiio_images + "/grid.tif", "grid.tx")
# Test --resize (to power of 2) with the grid, which is 1000x1000
command += omaketx_command (oiio_images + "/grid.tif", "grid-resize.tx",
options=":resize=1")
# Test -d to set output data type
command += omaketx_command ("checker.tif", "checker-uint16.tx",
"-d uint16")
# Test --ch to restrict the number of channels
command += omaketx_command ("checker.tif", "checker-1chan.tx",
"--ch 0")
# Test --tiles to set a non-default tile size
command += omaketx_command ("checker.tif", "checker-16x32tile.tx",
"--tile 16 32")
# Test --separate and --compression
command += omaketx_command ("checker.tif", "checker-seplzw.tx",
"--planarconfig separate --compression lzw")
# Test --wrap
command += omaketx_command ("checker.tif", "checker-clamp.tx",
options=":wrap=clamp")
# Test --swrap and --twrap
command += omaketx_command ("checker.tif", "checker-permir.tx",
options=":swrap=periodic:twrap=mirror")
# Test --nomipmap
command += omaketx_command ("checker.tif", "checker-nomip.tx",
options=":nomipmap=1")
# Test setting matrices
command += omaketx_command ("checker.tif", "checker-camera.tx",
"--attrib:type=matrix worldtocamera 1,0,0,0,0,2,0,0,0,0,1,0,0,0,0,1 " +
"--attrib:type=matrix worldtoscreen 3,0,0,0,0,3,0,0,0,0,3,0,1,2,3,1")
# Test --opaque-detect (should drop the alpha channel)
command += omaketx_command ("checker.tif", "checker-opaque.tx",
options=":opaque_detect=1")
# Test --monochrome-detect (first create a monochrome image)
command += oiiotool (" --pattern constant:color=.25,.25,.25 256x256 3 "
+ " -d uint8 -o " + make_relpath("gray.tif"))
command += omaketx_command ("gray.tif", "gray-mono.tx",
options=":monochrome_detect=1")
# Test --monochrome-detect on something that is NOT monochrome
command += oiiotool (" --pattern constant:color=.25,.2,.15 256x256 3 "
+ " -d uint8 -o " + make_relpath("pink.tif"))
command += omaketx_command ("pink.tif", "pink-mono.tx",
options=":monochrome_detect=1")
# Test --prman : should save 'separate' planarconfig, and funny 64x32 tiles
# since we are specifying 16 bits, and it should save as 'int16' even though
# we asked for unsigned.
command += omaketx_command ("checker.tif", "checker-prman.tx",
"-d uint16", options=":prman=1")
# Test --fixnan : take advantage of the bad.exr images in
# testsuite/oiiotool-fixnan. (Use --nomipmap to cut down on stats output)
# FIXME: would also like to test --checknan, but the problem with that is
# that it actually FAILS if there's a nan.
command += omaketx_command (OIIO_TESTSUITE_ROOT+"/oiiotool-fixnan/src/bad.exr", "nan.exr",
"--fixnan box3", options=":nomipmap=1",
showinfo=True, showinfo_extra="--stats")
# Test that when outputting half textures, we clamp large float values
# rather than inadvertently turning into Inf in the process of output to
# half.
command += oiiotool (" --pattern constant:color=1.0e6,1.0e6,1.0e6 2x2 3 -d float -o million.tif")
command += omaketx_command ("million.tif", "bigval.exr",
"-d half", showinfo_extra="--stats")
# Test --format to force exr even though it can't be deduced from the name.
command += omaketx_command ("checker.tif", "checker-exr.pdq",
options=":fileformatname=exr")
# Test that the oiio:SHA-1 hash is stable, and that that changing filter and
# using -hicomp result in different images and different hashes.
command += omaketx_command (oiio_images + "/grid.tif", "grid-lanczos3.tx",
options = ":filter=lanczos3", showinfo=False)
command += omaketx_command (oiio_images + "/grid.tif", "grid-lanczos3-hicomp.tx",
options = ":filter=lanczos3:highlightcomp=1", showinfo=False)
command += info_command ("grid.tx",
extraargs="--metamatch oiio:SHA-1")
command += info_command ("grid-lanczos3.tx",
extraargs="--metamatch oiio:SHA-1")
command += info_command ("grid-lanczos3-hicomp.tx",
extraargs="--metamatch oiio:SHA-1")
# Test that we cleanly replace any existing SHA-1 hash and ConstantColor
# hint in the ImageDescription of the input file.
command += oiiotool (" --pattern constant:color=1,0,0 64x64 3 "
+ " --caption \"foo SHA-1=1234abcd ConstantColor=[0.0,0,-0.0] bar\""
+ " -d uint8 -o " + make_relpath("small.tif") )
command += info_command ("small.tif", safematch=1);
command += omaketx_command ("small.tif", "small.tx",
options=":oiio=1:constant_color_detect=1")
# Regression test -- at one point, we had a bug where we were botching
# the poles of OpenEXR env maps, adding energy. Check it by creating an
# all-white image, turning it into an env map, and calculating its
# statistics (should be 1.0 everywhere).
command += oiiotool (" --pattern constant:color=1,1,1 4x2 3 "
+ " -d half -o " + make_relpath("white.exr"))
command += omaketx_command ("white.exr", "whiteenv.exr",
output_cmd="-oenv", showinfo=False)
command += oiiotool ("--stats -a whiteenv.exr")
command += oiiotool (" --pattern noise 64x64 1"
+ " -d half -o " + make_relpath("bump.exr"))
command += omaketx_command ("bump.exr", "bumpslope.exr",
extraargs="-d half",
output_cmd="-obump", showinfo=False)
command += oiiotool ("--stats -a bumpslope.exr")
outputs = [ "out.txt" ]
# To do: --filter --checknan --fullpixels
# --prman-metadata --ignore-unassoc
# --mipimage
# --envlatl TIFF, --envlatl EXR
# --colorconvert --unpremult -u --fovcot
|
py
|
1a58ec6d7209b57df579bd9557b324cbbdb65227
|
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="implicit_lambda",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version="0.4.0",
description="Implicit lambdas with placeholder notation and code generation",
# Fix windows newlines.
long_description=long_description.replace("\r\n", "\n"),
# The project's main homepage.
url="https://github.com/blackhc/implicit_lambda",
# Author details
author="Andreas @blackhc Kirsch",
author_email="[email protected]",
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Libraries :: Python Modules",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
],
# What does your project relate to?
keywords="tools lambda placeholder",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=["implicit_lambda", "implicit_lambda.details", "implicit_lambda.tests"],
package_dir={"": "src"},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
"dev": ["check-manifest"],
"test": ["coverage", "codecov", "pytest", "pytest-benchmark", "pytest-cov", "hypothesis"],
},
setup_requires=["pytest-runner"],
)
|
py
|
1a58eca0eaecbada72303cbdee5e3ca4e7154ab9
|
from cereal import car
from common.numpy_fast import mean
from selfdrive.config import Conversions as CV
from opendbc.can.can_define import CANDefine
from opendbc.can.parser import CANParser
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.gm.values import DBC, CAR, AccState, CanBus, \
CruiseButtons, STEER_THRESHOLD
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]['pt'])
self.shifter_values = can_define.dv["ECMPRDNL"]["PRNDL"]
def update(self, pt_cp):
ret = car.CarState.new_message()
self.prev_cruise_buttons = self.cruise_buttons
self.cruise_buttons = pt_cp.vl["ASCMSteeringButton"]['ACCButtons']
ret.wheelSpeeds.fl = pt_cp.vl["EBCMWheelSpdFront"]['FLWheelSpd'] * CV.KPH_TO_MS
ret.wheelSpeeds.fr = pt_cp.vl["EBCMWheelSpdFront"]['FRWheelSpd'] * CV.KPH_TO_MS
ret.wheelSpeeds.rl = pt_cp.vl["EBCMWheelSpdRear"]['RLWheelSpd'] * CV.KPH_TO_MS
ret.wheelSpeeds.rr = pt_cp.vl["EBCMWheelSpdRear"]['RRWheelSpd'] * CV.KPH_TO_MS
ret.vEgoRaw = mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr])
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.01
ret.steeringAngle = pt_cp.vl["PSCMSteeringAngle"]['SteeringWheelAngle']
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl["ECMPRDNL"]['PRNDL'], None))
ret.brake = pt_cp.vl["EBCMBrakePedalPosition"]['BrakePedalPosition'] / 0xd0
# Brake pedal's potentiometer returns near-zero reading even when pedal is not pressed.
if ret.brake < 10/0xd0:
ret.brake = 0.
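# (The raw value is normalized by 0xd0 == 208 counts, so readings below 10
# counts -- roughly 5% of that range -- are treated as no braking.)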
ret.gas = pt_cp.vl["AcceleratorPedal"]['AcceleratorPedal'] / 254.
ret.gasPressed = ret.gas > 1e-5
ret.steeringTorque = pt_cp.vl["PSCMStatus"]['LKADriverAppldTrq']
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
# 1 - open, 0 - closed
ret.doorOpen = (pt_cp.vl["BCMDoorBeltStatus"]['FrontLeftDoor'] == 1 or
pt_cp.vl["BCMDoorBeltStatus"]['FrontRightDoor'] == 1 or
pt_cp.vl["BCMDoorBeltStatus"]['RearLeftDoor'] == 1 or
pt_cp.vl["BCMDoorBeltStatus"]['RearRightDoor'] == 1)
# 1 - latched
ret.seatbeltUnlatched = pt_cp.vl["BCMDoorBeltStatus"]['LeftSeatBelt'] == 0
ret.leftBlinker = pt_cp.vl["BCMTurnSignals"]['TurnSignals'] == 1
ret.rightBlinker = pt_cp.vl["BCMTurnSignals"]['TurnSignals'] == 2
self.park_brake = pt_cp.vl["EPBStatus"]['EPBClosed']
ret.cruiseState.available = bool(pt_cp.vl["ECMEngineStatus"]['CruiseMainOn'])
ret.espDisabled = pt_cp.vl["ESPStatus"]['TractionControlOn'] != 1
self.pcm_acc_status = pt_cp.vl["AcceleratorPedal2"]['CruiseState']
ret.brakePressed = ret.brake > 1e-5
# Regen braking is braking
if self.car_fingerprint == CAR.VOLT:
ret.brakePressed = ret.brakePressed or bool(pt_cp.vl["EBCMRegenPaddle"]['RegenPaddle'])
ret.cruiseState.enabled = self.pcm_acc_status != AccState.OFF
# ret.cruiseState.standstill = self.pcm_acc_status == AccState.STANDSTILL
ret.cruiseState.standstill = False # Never be in standstill (for auto-resume to work)
# 0 - inactive, 1 - active, 2 - temporarily limited, 3 - failed
self.lkas_status = pt_cp.vl["PSCMStatus"]['LKATorqueDeliveredStatus']
ret.steerWarning = self.lkas_status not in [0, 1]
return ret
@staticmethod
def get_can_parser(CP):
# this function generates lists for signals, messages and initial values
signals = [
# sig_name, sig_address, default
("BrakePedalPosition", "EBCMBrakePedalPosition", 0),
("FrontLeftDoor", "BCMDoorBeltStatus", 0),
("FrontRightDoor", "BCMDoorBeltStatus", 0),
("RearLeftDoor", "BCMDoorBeltStatus", 0),
("RearRightDoor", "BCMDoorBeltStatus", 0),
("LeftSeatBelt", "BCMDoorBeltStatus", 0),
("RightSeatBelt", "BCMDoorBeltStatus", 0),
("TurnSignals", "BCMTurnSignals", 0),
("AcceleratorPedal", "AcceleratorPedal", 0),
("CruiseState", "AcceleratorPedal2", 0),
("ACCButtons", "ASCMSteeringButton", CruiseButtons.UNPRESS),
("SteeringWheelAngle", "PSCMSteeringAngle", 0),
("FLWheelSpd", "EBCMWheelSpdFront", 0),
("FRWheelSpd", "EBCMWheelSpdFront", 0),
("RLWheelSpd", "EBCMWheelSpdRear", 0),
("RRWheelSpd", "EBCMWheelSpdRear", 0),
("PRNDL", "ECMPRDNL", 0),
("LKADriverAppldTrq", "PSCMStatus", 0),
("LKATorqueDeliveredStatus", "PSCMStatus", 0),
("TractionControlOn", "ESPStatus", 0),
("EPBClosed", "EPBStatus", 0),
("CruiseMainOn", "ECMEngineStatus", 0),
]
if CP.carFingerprint == CAR.VOLT:
signals += [
("RegenPaddle", "EBCMRegenPaddle", 0),
]
return CANParser(DBC[CP.carFingerprint]['pt'], signals, [], CanBus.POWERTRAIN)
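# Hedged usage sketch (not part of the original file): the surrounding openpilot
# framework wires these pieces together roughly as follows. The step that feeds
# raw CAN frames into the parser is framework-specific and shown only as a
# placeholder.
#
#   CS = CarState(CP)
#   pt_cp = CarState.get_can_parser(CP)
#   # ... update pt_cp from received CAN messages so pt_cp.vl is populated ...
#   ret = CS.update(pt_cp)   # car.CarState message for the current frame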
|
py
|
1a58ede6a34862736621ed9a801caf7df16b78b7
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_axis19.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43813504, 45705472]
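# Fixed axis IDs keep the generated chart XML in sync with the Excel-created
# reference file that this test compares against.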
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'label_position': 'high'})
chart.set_y_axis({'label_position': 'low'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
py
|
1a58ee430979cd89a00fe4735947807feb6391dd
|
import os
import unittest
from shutil import rmtree
import numpy as np
class TestSkeletonIo(unittest.TestCase):
shape = 128
n_nodes = 100
tmp_folder = './tmp'
def setUp(self):
os.makedirs(self.tmp_folder, exist_ok=True)
def tearDown(self):
try:
rmtree(self.tmp_folder)
except OSError:
pass
def _get_skel(self):
coords = np.random.randint(0, self.shape, size=(self.n_nodes, 3))
edges = np.random.randint(0, self.n_nodes, size=(self.n_nodes, 2))
return coords, edges
def test_swc(self):
from elf.skeleton.io import read_swc, write_swc
n_skels = 5
for skel_id in range(n_skels):
path = os.path.join(self.tmp_folder, f'{skel_id}.swc')
coords, edges = self._get_skel()
write_swc(path, coords, edges)
_, coords_read, parents_read = read_swc(path)
self.assertTrue(np.array_equal(coords, coords_read))
self.assertEqual(len(parents_read), len(coords_read))
# checking for edges is a bit more complicated ...
# self.assertTrue(np.array_equal(edges, edges_read))
def test_nml(self):
from elf.skeleton.io import read_nml, write_nml
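# (As written, this test only exercises that the nml reader/writer import.)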
if __name__ == '__main__':
unittest.main()
|
py
|
1a58ee9df19cfb313f22364670558fe0b4d38f6e
|
from exception_wrappers.libraries.playhouse.apsw_ext import *
def migrate(migrator, database):
# Account
migrator.add_column('account', 'deleted', BooleanField(default=False))
#
# Schema specification (for migration verification)
#
SPEC = {
'account': {
'id': 'INTEGER PRIMARY KEY NOT NULL',
'name': 'VARCHAR(255)',
'thumb': 'TEXT',
'deleted': 'SMALLINT NOT NULL',
'refreshed_at': 'DATETIME'
},
}
|
py
|
1a58f06cdb19696e53c09f2916427d72457bde60
|
import numpy as np
from gym.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerBasketballEnv(SawyerXYZEnv):
def __init__(self):
liftThresh = 0.3
goal_low = (-0.1, 0.85, 0.15)
goal_high = (0.1, 0.9+1e-7, 0.15)
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.6, 0.03)
obj_high = (0.1, 0.7, 0.03)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0, 0.6, 0.03], dtype=np.float32),
'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),
}
self.goal = np.array([0, 0.9, 0.15])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self.liftThresh = liftThresh
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(
np.array(goal_low) + np.array([0, -0.05001, 0.1000]),
np.array(goal_high) + np.array([0, -0.05000, 0.1001])
)
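# (The goal_space offsets correspond to the [0, -0.05, 0.1] shift applied to
# basket_pos when computing self._target_pos in reset_model().)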
@property
def model_name(self):
return full_v1_path_for('sawyer_xyz/sawyer_basketball.xml')
@_assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pickRew, placingDist = self.compute_reward(action, ob)
self.curr_path_length += 1
info = {
'reachDist': reachDist,
'goalDist': placingDist,
'epRew': reward,
'pickRew': pickRew,
'success': float(placingDist <= 0.08)
}
return ob, reward, False, info
def _get_pos_objects(self):
return self.data.get_geom_xpos('objGeom')
def reset_model(self):
self._reset_hand()
basket_pos = self.goal.copy()
self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos
self._target_pos = self.data.site_xpos[self.model.site_name2id('goal')]
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
self.heightTarget = self.objHeight + self.liftThresh
if self.random_init:
goal_pos = self._get_state_rand_vec()
basket_pos = goal_pos[3:]
while np.linalg.norm(goal_pos[:2] - basket_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
basket_pos = goal_pos[3:]
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos
self._target_pos = basket_pos + np.array([0, -0.05, 0.1])
self._set_obj_xyz(self.obj_init_pos)
self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._target_pos)) + self.heightTarget
return self._get_obs()
def _reset_hand(self):
super()._reset_hand(10)
rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger)/2
self.pickCompleted = False
def compute_reward(self, actions, obs):
objPos = obs[3:6]
rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger)/2
heightTarget = self.heightTarget
goal = self._target_pos
reachDist = np.linalg.norm(objPos - fingerCOM)
placingDist = np.linalg.norm(objPos - goal)
assert np.all(goal == self._get_site_pos('goal'))
def reachReward():
reachRew = -reachDist
reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1])
zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1])
if reachDistxy < 0.05:
reachRew = -reachDist
else:
reachRew = -reachDistxy - 2*zRew
# incentive to close fingers when reachDist is small
if reachDist < 0.05:
reachRew = -reachDist + max(actions[-1],0)/50
return reachRew , reachDist
def pickCompletionCriteria():
tolerance = 0.01
if objPos[2] >= (heightTarget - tolerance):
return True
else:
return False
if pickCompletionCriteria():
self.pickCompleted = True
def objDropped():
return (objPos[2] < (self.objHeight + 0.005)) and (placingDist >0.02) and (reachDist > 0.02)
def orig_pickReward():
hScale = 100
if self.pickCompleted and not(objDropped()):
return hScale*heightTarget
elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) :
return hScale* min(heightTarget, objPos[2])
else:
return 0
def placeReward():
c1 = 1000 ; c2 = 0.01 ; c3 = 0.001
cond = self.pickCompleted and (reachDist < 0.1) and not(objDropped())
if cond:
placeRew = 1000*(self.maxPlacingDist - placingDist) + c1*(np.exp(-(placingDist**2)/c2) + np.exp(-(placingDist**2)/c3))
placeRew = max(placeRew,0)
return [placeRew , placingDist]
else:
return [0 , placingDist]
reachRew, reachDist = reachReward()
pickRew = orig_pickReward()
placeRew , placingDist = placeReward()
assert ((placeRew >=0) and (pickRew>=0))
reward = reachRew + pickRew + placeRew
return [reward, reachDist, pickRew, placingDist]
|
py
|
1a58f0895514afc56154f1fedd114dd3a3615c46
|
# Generated from 'v1_5_1.xml' on 2020-11-30 09:07:51.857660
from typing import Tuple
from toptica.lasersdk.client import UserLevel
from toptica.lasersdk.client import Client
from toptica.lasersdk.client import DecopBoolean
from toptica.lasersdk.client import DecopInteger
from toptica.lasersdk.client import DecopReal
from toptica.lasersdk.client import DecopString
from toptica.lasersdk.client import DecopBinary
from toptica.lasersdk.client import MutableDecopBoolean
from toptica.lasersdk.client import MutableDecopInteger
from toptica.lasersdk.client import MutableDecopReal
from toptica.lasersdk.client import MutableDecopString
from toptica.lasersdk.client import MutableDecopBinary
from toptica.lasersdk.client import Connection
from toptica.lasersdk.client import NetworkConnection
from toptica.lasersdk.client import SerialConnection
from toptica.lasersdk.client import DecopError
from toptica.lasersdk.client import DeviceNotFoundError
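# The classes below are generated, typed wrappers around the DeCoP parameter
# tree: each attribute binds a parameter path (such as 'laser1:dl:cc:current-set')
# to a (Mutable)Decop* accessor, and device commands are forwarded through
# Client.exec() using the matching ':<command>' path.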
class Laser:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._type_ = DecopString(client, name + ':type')
self._product_name = DecopString(client, name + ':product-name')
self._emission = DecopBoolean(client, name + ':emission')
self._health = DecopInteger(client, name + ':health')
self._health_txt = DecopString(client, name + ':health-txt')
self._dl = LaserHead(client, name + ':dl')
self._ctl = CtlT(client, name + ':ctl')
self._amp = LaserAmp(client, name + ':amp')
self._scan = Siggen(client, name + ':scan')
self._scope = ScopeT(client, name + ':scope')
self._nlo = Nlo(client, name + ':nlo')
self._pd_ext = PdExt(client, name + ':pd-ext')
self._power_stabilization = PwrStab(client, name + ':power-stabilization')
@property
def type_(self) -> 'DecopString':
return self._type_
@property
def product_name(self) -> 'DecopString':
return self._product_name
@property
def emission(self) -> 'DecopBoolean':
return self._emission
@property
def health(self) -> 'DecopInteger':
return self._health
@property
def health_txt(self) -> 'DecopString':
return self._health_txt
@property
def dl(self) -> 'LaserHead':
return self._dl
@property
def ctl(self) -> 'CtlT':
return self._ctl
@property
def amp(self) -> 'LaserAmp':
return self._amp
@property
def scan(self) -> 'Siggen':
return self._scan
@property
def scope(self) -> 'ScopeT':
return self._scope
@property
def nlo(self) -> 'Nlo':
return self._nlo
@property
def pd_ext(self) -> 'PdExt':
return self._pd_ext
@property
def power_stabilization(self) -> 'PwrStab':
return self._power_stabilization
def detect(self) -> None:
self.__client.exec(self.__name + ':detect', input_stream=None, output_type=None, return_type=None)
def save(self) -> None:
self.__client.exec(self.__name + ':save', input_stream=None, output_type=None, return_type=None)
def load(self) -> None:
self.__client.exec(self.__name + ':load', input_stream=None, output_type=None, return_type=None)
class LaserHead:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._legacy = DecopBoolean(client, name + ':legacy')
self._type_ = DecopString(client, name + ':type')
self._version = DecopString(client, name + ':version')
self._serial_number = DecopString(client, name + ':serial-number')
self._ontime = DecopInteger(client, name + ':ontime')
self._ontime_txt = DecopString(client, name + ':ontime-txt')
self._cc = CurrDrv1(client, name + ':cc')
self._tc = TcChannel(client, name + ':tc')
self._pc = PiezoDrv1(client, name + ':pc')
self._lock = Lock(client, name + ':lock')
self._pressure_compensation = PressureCompensation(client, name + ':pressure-compensation')
self._factory_settings = LhFactory(client, name + ':factory-settings')
@property
def legacy(self) -> 'DecopBoolean':
return self._legacy
@property
def type_(self) -> 'DecopString':
return self._type_
@property
def version(self) -> 'DecopString':
return self._version
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def ontime(self) -> 'DecopInteger':
return self._ontime
@property
def ontime_txt(self) -> 'DecopString':
return self._ontime_txt
@property
def cc(self) -> 'CurrDrv1':
return self._cc
@property
def tc(self) -> 'TcChannel':
return self._tc
@property
def pc(self) -> 'PiezoDrv1':
return self._pc
@property
def lock(self) -> 'Lock':
return self._lock
@property
def pressure_compensation(self) -> 'PressureCompensation':
return self._pressure_compensation
@property
def factory_settings(self) -> 'LhFactory':
return self._factory_settings
def store(self) -> None:
self.__client.exec(self.__name + ':store', input_stream=None, output_type=None, return_type=None)
def restore(self) -> None:
self.__client.exec(self.__name + ':restore', input_stream=None, output_type=None, return_type=None)
class CurrDrv1:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._path = DecopString(client, name + ':path')
self._variant = DecopString(client, name + ':variant')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._emission = DecopBoolean(client, name + ':emission')
self._current_set = MutableDecopReal(client, name + ':current-set')
self._current_offset = MutableDecopReal(client, name + ':current-offset')
self._current_set_dithering = MutableDecopBoolean(client, name + ':current-set-dithering')
self._external_input = ExtInput1(client, name + ':external-input')
self._output_filter = OutputFilter1(client, name + ':output-filter')
self._current_act = DecopReal(client, name + ':current-act')
self._positive_polarity = MutableDecopBoolean(client, name + ':positive-polarity')
self._current_clip = MutableDecopReal(client, name + ':current-clip')
self._current_clip_limit = DecopReal(client, name + ':current-clip-limit')
self._voltage_act = DecopReal(client, name + ':voltage-act')
self._voltage_clip = MutableDecopReal(client, name + ':voltage-clip')
self._feedforward_master = MutableDecopInteger(client, name + ':feedforward-master')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._pd = DecopReal(client, name + ':pd')
self._aux = DecopReal(client, name + ':aux')
self._snubber = MutableDecopBoolean(client, name + ':snubber')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
self._forced_off = MutableDecopBoolean(client, name + ':forced-off')
@property
def path(self) -> 'DecopString':
return self._path
@property
def variant(self) -> 'DecopString':
return self._variant
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def emission(self) -> 'DecopBoolean':
return self._emission
@property
def current_set(self) -> 'MutableDecopReal':
return self._current_set
@property
def current_offset(self) -> 'MutableDecopReal':
return self._current_offset
@property
def current_set_dithering(self) -> 'MutableDecopBoolean':
return self._current_set_dithering
@property
def external_input(self) -> 'ExtInput1':
return self._external_input
@property
def output_filter(self) -> 'OutputFilter1':
return self._output_filter
@property
def current_act(self) -> 'DecopReal':
return self._current_act
@property
def positive_polarity(self) -> 'MutableDecopBoolean':
return self._positive_polarity
@property
def current_clip(self) -> 'MutableDecopReal':
return self._current_clip
@property
def current_clip_limit(self) -> 'DecopReal':
return self._current_clip_limit
@property
def voltage_act(self) -> 'DecopReal':
return self._voltage_act
@property
def voltage_clip(self) -> 'MutableDecopReal':
return self._voltage_clip
@property
def feedforward_master(self) -> 'MutableDecopInteger':
return self._feedforward_master
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def pd(self) -> 'DecopReal':
return self._pd
@property
def aux(self) -> 'DecopReal':
return self._aux
@property
def snubber(self) -> 'MutableDecopBoolean':
return self._snubber
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
@property
def forced_off(self) -> 'MutableDecopBoolean':
return self._forced_off
class ExtInput1:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._signal = MutableDecopInteger(client, name + ':signal')
self._factor = MutableDecopReal(client, name + ':factor')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
@property
def signal(self) -> 'MutableDecopInteger':
return self._signal
@property
def factor(self) -> 'MutableDecopReal':
return self._factor
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
class OutputFilter1:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._slew_rate = MutableDecopReal(client, name + ':slew-rate')
self._slew_rate_enabled = MutableDecopBoolean(client, name + ':slew-rate-enabled')
self._slew_rate_limited = DecopBoolean(client, name + ':slew-rate-limited')
@property
def slew_rate(self) -> 'MutableDecopReal':
return self._slew_rate
@property
def slew_rate_enabled(self) -> 'MutableDecopBoolean':
return self._slew_rate_enabled
@property
def slew_rate_limited(self) -> 'DecopBoolean':
return self._slew_rate_limited
class TcChannel:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._path = DecopString(client, name + ':path')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._temp_act = DecopReal(client, name + ':temp-act')
self._temp_set = MutableDecopReal(client, name + ':temp-set')
self._ready = DecopBoolean(client, name + ':ready')
self._fault = DecopBoolean(client, name + ':fault')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
self._t_loop = TcChannelTLoop(client, name + ':t-loop')
self._c_loop = TcChannelCLoop(client, name + ':c-loop')
self._limits = TcChannelCheck(client, name + ':limits')
self._current_set = DecopReal(client, name + ':current-set')
self._current_set_min = MutableDecopReal(client, name + ':current-set-min')
self._current_set_max = MutableDecopReal(client, name + ':current-set-max')
self._current_act = DecopReal(client, name + ':current-act')
self._voltage_act = DecopReal(client, name + ':voltage-act')
self._resistance = DecopReal(client, name + ':resistance')
self._ntc_series_resistance = DecopReal(client, name + ':ntc-series-resistance')
self._temp_set_max = MutableDecopReal(client, name + ':temp-set-max')
self._temp_set_min = MutableDecopReal(client, name + ':temp-set-min')
self._temp_reset = MutableDecopBoolean(client, name + ':temp-reset')
self._temp_roc_enabled = MutableDecopBoolean(client, name + ':temp-roc-enabled')
self._temp_roc_limit = MutableDecopReal(client, name + ':temp-roc-limit')
self._power_source = DecopInteger(client, name + ':power-source')
self._drv_voltage = DecopReal(client, name + ':drv-voltage')
@property
def path(self) -> 'DecopString':
return self._path
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def temp_act(self) -> 'DecopReal':
return self._temp_act
@property
def temp_set(self) -> 'MutableDecopReal':
return self._temp_set
@property
def ready(self) -> 'DecopBoolean':
return self._ready
@property
def fault(self) -> 'DecopBoolean':
return self._fault
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
@property
def t_loop(self) -> 'TcChannelTLoop':
return self._t_loop
@property
def c_loop(self) -> 'TcChannelCLoop':
return self._c_loop
@property
def limits(self) -> 'TcChannelCheck':
return self._limits
@property
def current_set(self) -> 'DecopReal':
return self._current_set
@property
def current_set_min(self) -> 'MutableDecopReal':
return self._current_set_min
@property
def current_set_max(self) -> 'MutableDecopReal':
return self._current_set_max
@property
def current_act(self) -> 'DecopReal':
return self._current_act
@property
def voltage_act(self) -> 'DecopReal':
return self._voltage_act
@property
def resistance(self) -> 'DecopReal':
return self._resistance
@property
def ntc_series_resistance(self) -> 'DecopReal':
return self._ntc_series_resistance
@property
def temp_set_max(self) -> 'MutableDecopReal':
return self._temp_set_max
@property
def temp_set_min(self) -> 'MutableDecopReal':
return self._temp_set_min
@property
def temp_reset(self) -> 'MutableDecopBoolean':
return self._temp_reset
@property
def temp_roc_enabled(self) -> 'MutableDecopBoolean':
return self._temp_roc_enabled
@property
def temp_roc_limit(self) -> 'MutableDecopReal':
return self._temp_roc_limit
@property
def power_source(self) -> 'DecopInteger':
return self._power_source
@property
def drv_voltage(self) -> 'DecopReal':
return self._drv_voltage
def check_peltier(self) -> float:
return self.__client.exec(self.__name + ':check-peltier', input_stream=None, output_type=None, return_type=float)
class TcChannelTLoop:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._on = MutableDecopBoolean(client, name + ':on')
self._p_gain = MutableDecopReal(client, name + ':p-gain')
self._i_gain = MutableDecopReal(client, name + ':i-gain')
self._d_gain = MutableDecopReal(client, name + ':d-gain')
self._ok_tolerance = MutableDecopReal(client, name + ':ok-tolerance')
self._ok_time = MutableDecopReal(client, name + ':ok-time')
@property
def on(self) -> 'MutableDecopBoolean':
return self._on
@property
def p_gain(self) -> 'MutableDecopReal':
return self._p_gain
@property
def i_gain(self) -> 'MutableDecopReal':
return self._i_gain
@property
def d_gain(self) -> 'MutableDecopReal':
return self._d_gain
@property
def ok_tolerance(self) -> 'MutableDecopReal':
return self._ok_tolerance
@property
def ok_time(self) -> 'MutableDecopReal':
return self._ok_time
class TcChannelCLoop:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._on = MutableDecopBoolean(client, name + ':on')
self._i_gain = MutableDecopReal(client, name + ':i-gain')
@property
def on(self) -> 'MutableDecopBoolean':
return self._on
@property
def i_gain(self) -> 'MutableDecopReal':
return self._i_gain
class TcChannelCheck:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._temp_min = MutableDecopReal(client, name + ':temp-min')
self._temp_max = MutableDecopReal(client, name + ':temp-max')
self._timeout = MutableDecopInteger(client, name + ':timeout')
self._timed_out = DecopBoolean(client, name + ':timed-out')
self._out_of_range = DecopBoolean(client, name + ':out-of-range')
@property
def temp_min(self) -> 'MutableDecopReal':
return self._temp_min
@property
def temp_max(self) -> 'MutableDecopReal':
return self._temp_max
@property
def timeout(self) -> 'MutableDecopInteger':
return self._timeout
@property
def timed_out(self) -> 'DecopBoolean':
return self._timed_out
@property
def out_of_range(self) -> 'DecopBoolean':
return self._out_of_range
class PiezoDrv1:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._path = DecopString(client, name + ':path')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._voltage_set = MutableDecopReal(client, name + ':voltage-set')
self._voltage_min = MutableDecopReal(client, name + ':voltage-min')
self._voltage_max = MutableDecopReal(client, name + ':voltage-max')
self._voltage_set_dithering = MutableDecopBoolean(client, name + ':voltage-set-dithering')
self._external_input = ExtInput1(client, name + ':external-input')
self._output_filter = OutputFilter1(client, name + ':output-filter')
self._voltage_act = DecopReal(client, name + ':voltage-act')
self._feedforward_master = MutableDecopInteger(client, name + ':feedforward-master')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
@property
def path(self) -> 'DecopString':
return self._path
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def voltage_set(self) -> 'MutableDecopReal':
return self._voltage_set
@property
def voltage_min(self) -> 'MutableDecopReal':
return self._voltage_min
@property
def voltage_max(self) -> 'MutableDecopReal':
return self._voltage_max
@property
def voltage_set_dithering(self) -> 'MutableDecopBoolean':
return self._voltage_set_dithering
@property
def external_input(self) -> 'ExtInput1':
return self._external_input
@property
def output_filter(self) -> 'OutputFilter1':
return self._output_filter
@property
def voltage_act(self) -> 'DecopReal':
return self._voltage_act
@property
def feedforward_master(self) -> 'MutableDecopInteger':
return self._feedforward_master
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
class Lock:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._type_ = MutableDecopInteger(client, name + ':type')
self._lock_without_lockpoint = MutableDecopBoolean(client, name + ':lock-without-lockpoint')
self._state = DecopInteger(client, name + ':state')
self._state_txt = DecopString(client, name + ':state-txt')
self._lock_enabled = MutableDecopBoolean(client, name + ':lock-enabled')
self._hold = MutableDecopBoolean(client, name + ':hold')
self._spectrum_input_channel = MutableDecopInteger(client, name + ':spectrum-input-channel')
self._pid_selection = MutableDecopInteger(client, name + ':pid-selection')
self._setpoint = MutableDecopReal(client, name + ':setpoint')
self._relock = AlRelock(client, name + ':relock')
self._reset = AlReset(client, name + ':reset')
self._window = AlWindow(client, name + ':window')
self._pid1 = Pid(client, name + ':pid1')
self._pid2 = Pid(client, name + ':pid2')
self._lockin = Lockin(client, name + ':lockin')
self._lockpoint = AlLockpoint(client, name + ':lockpoint')
self._candidate_filter = AlCandidateFilter(client, name + ':candidate-filter')
self._candidates = DecopBinary(client, name + ':candidates')
self._locking_delay = MutableDecopInteger(client, name + ':locking-delay')
self._background_trace = DecopBinary(client, name + ':background-trace')
@property
def type_(self) -> 'MutableDecopInteger':
return self._type_
@property
def lock_without_lockpoint(self) -> 'MutableDecopBoolean':
return self._lock_without_lockpoint
@property
def state(self) -> 'DecopInteger':
return self._state
@property
def state_txt(self) -> 'DecopString':
return self._state_txt
@property
def lock_enabled(self) -> 'MutableDecopBoolean':
return self._lock_enabled
@property
def hold(self) -> 'MutableDecopBoolean':
return self._hold
@property
def spectrum_input_channel(self) -> 'MutableDecopInteger':
return self._spectrum_input_channel
@property
def pid_selection(self) -> 'MutableDecopInteger':
return self._pid_selection
@property
def setpoint(self) -> 'MutableDecopReal':
return self._setpoint
@property
def relock(self) -> 'AlRelock':
return self._relock
@property
def reset(self) -> 'AlReset':
return self._reset
@property
def window(self) -> 'AlWindow':
return self._window
@property
def pid1(self) -> 'Pid':
return self._pid1
@property
def pid2(self) -> 'Pid':
return self._pid2
@property
def lockin(self) -> 'Lockin':
return self._lockin
@property
def lockpoint(self) -> 'AlLockpoint':
return self._lockpoint
@property
def candidate_filter(self) -> 'AlCandidateFilter':
return self._candidate_filter
@property
def candidates(self) -> 'DecopBinary':
return self._candidates
@property
def locking_delay(self) -> 'MutableDecopInteger':
return self._locking_delay
@property
def background_trace(self) -> 'DecopBinary':
return self._background_trace
def show_candidates(self) -> Tuple[str, int]:
return self.__client.exec(self.__name + ':show-candidates', input_stream=None, output_type=str, return_type=int)
def find_candidates(self) -> None:
self.__client.exec(self.__name + ':find-candidates', input_stream=None, output_type=None, return_type=None)
def select_lockpoint(self, x: float, y: float, type_: int) -> None:
assert isinstance(x, float), "expected type 'float' for parameter 'x', got '{}'".format(type(x))
assert isinstance(y, float), "expected type 'float' for parameter 'y', got '{}'".format(type(y))
assert isinstance(type_, int), "expected type 'int' for parameter 'type_', got '{}'".format(type(type_))
self.__client.exec(self.__name + ':select-lockpoint', x, y, type_, input_stream=None, output_type=None, return_type=None)
def close(self) -> None:
self.__client.exec(self.__name + ':close', input_stream=None, output_type=None, return_type=None)
def open(self) -> None:
self.__client.exec(self.__name + ':open', input_stream=None, output_type=None, return_type=None)
class AlRelock:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._output_channel = MutableDecopInteger(client, name + ':output-channel')
self._frequency = MutableDecopReal(client, name + ':frequency')
self._amplitude = MutableDecopReal(client, name + ':amplitude')
self._delay = MutableDecopReal(client, name + ':delay')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def output_channel(self) -> 'MutableDecopInteger':
return self._output_channel
@property
def frequency(self) -> 'MutableDecopReal':
return self._frequency
@property
def amplitude(self) -> 'MutableDecopReal':
return self._amplitude
@property
def delay(self) -> 'MutableDecopReal':
return self._delay
class AlReset:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
class AlWindow:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._input_channel = MutableDecopInteger(client, name + ':input-channel')
self._level_high = MutableDecopReal(client, name + ':level-high')
self._level_low = MutableDecopReal(client, name + ':level-low')
self._level_hysteresis = MutableDecopReal(client, name + ':level-hysteresis')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def input_channel(self) -> 'MutableDecopInteger':
return self._input_channel
@property
def level_high(self) -> 'MutableDecopReal':
return self._level_high
@property
def level_low(self) -> 'MutableDecopReal':
return self._level_low
@property
def level_hysteresis(self) -> 'MutableDecopReal':
return self._level_hysteresis
class Pid:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._gain = Gain(client, name + ':gain')
self._sign = MutableDecopBoolean(client, name + ':sign')
self._slope = MutableDecopBoolean(client, name + ':slope')
self._setpoint = MutableDecopReal(client, name + ':setpoint')
self._input_channel = MutableDecopInteger(client, name + ':input-channel')
self._output_channel = MutableDecopInteger(client, name + ':output-channel')
self._outputlimit = Outputlimit(client, name + ':outputlimit')
self._hold = MutableDecopBoolean(client, name + ':hold')
self._lock_state = DecopBoolean(client, name + ':lock-state')
self._hold_state = DecopBoolean(client, name + ':hold-state')
self._regulating_state = DecopBoolean(client, name + ':regulating-state')
self._hold_output_on_unlock = MutableDecopBoolean(client, name + ':hold-output-on-unlock')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def gain(self) -> 'Gain':
return self._gain
@property
def sign(self) -> 'MutableDecopBoolean':
return self._sign
@property
def slope(self) -> 'MutableDecopBoolean':
return self._slope
@property
def setpoint(self) -> 'MutableDecopReal':
return self._setpoint
@property
def input_channel(self) -> 'MutableDecopInteger':
return self._input_channel
@property
def output_channel(self) -> 'MutableDecopInteger':
return self._output_channel
@property
def outputlimit(self) -> 'Outputlimit':
return self._outputlimit
@property
def hold(self) -> 'MutableDecopBoolean':
return self._hold
@property
def lock_state(self) -> 'DecopBoolean':
return self._lock_state
@property
def hold_state(self) -> 'DecopBoolean':
return self._hold_state
@property
def regulating_state(self) -> 'DecopBoolean':
return self._regulating_state
@property
def hold_output_on_unlock(self) -> 'MutableDecopBoolean':
return self._hold_output_on_unlock
class Gain:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._all = MutableDecopReal(client, name + ':all')
self._p = MutableDecopReal(client, name + ':p')
self._i = MutableDecopReal(client, name + ':i')
self._d = MutableDecopReal(client, name + ':d')
self._i_cutoff = MutableDecopReal(client, name + ':i-cutoff')
self._i_cutoff_enabled = MutableDecopBoolean(client, name + ':i-cutoff-enabled')
self._fc_ip = DecopReal(client, name + ':fc-ip')
self._fc_pd = DecopReal(client, name + ':fc-pd')
@property
def all(self) -> 'MutableDecopReal':
return self._all
@property
def p(self) -> 'MutableDecopReal':
return self._p
@property
def i(self) -> 'MutableDecopReal':
return self._i
@property
def d(self) -> 'MutableDecopReal':
return self._d
@property
def i_cutoff(self) -> 'MutableDecopReal':
return self._i_cutoff
@property
def i_cutoff_enabled(self) -> 'MutableDecopBoolean':
return self._i_cutoff_enabled
@property
def fc_ip(self) -> 'DecopReal':
return self._fc_ip
@property
def fc_pd(self) -> 'DecopReal':
return self._fc_pd
class Outputlimit:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._max = MutableDecopReal(client, name + ':max')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def max(self) -> 'MutableDecopReal':
return self._max
class Lockin:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._modulation_enabled = MutableDecopBoolean(client, name + ':modulation-enabled')
self._input_channel = MutableDecopInteger(client, name + ':input-channel')
self._modulation_output_channel = MutableDecopInteger(client, name + ':modulation-output-channel')
self._frequency = MutableDecopReal(client, name + ':frequency')
self._amplitude = MutableDecopReal(client, name + ':amplitude')
self._phase_shift = MutableDecopReal(client, name + ':phase-shift')
self._lock_level = MutableDecopReal(client, name + ':lock-level')
@property
def modulation_enabled(self) -> 'MutableDecopBoolean':
return self._modulation_enabled
@property
def input_channel(self) -> 'MutableDecopInteger':
return self._input_channel
@property
def modulation_output_channel(self) -> 'MutableDecopInteger':
return self._modulation_output_channel
@property
def frequency(self) -> 'MutableDecopReal':
return self._frequency
@property
def amplitude(self) -> 'MutableDecopReal':
return self._amplitude
@property
def phase_shift(self) -> 'MutableDecopReal':
return self._phase_shift
@property
def lock_level(self) -> 'MutableDecopReal':
return self._lock_level
class AlLockpoint:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._position = Coordinate(client, name + ':position')
self._type_ = DecopString(client, name + ':type')
@property
def position(self) -> 'Coordinate':
return self._position
@property
def type_(self) -> 'DecopString':
return self._type_
class Coordinate:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
def get(self) -> Tuple[float, float]:
return self.__client.get(self.__name)
def set(self, x: float, y: float) -> None:
assert isinstance(x, float), "expected type 'float' for 'x', got '{}'".format(type(x))
assert isinstance(y, float), "expected type 'float' for 'y', got '{}'".format(type(y))
self.__client.set(self.__name, x, y)
class AlCandidateFilter:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._top = MutableDecopBoolean(client, name + ':top')
self._bottom = MutableDecopBoolean(client, name + ':bottom')
self._positive_edge = MutableDecopBoolean(client, name + ':positive-edge')
self._negative_edge = MutableDecopBoolean(client, name + ':negative-edge')
self._edge_level = MutableDecopReal(client, name + ':edge-level')
self._peak_noise_tolerance = MutableDecopReal(client, name + ':peak-noise-tolerance')
self._edge_min_distance = MutableDecopInteger(client, name + ':edge-min-distance')
@property
def top(self) -> 'MutableDecopBoolean':
return self._top
@property
def bottom(self) -> 'MutableDecopBoolean':
return self._bottom
@property
def positive_edge(self) -> 'MutableDecopBoolean':
return self._positive_edge
@property
def negative_edge(self) -> 'MutableDecopBoolean':
return self._negative_edge
@property
def edge_level(self) -> 'MutableDecopReal':
return self._edge_level
@property
def peak_noise_tolerance(self) -> 'MutableDecopReal':
return self._peak_noise_tolerance
@property
def edge_min_distance(self) -> 'MutableDecopInteger':
return self._edge_min_distance
class PressureCompensation:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._air_pressure = DecopReal(client, name + ':air-pressure')
self._factor = MutableDecopReal(client, name + ':factor')
self._offset = DecopReal(client, name + ':offset')
self._compensation_voltage = DecopReal(client, name + ':compensation-voltage')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def air_pressure(self) -> 'DecopReal':
return self._air_pressure
@property
def factor(self) -> 'MutableDecopReal':
return self._factor
@property
def offset(self) -> 'DecopReal':
return self._offset
@property
def compensation_voltage(self) -> 'DecopReal':
return self._compensation_voltage
class LhFactory:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._wavelength = MutableDecopReal(client, name + ':wavelength')
self._threshold_current = MutableDecopReal(client, name + ':threshold-current')
self._power = MutableDecopReal(client, name + ':power')
self._cc = LhFactoryCc(client, name + ':cc')
self._tc = TcFactorySettings(client, name + ':tc')
self._pc = PcFactorySettings(client, name + ':pc')
self._last_modified = DecopString(client, name + ':last-modified')
self._modified = DecopBoolean(client, name + ':modified')
@property
def wavelength(self) -> 'MutableDecopReal':
return self._wavelength
@property
def threshold_current(self) -> 'MutableDecopReal':
return self._threshold_current
@property
def power(self) -> 'MutableDecopReal':
return self._power
@property
def cc(self) -> 'LhFactoryCc':
return self._cc
@property
def tc(self) -> 'TcFactorySettings':
return self._tc
@property
def pc(self) -> 'PcFactorySettings':
return self._pc
@property
def last_modified(self) -> 'DecopString':
return self._last_modified
@property
def modified(self) -> 'DecopBoolean':
return self._modified
def apply(self) -> None:
self.__client.exec(self.__name + ':apply', input_stream=None, output_type=None, return_type=None)
def retrieve_now(self) -> None:
self.__client.exec(self.__name + ':retrieve-now', input_stream=None, output_type=None, return_type=None)
class LhFactoryCc:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._current_set = MutableDecopReal(client, name + ':current-set')
self._current_clip = MutableDecopReal(client, name + ':current-clip')
self._current_clip_modified = DecopBoolean(client, name + ':current-clip-modified')
self._current_clip_last_modified = DecopString(client, name + ':current-clip-last-modified')
self._voltage_clip = MutableDecopReal(client, name + ':voltage-clip')
self._positive_polarity = MutableDecopBoolean(client, name + ':positive-polarity')
self._snubber = MutableDecopBoolean(client, name + ':snubber')
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def current_set(self) -> 'MutableDecopReal':
return self._current_set
@property
def current_clip(self) -> 'MutableDecopReal':
return self._current_clip
@property
def current_clip_modified(self) -> 'DecopBoolean':
return self._current_clip_modified
@property
def current_clip_last_modified(self) -> 'DecopString':
return self._current_clip_last_modified
@property
def voltage_clip(self) -> 'MutableDecopReal':
return self._voltage_clip
@property
def positive_polarity(self) -> 'MutableDecopBoolean':
return self._positive_polarity
@property
def snubber(self) -> 'MutableDecopBoolean':
return self._snubber
class TcFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._temp_min = MutableDecopReal(client, name + ':temp-min')
self._temp_max = MutableDecopReal(client, name + ':temp-max')
self._temp_set = MutableDecopReal(client, name + ':temp-set')
self._temp_roc_enabled = MutableDecopBoolean(client, name + ':temp-roc-enabled')
self._temp_roc_limit = MutableDecopReal(client, name + ':temp-roc-limit')
self._current_max = MutableDecopReal(client, name + ':current-max')
self._current_min = MutableDecopReal(client, name + ':current-min')
self._p_gain = MutableDecopReal(client, name + ':p-gain')
self._i_gain = MutableDecopReal(client, name + ':i-gain')
self._d_gain = MutableDecopReal(client, name + ':d-gain')
self._c_gain = MutableDecopReal(client, name + ':c-gain')
self._ok_tolerance = MutableDecopReal(client, name + ':ok-tolerance')
self._ok_time = MutableDecopReal(client, name + ':ok-time')
self._timeout = MutableDecopInteger(client, name + ':timeout')
self._power_source = MutableDecopInteger(client, name + ':power-source')
self._ntc_series_resistance = MutableDecopReal(client, name + ':ntc-series-resistance')
@property
def temp_min(self) -> 'MutableDecopReal':
return self._temp_min
@property
def temp_max(self) -> 'MutableDecopReal':
return self._temp_max
@property
def temp_set(self) -> 'MutableDecopReal':
return self._temp_set
@property
def temp_roc_enabled(self) -> 'MutableDecopBoolean':
return self._temp_roc_enabled
@property
def temp_roc_limit(self) -> 'MutableDecopReal':
return self._temp_roc_limit
@property
def current_max(self) -> 'MutableDecopReal':
return self._current_max
@property
def current_min(self) -> 'MutableDecopReal':
return self._current_min
@property
def p_gain(self) -> 'MutableDecopReal':
return self._p_gain
@property
def i_gain(self) -> 'MutableDecopReal':
return self._i_gain
@property
def d_gain(self) -> 'MutableDecopReal':
return self._d_gain
@property
def c_gain(self) -> 'MutableDecopReal':
return self._c_gain
@property
def ok_tolerance(self) -> 'MutableDecopReal':
return self._ok_tolerance
@property
def ok_time(self) -> 'MutableDecopReal':
return self._ok_time
@property
def timeout(self) -> 'MutableDecopInteger':
return self._timeout
@property
def power_source(self) -> 'MutableDecopInteger':
return self._power_source
@property
def ntc_series_resistance(self) -> 'MutableDecopReal':
return self._ntc_series_resistance
class PcFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._voltage_min = MutableDecopReal(client, name + ':voltage-min')
self._voltage_max = MutableDecopReal(client, name + ':voltage-max')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._capacitance = MutableDecopReal(client, name + ':capacitance')
self._scan_offset = MutableDecopReal(client, name + ':scan-offset')
self._scan_amplitude = MutableDecopReal(client, name + ':scan-amplitude')
self._slew_rate = MutableDecopReal(client, name + ':slew-rate')
self._slew_rate_enabled = MutableDecopBoolean(client, name + ':slew-rate-enabled')
self._pressure_compensation_factor = MutableDecopReal(client, name + ':pressure-compensation-factor')
@property
def voltage_min(self) -> 'MutableDecopReal':
return self._voltage_min
@property
def voltage_max(self) -> 'MutableDecopReal':
return self._voltage_max
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def capacitance(self) -> 'MutableDecopReal':
return self._capacitance
@property
def scan_offset(self) -> 'MutableDecopReal':
return self._scan_offset
@property
def scan_amplitude(self) -> 'MutableDecopReal':
return self._scan_amplitude
@property
def slew_rate(self) -> 'MutableDecopReal':
return self._slew_rate
@property
def slew_rate_enabled(self) -> 'MutableDecopBoolean':
return self._slew_rate_enabled
@property
def pressure_compensation_factor(self) -> 'MutableDecopReal':
return self._pressure_compensation_factor
class CtlT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._fpga_fw_ver = DecopInteger(client, name + ':fpga-fw-ver')
self._wavelength_set = MutableDecopReal(client, name + ':wavelength-set')
self._wavelength_act = DecopReal(client, name + ':wavelength-act')
self._wavelength_min = DecopReal(client, name + ':wavelength-min')
self._wavelength_max = DecopReal(client, name + ':wavelength-max')
self._tuning_current_min = DecopReal(client, name + ':tuning-current-min')
self._tuning_power_min = DecopReal(client, name + ':tuning-power-min')
self._state = DecopInteger(client, name + ':state')
self._state_txt = DecopString(client, name + ':state-txt')
self._head_temperature = DecopReal(client, name + ':head-temperature')
self._scan = CtlScanT(client, name + ':scan')
self._optimization = CtlOptimizationT(client, name + ':optimization')
self._remote_control = CtlRemoteControl(client, name + ':remote-control')
self._mode_control = CtlModeControl(client, name + ':mode-control')
self._motor = CtlMotor(client, name + ':motor')
self._power = CtlPower(client, name + ':power')
self._factory_settings = CtlFactory(client, name + ':factory-settings')
@property
def fpga_fw_ver(self) -> 'DecopInteger':
return self._fpga_fw_ver
@property
def wavelength_set(self) -> 'MutableDecopReal':
return self._wavelength_set
@property
def wavelength_act(self) -> 'DecopReal':
return self._wavelength_act
@property
def wavelength_min(self) -> 'DecopReal':
return self._wavelength_min
@property
def wavelength_max(self) -> 'DecopReal':
return self._wavelength_max
@property
def tuning_current_min(self) -> 'DecopReal':
return self._tuning_current_min
@property
def tuning_power_min(self) -> 'DecopReal':
return self._tuning_power_min
@property
def state(self) -> 'DecopInteger':
return self._state
@property
def state_txt(self) -> 'DecopString':
return self._state_txt
@property
def head_temperature(self) -> 'DecopReal':
return self._head_temperature
@property
def scan(self) -> 'CtlScanT':
return self._scan
@property
def optimization(self) -> 'CtlOptimizationT':
return self._optimization
@property
def remote_control(self) -> 'CtlRemoteControl':
return self._remote_control
@property
def mode_control(self) -> 'CtlModeControl':
return self._mode_control
@property
def motor(self) -> 'CtlMotor':
return self._motor
@property
def power(self) -> 'CtlPower':
return self._power
@property
def factory_settings(self) -> 'CtlFactory':
return self._factory_settings
class CtlScanT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._wavelength_begin = MutableDecopReal(client, name + ':wavelength-begin')
self._wavelength_end = MutableDecopReal(client, name + ':wavelength-end')
self._speed = MutableDecopReal(client, name + ':speed')
self._speed_min = DecopReal(client, name + ':speed-min')
self._speed_max = DecopReal(client, name + ':speed-max')
self._microsteps = MutableDecopBoolean(client, name + ':microsteps')
self._progress = DecopInteger(client, name + ':progress')
self._remaining_time = DecopInteger(client, name + ':remaining-time')
self._continuous_mode = MutableDecopBoolean(client, name + ':continuous-mode')
self._trigger = CtlTriggerT(client, name + ':trigger')
self._shape = MutableDecopInteger(client, name + ':shape')
@property
def wavelength_begin(self) -> 'MutableDecopReal':
return self._wavelength_begin
@property
def wavelength_end(self) -> 'MutableDecopReal':
return self._wavelength_end
@property
def speed(self) -> 'MutableDecopReal':
return self._speed
@property
def speed_min(self) -> 'DecopReal':
return self._speed_min
@property
def speed_max(self) -> 'DecopReal':
return self._speed_max
@property
def microsteps(self) -> 'MutableDecopBoolean':
return self._microsteps
@property
def progress(self) -> 'DecopInteger':
return self._progress
@property
def remaining_time(self) -> 'DecopInteger':
return self._remaining_time
@property
def continuous_mode(self) -> 'MutableDecopBoolean':
return self._continuous_mode
@property
def trigger(self) -> 'CtlTriggerT':
return self._trigger
@property
def shape(self) -> 'MutableDecopInteger':
return self._shape
def start(self) -> None:
self.__client.exec(self.__name + ':start', input_stream=None, output_type=None, return_type=None)
def stop(self) -> None:
self.__client.exec(self.__name + ':stop', input_stream=None, output_type=None, return_type=None)
def pause(self) -> None:
self.__client.exec(self.__name + ':pause', input_stream=None, output_type=None, return_type=None)
def continue_(self) -> None:
self.__client.exec(self.__name + ':continue', input_stream=None, output_type=None, return_type=None)
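# Usage sketch (not part of the generated API): assuming `client` is an open
# Client connection and the enclosing CTL object defined above is available as
# `ctl` (e.g. built with a system name such as 'laser1:ctl'; the exact path
# depends on the device), a scan could be driven through these objects:
#
#     ctl.scan.trigger.input_enabled   # MutableDecopBoolean at '...:scan:trigger:input-enabled'
#     ctl.scan.start()                 # executes '...:scan:start' on the device
#     ctl.scan.pause()                 # executes '...:scan:pause'
#     ctl.scan.stop()                  # executes '...:scan:stop'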
class CtlTriggerT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._input_enabled = MutableDecopBoolean(client, name + ':input-enabled')
self._input_channel = MutableDecopInteger(client, name + ':input-channel')
self._output_enabled = MutableDecopBoolean(client, name + ':output-enabled')
self._output_threshold = MutableDecopReal(client, name + ':output-threshold')
@property
def input_enabled(self) -> 'MutableDecopBoolean':
return self._input_enabled
@property
def input_channel(self) -> 'MutableDecopInteger':
return self._input_channel
@property
def output_enabled(self) -> 'MutableDecopBoolean':
return self._output_enabled
@property
def output_threshold(self) -> 'MutableDecopReal':
return self._output_threshold
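# CtlOptimizationT: optimization routines of the CTL subsystem; exposes a
# progress value and the 'smile', 'flow' and 'abort' device commands.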
class CtlOptimizationT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._progress = DecopInteger(client, name + ':progress')
@property
def progress(self) -> 'DecopInteger':
return self._progress
def smile(self) -> None:
self.__client.exec(self.__name + ':smile', input_stream=None, output_type=None, return_type=None)
def flow(self) -> None:
self.__client.exec(self.__name + ':flow', input_stream=None, output_type=None, return_type=None)
def abort(self) -> None:
self.__client.exec(self.__name + ':abort', input_stream=None, output_type=None, return_type=None)
class CtlRemoteControl:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._signal = MutableDecopInteger(client, name + ':signal')
self._factor = MutableDecopReal(client, name + ':factor')
self._speed = MutableDecopReal(client, name + ':speed')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
@property
def signal(self) -> 'MutableDecopInteger':
return self._signal
@property
def factor(self) -> 'MutableDecopReal':
return self._factor
@property
def speed(self) -> 'MutableDecopReal':
return self._speed
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
class CtlModeControl:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._loop_enabled = MutableDecopBoolean(client, name + ':loop-enabled')
@property
def loop_enabled(self) -> 'MutableDecopBoolean':
return self._loop_enabled
class CtlMotor:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._position_accuracy = MutableDecopInteger(client, name + ':position-accuracy')
self._position_hysteresis = MutableDecopInteger(client, name + ':position-hysteresis')
self._power_save_disabled = MutableDecopBoolean(client, name + ':power-save-disabled')
@property
def position_accuracy(self) -> 'MutableDecopInteger':
return self._position_accuracy
@property
def position_hysteresis(self) -> 'MutableDecopInteger':
return self._position_hysteresis
@property
def power_save_disabled(self) -> 'MutableDecopBoolean':
return self._power_save_disabled
class CtlPower:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._power_act = DecopReal(client, name + ':power-act')
@property
def power_act(self) -> 'DecopReal':
return self._power_act
class CtlFactory:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._wavelength_min = DecopReal(client, name + ':wavelength-min')
self._wavelength_max = DecopReal(client, name + ':wavelength-max')
self._tuning_current_min = DecopReal(client, name + ':tuning-current-min')
self._tuning_power_min = DecopReal(client, name + ':tuning-power-min')
@property
def wavelength_min(self) -> 'DecopReal':
return self._wavelength_min
@property
def wavelength_max(self) -> 'DecopReal':
return self._wavelength_max
@property
def tuning_current_min(self) -> 'DecopReal':
return self._tuning_current_min
@property
def tuning_power_min(self) -> 'DecopReal':
return self._tuning_power_min
def apply(self) -> None:
self.__client.exec(self.__name + ':apply', input_stream=None, output_type=None, return_type=None)
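# LaserAmp: amplifier subsystem; identification and on-time values, the current
# driver (':cc'), temperature control (':tc'), seed/output power limits, the
# seed-only check and factory settings. store()/restore() execute the
# corresponding device commands.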
class LaserAmp:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._legacy = DecopBoolean(client, name + ':legacy')
self._type_ = DecopString(client, name + ':type')
self._version = DecopString(client, name + ':version')
self._serial_number = DecopString(client, name + ':serial-number')
self._ontime = DecopInteger(client, name + ':ontime')
self._ontime_txt = DecopString(client, name + ':ontime-txt')
self._cc = Cc5000Drv(client, name + ':cc')
self._tc = TcChannel(client, name + ':tc')
self._seed_limits = AmpPower(client, name + ':seed-limits')
self._output_limits = AmpPower(client, name + ':output-limits')
self._seedonly_check = AmpSeedonlyCheck(client, name + ':seedonly-check')
self._factory_settings = AmpFactory(client, name + ':factory-settings')
@property
def legacy(self) -> 'DecopBoolean':
return self._legacy
@property
def type_(self) -> 'DecopString':
return self._type_
@property
def version(self) -> 'DecopString':
return self._version
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def ontime(self) -> 'DecopInteger':
return self._ontime
@property
def ontime_txt(self) -> 'DecopString':
return self._ontime_txt
@property
def cc(self) -> 'Cc5000Drv':
return self._cc
@property
def tc(self) -> 'TcChannel':
return self._tc
@property
def seed_limits(self) -> 'AmpPower':
return self._seed_limits
@property
def output_limits(self) -> 'AmpPower':
return self._output_limits
@property
def seedonly_check(self) -> 'AmpSeedonlyCheck':
return self._seedonly_check
@property
def factory_settings(self) -> 'AmpFactory':
return self._factory_settings
def store(self) -> None:
self.__client.exec(self.__name + ':store', input_stream=None, output_type=None, return_type=None)
def restore(self) -> None:
self.__client.exec(self.__name + ':restore', input_stream=None, output_type=None, return_type=None)
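# Cc5000Drv: current-driver channel; enable/emission flags, current and voltage
# set/actual/clip values, output filter, feedforward configuration, an
# auxiliary reading, a status word with text and a forced-off flag.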
class Cc5000Drv:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._path = DecopString(client, name + ':path')
self._variant = DecopString(client, name + ':variant')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._emission = DecopBoolean(client, name + ':emission')
self._current_set = MutableDecopReal(client, name + ':current-set')
self._current_offset = MutableDecopReal(client, name + ':current-offset')
self._output_filter = OutputFilter1(client, name + ':output-filter')
self._current_act = DecopReal(client, name + ':current-act')
self._current_clip = MutableDecopReal(client, name + ':current-clip')
self._current_clip_limit = DecopReal(client, name + ':current-clip-limit')
self._voltage_act = DecopReal(client, name + ':voltage-act')
self._voltage_out = DecopReal(client, name + ':voltage-out')
self._voltage_clip = MutableDecopReal(client, name + ':voltage-clip')
self._feedforward_master = MutableDecopInteger(client, name + ':feedforward-master')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._aux = DecopReal(client, name + ':aux')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
self._forced_off = MutableDecopBoolean(client, name + ':forced-off')
@property
def path(self) -> 'DecopString':
return self._path
@property
def variant(self) -> 'DecopString':
return self._variant
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def emission(self) -> 'DecopBoolean':
return self._emission
@property
def current_set(self) -> 'MutableDecopReal':
return self._current_set
@property
def current_offset(self) -> 'MutableDecopReal':
return self._current_offset
@property
def output_filter(self) -> 'OutputFilter1':
return self._output_filter
@property
def current_act(self) -> 'DecopReal':
return self._current_act
@property
def current_clip(self) -> 'MutableDecopReal':
return self._current_clip
@property
def current_clip_limit(self) -> 'DecopReal':
return self._current_clip_limit
@property
def voltage_act(self) -> 'DecopReal':
return self._voltage_act
@property
def voltage_out(self) -> 'DecopReal':
return self._voltage_out
@property
def voltage_clip(self) -> 'MutableDecopReal':
return self._voltage_clip
@property
def feedforward_master(self) -> 'MutableDecopInteger':
return self._feedforward_master
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def aux(self) -> 'DecopReal':
return self._aux
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
@property
def forced_off(self) -> 'MutableDecopBoolean':
return self._forced_off
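# AmpPower: amplifier power monitor; calibrated power and raw photodiode
# readings, calibration offset/factor, power-min/max limits with warning and
# shutdown delays, and a status word with text.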
class AmpPower:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._power = DecopReal(client, name + ':power')
self._photodiode = DecopReal(client, name + ':photodiode')
self._cal_offset = MutableDecopReal(client, name + ':cal-offset')
self._cal_factor = MutableDecopReal(client, name + ':cal-factor')
self._power_min = MutableDecopReal(client, name + ':power-min')
self._power_min_warning_delay = MutableDecopReal(client, name + ':power-min-warning-delay')
self._power_min_shutdown_delay = MutableDecopReal(client, name + ':power-min-shutdown-delay')
self._power_max = MutableDecopReal(client, name + ':power-max')
self._power_max_warning_delay = MutableDecopReal(client, name + ':power-max-warning-delay')
self._power_max_shutdown_delay = MutableDecopReal(client, name + ':power-max-shutdown-delay')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
@property
def power(self) -> 'DecopReal':
return self._power
@property
def photodiode(self) -> 'DecopReal':
return self._photodiode
@property
def cal_offset(self) -> 'MutableDecopReal':
return self._cal_offset
@property
def cal_factor(self) -> 'MutableDecopReal':
return self._cal_factor
@property
def power_min(self) -> 'MutableDecopReal':
return self._power_min
@property
def power_min_warning_delay(self) -> 'MutableDecopReal':
return self._power_min_warning_delay
@property
def power_min_shutdown_delay(self) -> 'MutableDecopReal':
return self._power_min_shutdown_delay
@property
def power_max(self) -> 'MutableDecopReal':
return self._power_max
@property
def power_max_warning_delay(self) -> 'MutableDecopReal':
return self._power_max_warning_delay
@property
def power_max_shutdown_delay(self) -> 'MutableDecopReal':
return self._power_max_shutdown_delay
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
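# AmpSeedonlyCheck: seed-only check of the amplifier; seed and pump flags,
# warning/shutdown delays and status.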
class AmpSeedonlyCheck:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._seed = DecopBoolean(client, name + ':seed')
self._pump = DecopBoolean(client, name + ':pump')
self._warning_delay = MutableDecopReal(client, name + ':warning-delay')
self._shutdown_delay = MutableDecopReal(client, name + ':shutdown-delay')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
@property
def seed(self) -> 'DecopBoolean':
return self._seed
@property
def pump(self) -> 'DecopBoolean':
return self._pump
@property
def warning_delay(self) -> 'MutableDecopReal':
return self._warning_delay
@property
def shutdown_delay(self) -> 'MutableDecopReal':
return self._shutdown_delay
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
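# AmpFactory: factory settings of the amplifier (wavelength, power, current
# driver, TC, power limits and seed-only delays). apply() and retrieve_now()
# execute the corresponding device commands.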
class AmpFactory:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._wavelength = MutableDecopReal(client, name + ':wavelength')
self._power = MutableDecopReal(client, name + ':power')
self._cc = AmpFactoryCc(client, name + ':cc')
self._tc = TcFactorySettings(client, name + ':tc')
self._seed_limits = AmpFactoryPower(client, name + ':seed-limits')
self._output_limits = AmpFactoryPower(client, name + ':output-limits')
self._seedonly_check = AmpFactorySeedonly(client, name + ':seedonly-check')
self._last_modified = DecopString(client, name + ':last-modified')
self._modified = DecopBoolean(client, name + ':modified')
@property
def wavelength(self) -> 'MutableDecopReal':
return self._wavelength
@property
def power(self) -> 'MutableDecopReal':
return self._power
@property
def cc(self) -> 'AmpFactoryCc':
return self._cc
@property
def tc(self) -> 'TcFactorySettings':
return self._tc
@property
def seed_limits(self) -> 'AmpFactoryPower':
return self._seed_limits
@property
def output_limits(self) -> 'AmpFactoryPower':
return self._output_limits
@property
def seedonly_check(self) -> 'AmpFactorySeedonly':
return self._seedonly_check
@property
def last_modified(self) -> 'DecopString':
return self._last_modified
@property
def modified(self) -> 'DecopBoolean':
return self._modified
def apply(self) -> None:
self.__client.exec(self.__name + ':apply', input_stream=None, output_type=None, return_type=None)
def retrieve_now(self) -> None:
self.__client.exec(self.__name + ':retrieve-now', input_stream=None, output_type=None, return_type=None)
class AmpFactoryCc:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._current_set = MutableDecopReal(client, name + ':current-set')
self._current_clip = MutableDecopReal(client, name + ':current-clip')
self._current_clip_modified = DecopBoolean(client, name + ':current-clip-modified')
self._current_clip_last_modified = DecopString(client, name + ':current-clip-last-modified')
self._voltage_clip = MutableDecopReal(client, name + ':voltage-clip')
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def current_set(self) -> 'MutableDecopReal':
return self._current_set
@property
def current_clip(self) -> 'MutableDecopReal':
return self._current_clip
@property
def current_clip_modified(self) -> 'DecopBoolean':
return self._current_clip_modified
@property
def current_clip_last_modified(self) -> 'DecopString':
return self._current_clip_last_modified
@property
def voltage_clip(self) -> 'MutableDecopReal':
return self._voltage_clip
class AmpFactoryPower:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._cal_offset = MutableDecopReal(client, name + ':cal-offset')
self._cal_factor = MutableDecopReal(client, name + ':cal-factor')
self._power_min = MutableDecopReal(client, name + ':power-min')
self._power_min_warning_delay = MutableDecopReal(client, name + ':power-min-warning-delay')
self._power_min_shutdown_delay = MutableDecopReal(client, name + ':power-min-shutdown-delay')
self._power_max = MutableDecopReal(client, name + ':power-max')
self._power_max_warning_delay = MutableDecopReal(client, name + ':power-max-warning-delay')
self._power_max_shutdown_delay = MutableDecopReal(client, name + ':power-max-shutdown-delay')
@property
def cal_offset(self) -> 'MutableDecopReal':
return self._cal_offset
@property
def cal_factor(self) -> 'MutableDecopReal':
return self._cal_factor
@property
def power_min(self) -> 'MutableDecopReal':
return self._power_min
@property
def power_min_warning_delay(self) -> 'MutableDecopReal':
return self._power_min_warning_delay
@property
def power_min_shutdown_delay(self) -> 'MutableDecopReal':
return self._power_min_shutdown_delay
@property
def power_max(self) -> 'MutableDecopReal':
return self._power_max
@property
def power_max_warning_delay(self) -> 'MutableDecopReal':
return self._power_max_warning_delay
@property
def power_max_shutdown_delay(self) -> 'MutableDecopReal':
return self._power_max_shutdown_delay
class AmpFactorySeedonly:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._warning_delay = MutableDecopReal(client, name + ':warning-delay')
self._shutdown_delay = MutableDecopReal(client, name + ':shutdown-delay')
@property
def warning_delay(self) -> 'MutableDecopReal':
return self._warning_delay
@property
def shutdown_delay(self) -> 'MutableDecopReal':
return self._shutdown_delay
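# Siggen: signal generator; enable/hold flags, signal type, frequency, phase
# shift, output channel, amplitude/offset and start/end values.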
class Siggen:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._hold = MutableDecopBoolean(client, name + ':hold')
self._signal_type = MutableDecopInteger(client, name + ':signal-type')
self._frequency = MutableDecopReal(client, name + ':frequency')
self._phase_shift = MutableDecopReal(client, name + ':phase-shift')
self._output_channel = MutableDecopInteger(client, name + ':output-channel')
self._unit = DecopString(client, name + ':unit')
self._amplitude = MutableDecopReal(client, name + ':amplitude')
self._offset = MutableDecopReal(client, name + ':offset')
self._start = MutableDecopReal(client, name + ':start')
self._end = MutableDecopReal(client, name + ':end')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def hold(self) -> 'MutableDecopBoolean':
return self._hold
@property
def signal_type(self) -> 'MutableDecopInteger':
return self._signal_type
@property
def frequency(self) -> 'MutableDecopReal':
return self._frequency
@property
def phase_shift(self) -> 'MutableDecopReal':
return self._phase_shift
@property
def output_channel(self) -> 'MutableDecopInteger':
return self._output_channel
@property
def unit(self) -> 'DecopString':
return self._unit
@property
def amplitude(self) -> 'MutableDecopReal':
return self._amplitude
@property
def offset(self) -> 'MutableDecopReal':
return self._offset
@property
def start(self) -> 'MutableDecopReal':
return self._start
@property
def end(self) -> 'MutableDecopReal':
return self._end
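# ScopeT: built-in scope; variant and update rate, two Y channels plus an
# X axis, timescale and the binary data trace.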
class ScopeT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._variant = MutableDecopInteger(client, name + ':variant')
self._update_rate = MutableDecopInteger(client, name + ':update-rate')
self._channel1 = ScopeChannelT(client, name + ':channel1')
self._channel2 = ScopeChannelT(client, name + ':channel2')
self._channelx = ScopeXAxisT(client, name + ':channelx')
self._timescale = MutableDecopReal(client, name + ':timescale')
self._data = DecopBinary(client, name + ':data')
@property
def variant(self) -> 'MutableDecopInteger':
return self._variant
@property
def update_rate(self) -> 'MutableDecopInteger':
return self._update_rate
@property
def channel1(self) -> 'ScopeChannelT':
return self._channel1
@property
def channel2(self) -> 'ScopeChannelT':
return self._channel2
@property
def channelx(self) -> 'ScopeXAxisT':
return self._channelx
@property
def timescale(self) -> 'MutableDecopReal':
return self._timescale
@property
def data(self) -> 'DecopBinary':
return self._data
class ScopeChannelT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._signal = MutableDecopInteger(client, name + ':signal')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._unit = DecopString(client, name + ':unit')
self._name = DecopString(client, name + ':name')
@property
def signal(self) -> 'MutableDecopInteger':
return self._signal
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def unit(self) -> 'DecopString':
return self._unit
@property
def name(self) -> 'DecopString':
return self._name
class ScopeXAxisT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._xy_signal = MutableDecopInteger(client, name + ':xy-signal')
self._scope_timescale = MutableDecopReal(client, name + ':scope-timescale')
self._spectrum_range = MutableDecopReal(client, name + ':spectrum-range')
self._spectrum_omit_dc = MutableDecopBoolean(client, name + ':spectrum-omit-dc')
self._unit = DecopString(client, name + ':unit')
self._name = DecopString(client, name + ':name')
@property
def xy_signal(self) -> 'MutableDecopInteger':
return self._xy_signal
@property
def scope_timescale(self) -> 'MutableDecopReal':
return self._scope_timescale
@property
def spectrum_range(self) -> 'MutableDecopReal':
return self._spectrum_range
@property
def spectrum_omit_dc(self) -> 'MutableDecopBoolean':
return self._spectrum_omit_dc
@property
def unit(self) -> 'DecopString':
return self._unit
@property
def name(self) -> 'DecopString':
return self._name
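# Nlo: nonlinear-optics laser-head subsystem; alignment servos, photodiodes,
# power optimization and the SHG/FHG stages (presumably second- and
# fourth-harmonic generation).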
class Nlo:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._servo = NloLaserHeadServos(client, name + ':servo')
self._pd = NloLaserHeadPhotoDiodes(client, name + ':pd')
self._power_optimization = NloLaserHeadPowerOptimization(client, name + ':power-optimization')
self._shg = Shg(client, name + ':shg')
self._fhg = Fhg(client, name + ':fhg')
self._ssw_ver = DecopString(client, name + ':ssw-ver')
@property
def servo(self) -> 'NloLaserHeadServos':
return self._servo
@property
def pd(self) -> 'NloLaserHeadPhotoDiodes':
return self._pd
@property
def power_optimization(self) -> 'NloLaserHeadPowerOptimization':
return self._power_optimization
@property
def shg(self) -> 'Shg':
return self._shg
@property
def fhg(self) -> 'Fhg':
return self._fhg
@property
def ssw_ver(self) -> 'DecopString':
return self._ssw_ver
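# NloLaserHeadServos: PWM alignment servos of the NLO laser head (TA, SHG, FHG,
# fiber and UV), with commands to center individual servo groups or all of them.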
class NloLaserHeadServos:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._ta1_hor = NloLaserHeadServoPwm(client, name + ':ta1-hor')
self._ta1_vert = NloLaserHeadServoPwm(client, name + ':ta1-vert')
self._ta2_hor = NloLaserHeadServoPwm(client, name + ':ta2-hor')
self._ta2_vert = NloLaserHeadServoPwm(client, name + ':ta2-vert')
self._shg1_hor = NloLaserHeadServoPwm(client, name + ':shg1-hor')
self._shg1_vert = NloLaserHeadServoPwm(client, name + ':shg1-vert')
self._shg2_hor = NloLaserHeadServoPwm(client, name + ':shg2-hor')
self._shg2_vert = NloLaserHeadServoPwm(client, name + ':shg2-vert')
self._fhg1_hor = NloLaserHeadServoPwm(client, name + ':fhg1-hor')
self._fhg1_vert = NloLaserHeadServoPwm(client, name + ':fhg1-vert')
self._fhg2_hor = NloLaserHeadServoPwm(client, name + ':fhg2-hor')
self._fhg2_vert = NloLaserHeadServoPwm(client, name + ':fhg2-vert')
self._fiber1_hor = NloLaserHeadServoPwm(client, name + ':fiber1-hor')
self._fiber1_vert = NloLaserHeadServoPwm(client, name + ':fiber1-vert')
self._fiber2_hor = NloLaserHeadServoPwm(client, name + ':fiber2-hor')
self._fiber2_vert = NloLaserHeadServoPwm(client, name + ':fiber2-vert')
self._uv_outcpl = NloLaserHeadServoPwm(client, name + ':uv-outcpl')
self._uv_cryst = NloLaserHeadServoPwm(client, name + ':uv-cryst')
@property
def ta1_hor(self) -> 'NloLaserHeadServoPwm':
return self._ta1_hor
@property
def ta1_vert(self) -> 'NloLaserHeadServoPwm':
return self._ta1_vert
@property
def ta2_hor(self) -> 'NloLaserHeadServoPwm':
return self._ta2_hor
@property
def ta2_vert(self) -> 'NloLaserHeadServoPwm':
return self._ta2_vert
@property
def shg1_hor(self) -> 'NloLaserHeadServoPwm':
return self._shg1_hor
@property
def shg1_vert(self) -> 'NloLaserHeadServoPwm':
return self._shg1_vert
@property
def shg2_hor(self) -> 'NloLaserHeadServoPwm':
return self._shg2_hor
@property
def shg2_vert(self) -> 'NloLaserHeadServoPwm':
return self._shg2_vert
@property
def fhg1_hor(self) -> 'NloLaserHeadServoPwm':
return self._fhg1_hor
@property
def fhg1_vert(self) -> 'NloLaserHeadServoPwm':
return self._fhg1_vert
@property
def fhg2_hor(self) -> 'NloLaserHeadServoPwm':
return self._fhg2_hor
@property
def fhg2_vert(self) -> 'NloLaserHeadServoPwm':
return self._fhg2_vert
@property
def fiber1_hor(self) -> 'NloLaserHeadServoPwm':
return self._fiber1_hor
@property
def fiber1_vert(self) -> 'NloLaserHeadServoPwm':
return self._fiber1_vert
@property
def fiber2_hor(self) -> 'NloLaserHeadServoPwm':
return self._fiber2_hor
@property
def fiber2_vert(self) -> 'NloLaserHeadServoPwm':
return self._fiber2_vert
@property
def uv_outcpl(self) -> 'NloLaserHeadServoPwm':
return self._uv_outcpl
@property
def uv_cryst(self) -> 'NloLaserHeadServoPwm':
return self._uv_cryst
def center_ta_servos(self) -> None:
self.__client.exec(self.__name + ':center-ta-servos', input_stream=None, output_type=None, return_type=None)
def center_shg_servos(self) -> None:
self.__client.exec(self.__name + ':center-shg-servos', input_stream=None, output_type=None, return_type=None)
def center_fhg_servos(self) -> None:
self.__client.exec(self.__name + ':center-fhg-servos', input_stream=None, output_type=None, return_type=None)
def center_fiber_servos(self) -> None:
self.__client.exec(self.__name + ':center-fiber-servos', input_stream=None, output_type=None, return_type=None)
def center_all_servos(self) -> None:
self.__client.exec(self.__name + ':center-all-servos', input_stream=None, output_type=None, return_type=None)
class NloLaserHeadServoPwm:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._display_name = DecopString(client, name + ':display-name')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._value = MutableDecopInteger(client, name + ':value')
@property
def display_name(self) -> 'DecopString':
return self._display_name
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def value(self) -> 'MutableDecopInteger':
return self._value
def center_servo(self) -> None:
self.__client.exec(self.__name + ':center-servo', input_stream=None, output_type=None, return_type=None)
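# NloLaserHeadPhotoDiodes: photodiode readings of the NLO laser head (DL, amp,
# fiber, SHG and FHG diodes plus the corresponding digilock and PDH detectors).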
class NloLaserHeadPhotoDiodes:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._dl = NloLaserHeadNloPhotodiode(client, name + ':dl')
self._amp = NloLaserHeadNloPhotodiode(client, name + ':amp')
self._fiber = NloLaserHeadNloPhotodiode(client, name + ':fiber')
self._shg = NloLaserHeadNloPhotodiode(client, name + ':shg')
self._shg_int = NloLaserHeadNloDigilockPhotodiode(client, name + ':shg-int')
self._shg_pdh_dc = NloLaserHeadNloDigilockPhotodiode(client, name + ':shg-pdh-dc')
self._shg_pdh_rf = NloLaserHeadNloPdhPhotodiode(client, name + ':shg-pdh-rf')
self._fhg = NloLaserHeadNloPhotodiode(client, name + ':fhg')
self._fhg_int = NloLaserHeadNloDigilockPhotodiode(client, name + ':fhg-int')
self._fhg_pdh_dc = NloLaserHeadNloDigilockPhotodiode(client, name + ':fhg-pdh-dc')
self._fhg_pdh_rf = NloLaserHeadNloPdhPhotodiode(client, name + ':fhg-pdh-rf')
@property
def dl(self) -> 'NloLaserHeadNloPhotodiode':
return self._dl
@property
def amp(self) -> 'NloLaserHeadNloPhotodiode':
return self._amp
@property
def fiber(self) -> 'NloLaserHeadNloPhotodiode':
return self._fiber
@property
def shg(self) -> 'NloLaserHeadNloPhotodiode':
return self._shg
@property
def shg_int(self) -> 'NloLaserHeadNloDigilockPhotodiode':
return self._shg_int
@property
def shg_pdh_dc(self) -> 'NloLaserHeadNloDigilockPhotodiode':
return self._shg_pdh_dc
@property
def shg_pdh_rf(self) -> 'NloLaserHeadNloPdhPhotodiode':
return self._shg_pdh_rf
@property
def fhg(self) -> 'NloLaserHeadNloPhotodiode':
return self._fhg
@property
def fhg_int(self) -> 'NloLaserHeadNloDigilockPhotodiode':
return self._fhg_int
@property
def fhg_pdh_dc(self) -> 'NloLaserHeadNloDigilockPhotodiode':
return self._fhg_pdh_dc
@property
def fhg_pdh_rf(self) -> 'NloLaserHeadNloPdhPhotodiode':
return self._fhg_pdh_rf
class NloLaserHeadNloPhotodiode:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._power = DecopReal(client, name + ':power')
self._photodiode = DecopReal(client, name + ':photodiode')
self._cal_offset = MutableDecopReal(client, name + ':cal-offset')
self._cal_factor = MutableDecopReal(client, name + ':cal-factor')
@property
def power(self) -> 'DecopReal':
return self._power
@property
def photodiode(self) -> 'DecopReal':
return self._photodiode
@property
def cal_offset(self) -> 'MutableDecopReal':
return self._cal_offset
@property
def cal_factor(self) -> 'MutableDecopReal':
return self._cal_factor
class NloLaserHeadNloDigilockPhotodiode:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._photodiode = DecopReal(client, name + ':photodiode')
self._cal_offset = MutableDecopReal(client, name + ':cal-offset')
@property
def photodiode(self) -> 'DecopReal':
return self._photodiode
@property
def cal_offset(self) -> 'MutableDecopReal':
return self._cal_offset
class NloLaserHeadNloPdhPhotodiode:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._photodiode = DecopReal(client, name + ':photodiode')
self._gain = MutableDecopReal(client, name + ':gain')
@property
def photodiode(self) -> 'DecopReal':
return self._photodiode
@property
def gain(self) -> 'MutableDecopReal':
return self._gain
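# NloLaserHeadPowerOptimization: automatic power optimization; overall
# progress/status, per-stage configuration (stage1..stage5), binary progress
# traces and an abort flag. The start_optimization_* commands return an
# integer (presumably a result code).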
class NloLaserHeadPowerOptimization:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._ongoing = DecopBoolean(client, name + ':ongoing')
self._progress = DecopInteger(client, name + ':progress')
self._status = DecopInteger(client, name + ':status')
self._status_string = DecopString(client, name + ':status-string')
self._shg_advanced = MutableDecopBoolean(client, name + ':shg-advanced')
self._stage1 = NloLaserHeadStage(client, name + ':stage1')
self._stage2 = NloLaserHeadStage(client, name + ':stage2')
self._stage3 = NloLaserHeadStage(client, name + ':stage3')
self._stage4 = NloLaserHeadStage(client, name + ':stage4')
self._stage5 = NloLaserHeadStage(client, name + ':stage5')
self._progress_data_amp = DecopBinary(client, name + ':progress-data-amp')
self._progress_data_shg = DecopBinary(client, name + ':progress-data-shg')
self._progress_data_fiber = DecopBinary(client, name + ':progress-data-fiber')
self._progress_data_fhg = DecopBinary(client, name + ':progress-data-fhg')
self._abort = MutableDecopBoolean(client, name + ':abort')
@property
def ongoing(self) -> 'DecopBoolean':
return self._ongoing
@property
def progress(self) -> 'DecopInteger':
return self._progress
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_string(self) -> 'DecopString':
return self._status_string
@property
def shg_advanced(self) -> 'MutableDecopBoolean':
return self._shg_advanced
@property
def stage1(self) -> 'NloLaserHeadStage':
return self._stage1
@property
def stage2(self) -> 'NloLaserHeadStage':
return self._stage2
@property
def stage3(self) -> 'NloLaserHeadStage':
return self._stage3
@property
def stage4(self) -> 'NloLaserHeadStage':
return self._stage4
@property
def stage5(self) -> 'NloLaserHeadStage':
return self._stage5
@property
def progress_data_amp(self) -> 'DecopBinary':
return self._progress_data_amp
@property
def progress_data_shg(self) -> 'DecopBinary':
return self._progress_data_shg
@property
def progress_data_fiber(self) -> 'DecopBinary':
return self._progress_data_fiber
@property
def progress_data_fhg(self) -> 'DecopBinary':
return self._progress_data_fhg
@property
def abort(self) -> 'MutableDecopBoolean':
return self._abort
def start_optimization_all(self) -> int:
return self.__client.exec(self.__name + ':start-optimization-all', input_stream=None, output_type=None, return_type=int)
def start_optimization_amp(self) -> int:
return self.__client.exec(self.__name + ':start-optimization-amp', input_stream=None, output_type=None, return_type=int)
def start_optimization_shg(self) -> int:
return self.__client.exec(self.__name + ':start-optimization-shg', input_stream=None, output_type=None, return_type=int)
def start_optimization_fiber(self) -> int:
return self.__client.exec(self.__name + ':start-optimization-fiber', input_stream=None, output_type=None, return_type=int)
def start_optimization_fhg(self) -> int:
return self.__client.exec(self.__name + ':start-optimization-fhg', input_stream=None, output_type=None, return_type=int)
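# Usage sketch (not part of the generated API): assuming the Nlo object defined
# above is available as `nlo` on a connected client, an SHG power optimization
# could be started and monitored roughly like this (only names defined in this
# file are used; the get/set semantics of the Decop* helpers are defined
# elsewhere in this file):
#
#     result = nlo.power_optimization.start_optimization_shg()  # int return value
#     progress = nlo.power_optimization.progress                # DecopInteger
#     abort_flag = nlo.power_optimization.abort                 # MutableDecopBoolean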
class NloLaserHeadStage:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._input = NloLaserHeadOptInput(client, name + ':input')
self._progress = DecopInteger(client, name + ':progress')
self._optimization_in_progress = DecopBoolean(client, name + ':optimization-in-progress')
self._restore_on_abort = MutableDecopBoolean(client, name + ':restore-on-abort')
self._restore_on_regress = MutableDecopBoolean(client, name + ':restore-on-regress')
self._regress_tolerance = MutableDecopInteger(client, name + ':regress-tolerance')
@property
def input(self) -> 'NloLaserHeadOptInput':
return self._input
@property
def progress(self) -> 'DecopInteger':
return self._progress
@property
def optimization_in_progress(self) -> 'DecopBoolean':
return self._optimization_in_progress
@property
def restore_on_abort(self) -> 'MutableDecopBoolean':
return self._restore_on_abort
@property
def restore_on_regress(self) -> 'MutableDecopBoolean':
return self._restore_on_regress
@property
def regress_tolerance(self) -> 'MutableDecopInteger':
return self._regress_tolerance
def start_optimization(self) -> int:
return self.__client.exec(self.__name + ':start-optimization', input_stream=None, output_type=None, return_type=int)
class NloLaserHeadOptInput:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._value_calibrated = DecopReal(client, name + ':value-calibrated')
@property
def value_calibrated(self) -> 'DecopReal':
return self._value_calibrated
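# Shg: SHG stage of the NLO laser head; temperature control (':tc'), piezo
# control (':pc'), scan signal generator, scope, cavity lock and factory
# settings. store()/restore() execute the corresponding device commands.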
class Shg:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._tc = TcChannel(client, name + ':tc')
self._pc = PiezoDrv1(client, name + ':pc')
self._scan = NloLaserHeadSiggen(client, name + ':scan')
self._scope = NloLaserHeadScopeT(client, name + ':scope')
self._lock = NloLaserHeadLockShg(client, name + ':lock')
self._factory_settings = ShgFactorySettings(client, name + ':factory-settings')
@property
def tc(self) -> 'TcChannel':
return self._tc
@property
def pc(self) -> 'PiezoDrv1':
return self._pc
@property
def scan(self) -> 'NloLaserHeadSiggen':
return self._scan
@property
def scope(self) -> 'NloLaserHeadScopeT':
return self._scope
@property
def lock(self) -> 'NloLaserHeadLockShg':
return self._lock
@property
def factory_settings(self) -> 'ShgFactorySettings':
return self._factory_settings
def store(self) -> None:
self.__client.exec(self.__name + ':store', input_stream=None, output_type=None, return_type=None)
def restore(self) -> None:
self.__client.exec(self.__name + ':restore', input_stream=None, output_type=None, return_type=None)
class NloLaserHeadSiggen:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._frequency = MutableDecopReal(client, name + ':frequency')
self._amplitude = MutableDecopReal(client, name + ':amplitude')
self._offset = MutableDecopReal(client, name + ':offset')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def frequency(self) -> 'MutableDecopReal':
return self._frequency
@property
def amplitude(self) -> 'MutableDecopReal':
return self._amplitude
@property
def offset(self) -> 'MutableDecopReal':
return self._offset
class NloLaserHeadScopeT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._variant = MutableDecopInteger(client, name + ':variant')
self._update_rate = MutableDecopInteger(client, name + ':update-rate')
self._channel1 = NloLaserHeadScopeChannelT(client, name + ':channel1')
self._channel2 = NloLaserHeadScopeChannelT(client, name + ':channel2')
self._channelx = NloLaserHeadScopeXAxisT(client, name + ':channelx')
self._timescale = MutableDecopReal(client, name + ':timescale')
self._data = DecopBinary(client, name + ':data')
@property
def variant(self) -> 'MutableDecopInteger':
return self._variant
@property
def update_rate(self) -> 'MutableDecopInteger':
return self._update_rate
@property
def channel1(self) -> 'NloLaserHeadScopeChannelT':
return self._channel1
@property
def channel2(self) -> 'NloLaserHeadScopeChannelT':
return self._channel2
@property
def channelx(self) -> 'NloLaserHeadScopeXAxisT':
return self._channelx
@property
def timescale(self) -> 'MutableDecopReal':
return self._timescale
@property
def data(self) -> 'DecopBinary':
return self._data
class NloLaserHeadScopeChannelT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._signal = MutableDecopInteger(client, name + ':signal')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._unit = DecopString(client, name + ':unit')
self._name = DecopString(client, name + ':name')
@property
def signal(self) -> 'MutableDecopInteger':
return self._signal
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def unit(self) -> 'DecopString':
return self._unit
@property
def name(self) -> 'DecopString':
return self._name
class NloLaserHeadScopeXAxisT:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._xy_signal = MutableDecopInteger(client, name + ':xy-signal')
self._scope_timescale = MutableDecopReal(client, name + ':scope-timescale')
self._spectrum_range = MutableDecopReal(client, name + ':spectrum-range')
self._spectrum_omit_dc = MutableDecopBoolean(client, name + ':spectrum-omit-dc')
self._unit = DecopString(client, name + ':unit')
self._name = DecopString(client, name + ':name')
@property
def xy_signal(self) -> 'MutableDecopInteger':
return self._xy_signal
@property
def scope_timescale(self) -> 'MutableDecopReal':
return self._scope_timescale
@property
def spectrum_range(self) -> 'MutableDecopReal':
return self._spectrum_range
@property
def spectrum_omit_dc(self) -> 'MutableDecopBoolean':
return self._spectrum_omit_dc
@property
def unit(self) -> 'DecopString':
return self._unit
@property
def name(self) -> 'DecopString':
return self._name
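# NloLaserHeadLockShg: cavity lock of the SHG stage; lock state and enable
# flag, PID selection and setpoint, relock and lock-window settings, two PID
# blocks, analog DL gain, local-oscillator settings, the fast/slow cavity
# piezo voltages and a background trace.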
class NloLaserHeadLockShg:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._state = DecopInteger(client, name + ':state')
self._state_txt = DecopString(client, name + ':state-txt')
self._lock_enabled = MutableDecopBoolean(client, name + ':lock-enabled')
self._pid_selection = MutableDecopInteger(client, name + ':pid-selection')
self._setpoint = MutableDecopReal(client, name + ':setpoint')
self._relock = NloLaserHeadRelock(client, name + ':relock')
self._window = NloLaserHeadWindow(client, name + ':window')
self._pid1 = NloLaserHeadPid(client, name + ':pid1')
self._pid2 = NloLaserHeadPid(client, name + ':pid2')
self._analog_dl_gain = NloLaserHeadMinifalc(client, name + ':analog-dl-gain')
self._local_oscillator = NloLaserHeadLocalOscillatorShg(client, name + ':local-oscillator')
self._cavity_fast_pzt_voltage = MutableDecopReal(client, name + ':cavity-fast-pzt-voltage')
self._cavity_slow_pzt_voltage = MutableDecopReal(client, name + ':cavity-slow-pzt-voltage')
self._background_trace = DecopBinary(client, name + ':background-trace')
@property
def state(self) -> 'DecopInteger':
return self._state
@property
def state_txt(self) -> 'DecopString':
return self._state_txt
@property
def lock_enabled(self) -> 'MutableDecopBoolean':
return self._lock_enabled
@property
def pid_selection(self) -> 'MutableDecopInteger':
return self._pid_selection
@property
def setpoint(self) -> 'MutableDecopReal':
return self._setpoint
@property
def relock(self) -> 'NloLaserHeadRelock':
return self._relock
@property
def window(self) -> 'NloLaserHeadWindow':
return self._window
@property
def pid1(self) -> 'NloLaserHeadPid':
return self._pid1
@property
def pid2(self) -> 'NloLaserHeadPid':
return self._pid2
@property
def analog_dl_gain(self) -> 'NloLaserHeadMinifalc':
return self._analog_dl_gain
@property
def local_oscillator(self) -> 'NloLaserHeadLocalOscillatorShg':
return self._local_oscillator
@property
def cavity_fast_pzt_voltage(self) -> 'MutableDecopReal':
return self._cavity_fast_pzt_voltage
@property
def cavity_slow_pzt_voltage(self) -> 'MutableDecopReal':
return self._cavity_slow_pzt_voltage
@property
def background_trace(self) -> 'DecopBinary':
return self._background_trace
class NloLaserHeadRelock:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._frequency = MutableDecopReal(client, name + ':frequency')
self._amplitude = MutableDecopReal(client, name + ':amplitude')
self._delay = MutableDecopReal(client, name + ':delay')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def frequency(self) -> 'MutableDecopReal':
return self._frequency
@property
def amplitude(self) -> 'MutableDecopReal':
return self._amplitude
@property
def delay(self) -> 'MutableDecopReal':
return self._delay
class NloLaserHeadWindow:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._input_channel = MutableDecopInteger(client, name + ':input-channel')
self._threshold = MutableDecopReal(client, name + ':threshold')
self._level_hysteresis = MutableDecopReal(client, name + ':level-hysteresis')
@property
def input_channel(self) -> 'MutableDecopInteger':
return self._input_channel
@property
def threshold(self) -> 'MutableDecopReal':
return self._threshold
@property
def level_hysteresis(self) -> 'MutableDecopReal':
return self._level_hysteresis
class NloLaserHeadPid:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._gain = NloLaserHeadGain(client, name + ':gain')
@property
def gain(self) -> 'NloLaserHeadGain':
return self._gain
class NloLaserHeadGain:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._all = MutableDecopReal(client, name + ':all')
self._p = MutableDecopReal(client, name + ':p')
self._i = MutableDecopReal(client, name + ':i')
self._d = MutableDecopReal(client, name + ':d')
self._i_cutoff = MutableDecopReal(client, name + ':i-cutoff')
self._i_cutoff_enabled = MutableDecopBoolean(client, name + ':i-cutoff-enabled')
@property
def all(self) -> 'MutableDecopReal':
return self._all
@property
def p(self) -> 'MutableDecopReal':
return self._p
@property
def i(self) -> 'MutableDecopReal':
return self._i
@property
def d(self) -> 'MutableDecopReal':
return self._d
@property
def i_cutoff(self) -> 'MutableDecopReal':
return self._i_cutoff
@property
def i_cutoff_enabled(self) -> 'MutableDecopBoolean':
return self._i_cutoff_enabled
class NloLaserHeadMinifalc:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._p_gain = MutableDecopReal(client, name + ':p-gain')
@property
def p_gain(self) -> 'MutableDecopReal':
return self._p_gain
class NloLaserHeadLocalOscillatorShg:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._coupled_modulation = MutableDecopBoolean(client, name + ':coupled-modulation')
self._use_fast_oscillator = MutableDecopBoolean(client, name + ':use-fast-oscillator')
self._use_external_oscillator = MutableDecopBoolean(client, name + ':use-external-oscillator')
self._amplitude = MutableDecopReal(client, name + ':amplitude')
self._attenuation_raw = MutableDecopInteger(client, name + ':attenuation-raw')
self._phase_shift = MutableDecopReal(client, name + ':phase-shift')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def coupled_modulation(self) -> 'MutableDecopBoolean':
return self._coupled_modulation
@property
def use_fast_oscillator(self) -> 'MutableDecopBoolean':
return self._use_fast_oscillator
@property
def use_external_oscillator(self) -> 'MutableDecopBoolean':
return self._use_external_oscillator
@property
def amplitude(self) -> 'MutableDecopReal':
return self._amplitude
@property
def attenuation_raw(self) -> 'MutableDecopInteger':
return self._attenuation_raw
@property
def phase_shift(self) -> 'MutableDecopReal':
return self._phase_shift
def auto_pdh(self) -> None:
self.__client.exec(self.__name + ':auto-pdh', input_stream=None, output_type=None, return_type=None)
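# ShgFactorySettings: factory settings of the SHG stage (TC, piezo, photodiode
# and lock defaults). apply() and retrieve_now() execute the corresponding
# device commands.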
class ShgFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._modified = DecopBoolean(client, name + ':modified')
self._tc = NloLaserHeadTcFactorySettings(client, name + ':tc')
self._pc = NloLaserHeadPcFactorySettings(client, name + ':pc')
self._pd = NloLaserHeadShgPhotodiodesFactorySettings(client, name + ':pd')
self._lock = NloLaserHeadLockFactorySettings(client, name + ':lock')
@property
def modified(self) -> 'DecopBoolean':
return self._modified
@property
def tc(self) -> 'NloLaserHeadTcFactorySettings':
return self._tc
@property
def pc(self) -> 'NloLaserHeadPcFactorySettings':
return self._pc
@property
def pd(self) -> 'NloLaserHeadShgPhotodiodesFactorySettings':
return self._pd
@property
def lock(self) -> 'NloLaserHeadLockFactorySettings':
return self._lock
def apply(self) -> None:
self.__client.exec(self.__name + ':apply', input_stream=None, output_type=None, return_type=None)
def retrieve_now(self) -> None:
self.__client.exec(self.__name + ':retrieve-now', input_stream=None, output_type=None, return_type=None)
class NloLaserHeadTcFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._temp_min = MutableDecopReal(client, name + ':temp-min')
self._temp_max = MutableDecopReal(client, name + ':temp-max')
self._temp_set = MutableDecopReal(client, name + ':temp-set')
self._temp_roc_limit = MutableDecopReal(client, name + ':temp-roc-limit')
self._temp_roc_enabled = MutableDecopBoolean(client, name + ':temp-roc-enabled')
self._current_max = MutableDecopReal(client, name + ':current-max')
self._current_min = MutableDecopReal(client, name + ':current-min')
self._p_gain = MutableDecopReal(client, name + ':p-gain')
self._i_gain = MutableDecopReal(client, name + ':i-gain')
self._d_gain = MutableDecopReal(client, name + ':d-gain')
self._c_gain = MutableDecopReal(client, name + ':c-gain')
self._ok_tolerance = MutableDecopReal(client, name + ':ok-tolerance')
self._ok_time = MutableDecopReal(client, name + ':ok-time')
self._timeout = MutableDecopInteger(client, name + ':timeout')
self._power_source = MutableDecopInteger(client, name + ':power-source')
self._ntc_series_resistance = MutableDecopReal(client, name + ':ntc-series-resistance')
@property
def temp_min(self) -> 'MutableDecopReal':
return self._temp_min
@property
def temp_max(self) -> 'MutableDecopReal':
return self._temp_max
@property
def temp_set(self) -> 'MutableDecopReal':
return self._temp_set
@property
def temp_roc_limit(self) -> 'MutableDecopReal':
return self._temp_roc_limit
@property
def temp_roc_enabled(self) -> 'MutableDecopBoolean':
return self._temp_roc_enabled
@property
def current_max(self) -> 'MutableDecopReal':
return self._current_max
@property
def current_min(self) -> 'MutableDecopReal':
return self._current_min
@property
def p_gain(self) -> 'MutableDecopReal':
return self._p_gain
@property
def i_gain(self) -> 'MutableDecopReal':
return self._i_gain
@property
def d_gain(self) -> 'MutableDecopReal':
return self._d_gain
@property
def c_gain(self) -> 'MutableDecopReal':
return self._c_gain
@property
def ok_tolerance(self) -> 'MutableDecopReal':
return self._ok_tolerance
@property
def ok_time(self) -> 'MutableDecopReal':
return self._ok_time
@property
def timeout(self) -> 'MutableDecopInteger':
return self._timeout
@property
def power_source(self) -> 'MutableDecopInteger':
return self._power_source
@property
def ntc_series_resistance(self) -> 'MutableDecopReal':
return self._ntc_series_resistance
class NloLaserHeadPcFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._voltage_min = MutableDecopReal(client, name + ':voltage-min')
self._voltage_max = MutableDecopReal(client, name + ':voltage-max')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._capacitance = MutableDecopReal(client, name + ':capacitance')
self._scan_offset = MutableDecopReal(client, name + ':scan-offset')
self._scan_amplitude = MutableDecopReal(client, name + ':scan-amplitude')
self._scan_frequency = MutableDecopReal(client, name + ':scan-frequency')
@property
def voltage_min(self) -> 'MutableDecopReal':
return self._voltage_min
@property
def voltage_max(self) -> 'MutableDecopReal':
return self._voltage_max
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def capacitance(self) -> 'MutableDecopReal':
return self._capacitance
@property
def scan_offset(self) -> 'MutableDecopReal':
return self._scan_offset
@property
def scan_amplitude(self) -> 'MutableDecopReal':
return self._scan_amplitude
@property
def scan_frequency(self) -> 'MutableDecopReal':
return self._scan_frequency
class NloLaserHeadShgPhotodiodesFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._shg = NloLaserHeadPdFactorySettings(client, name + ':shg')
self._fiber = NloLaserHeadPdFactorySettings(client, name + ':fiber')
self._int = NloLaserHeadPdDigilockFactorySettings(client, name + ':int')
self._pdh_dc = NloLaserHeadPdDigilockFactorySettings(client, name + ':pdh-dc')
self._pdh_rf = NloLaserHeadPdPdhFactorySettings(client, name + ':pdh-rf')
@property
def shg(self) -> 'NloLaserHeadPdFactorySettings':
return self._shg
@property
def fiber(self) -> 'NloLaserHeadPdFactorySettings':
return self._fiber
@property
def int(self) -> 'NloLaserHeadPdDigilockFactorySettings':
return self._int
@property
def pdh_dc(self) -> 'NloLaserHeadPdDigilockFactorySettings':
return self._pdh_dc
@property
def pdh_rf(self) -> 'NloLaserHeadPdPdhFactorySettings':
return self._pdh_rf
class NloLaserHeadPdFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._cal_offset = MutableDecopReal(client, name + ':cal-offset')
self._cal_factor = MutableDecopReal(client, name + ':cal-factor')
@property
def cal_offset(self) -> 'MutableDecopReal':
return self._cal_offset
@property
def cal_factor(self) -> 'MutableDecopReal':
return self._cal_factor
class NloLaserHeadPdDigilockFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._cal_offset = MutableDecopReal(client, name + ':cal-offset')
@property
def cal_offset(self) -> 'MutableDecopReal':
return self._cal_offset
class NloLaserHeadPdPdhFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._gain = MutableDecopReal(client, name + ':gain')
@property
def gain(self) -> 'MutableDecopReal':
return self._gain
class NloLaserHeadLockFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._pid_selection = MutableDecopInteger(client, name + ':pid-selection')
self._setpoint = MutableDecopReal(client, name + ':setpoint')
self._relock = NloLaserHeadRelockFactorySettings(client, name + ':relock')
self._window = NloLaserHeadLockWindowFactorySettings(client, name + ':window')
self._pid1_gain = NloLaserHeadPidGainFactorySettings(client, name + ':pid1-gain')
self._pid2_gain = NloLaserHeadPidGainFactorySettings(client, name + ':pid2-gain')
self._analog_p_gain = MutableDecopReal(client, name + ':analog-p-gain')
self._local_oscillator = NloLaserHeadLocalOscillatorFactorySettings(client, name + ':local-oscillator')
@property
def pid_selection(self) -> 'MutableDecopInteger':
return self._pid_selection
@property
def setpoint(self) -> 'MutableDecopReal':
return self._setpoint
@property
def relock(self) -> 'NloLaserHeadRelockFactorySettings':
return self._relock
@property
def window(self) -> 'NloLaserHeadLockWindowFactorySettings':
return self._window
@property
def pid1_gain(self) -> 'NloLaserHeadPidGainFactorySettings':
return self._pid1_gain
@property
def pid2_gain(self) -> 'NloLaserHeadPidGainFactorySettings':
return self._pid2_gain
@property
def analog_p_gain(self) -> 'MutableDecopReal':
return self._analog_p_gain
@property
def local_oscillator(self) -> 'NloLaserHeadLocalOscillatorFactorySettings':
return self._local_oscillator
class NloLaserHeadRelockFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._frequency = MutableDecopReal(client, name + ':frequency')
self._amplitude = MutableDecopReal(client, name + ':amplitude')
self._delay = MutableDecopReal(client, name + ':delay')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def frequency(self) -> 'MutableDecopReal':
return self._frequency
@property
def amplitude(self) -> 'MutableDecopReal':
return self._amplitude
@property
def delay(self) -> 'MutableDecopReal':
return self._delay
class NloLaserHeadLockWindowFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._input_channel = MutableDecopInteger(client, name + ':input-channel')
self._threshold = MutableDecopReal(client, name + ':threshold')
self._level_hysteresis = MutableDecopReal(client, name + ':level-hysteresis')
@property
def input_channel(self) -> 'MutableDecopInteger':
return self._input_channel
@property
def threshold(self) -> 'MutableDecopReal':
return self._threshold
@property
def level_hysteresis(self) -> 'MutableDecopReal':
return self._level_hysteresis
class NloLaserHeadPidGainFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._all = MutableDecopReal(client, name + ':all')
self._p = MutableDecopReal(client, name + ':p')
self._i = MutableDecopReal(client, name + ':i')
self._d = MutableDecopReal(client, name + ':d')
self._i_cutoff = MutableDecopReal(client, name + ':i-cutoff')
self._i_cutoff_enabled = MutableDecopBoolean(client, name + ':i-cutoff-enabled')
@property
def all(self) -> 'MutableDecopReal':
return self._all
@property
def p(self) -> 'MutableDecopReal':
return self._p
@property
def i(self) -> 'MutableDecopReal':
return self._i
@property
def d(self) -> 'MutableDecopReal':
return self._d
@property
def i_cutoff(self) -> 'MutableDecopReal':
return self._i_cutoff
@property
def i_cutoff_enabled(self) -> 'MutableDecopBoolean':
return self._i_cutoff_enabled
class NloLaserHeadLocalOscillatorFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._use_fast_oscillator = MutableDecopBoolean(client, name + ':use-fast-oscillator')
self._coupled_modulation = MutableDecopBoolean(client, name + ':coupled-modulation')
self._attenuation_shg_raw = MutableDecopInteger(client, name + ':attenuation-shg-raw')
self._attenuation_fhg_raw = MutableDecopInteger(client, name + ':attenuation-fhg-raw')
self._phase_shift_shg = MutableDecopReal(client, name + ':phase-shift-shg')
self._phase_shift_fhg = MutableDecopReal(client, name + ':phase-shift-fhg')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def use_fast_oscillator(self) -> 'MutableDecopBoolean':
return self._use_fast_oscillator
@property
def coupled_modulation(self) -> 'MutableDecopBoolean':
return self._coupled_modulation
@property
def attenuation_shg_raw(self) -> 'MutableDecopInteger':
return self._attenuation_shg_raw
@property
def attenuation_fhg_raw(self) -> 'MutableDecopInteger':
return self._attenuation_fhg_raw
@property
def phase_shift_shg(self) -> 'MutableDecopReal':
return self._phase_shift_shg
@property
def phase_shift_fhg(self) -> 'MutableDecopReal':
return self._phase_shift_fhg
class Fhg:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._tc = TcChannel(client, name + ':tc')
self._pc = PiezoDrv1(client, name + ':pc')
self._scan = NloLaserHeadSiggen(client, name + ':scan')
self._scope = NloLaserHeadScopeT(client, name + ':scope')
self._lock = NloLaserHeadLockFhg(client, name + ':lock')
self._factory_settings = FhgFactorySettings(client, name + ':factory-settings')
@property
def tc(self) -> 'TcChannel':
return self._tc
@property
def pc(self) -> 'PiezoDrv1':
return self._pc
@property
def scan(self) -> 'NloLaserHeadSiggen':
return self._scan
@property
def scope(self) -> 'NloLaserHeadScopeT':
return self._scope
@property
def lock(self) -> 'NloLaserHeadLockFhg':
return self._lock
@property
def factory_settings(self) -> 'FhgFactorySettings':
return self._factory_settings
def store(self) -> None:
self.__client.exec(self.__name + ':store', input_stream=None, output_type=None, return_type=None)
def restore(self) -> None:
self.__client.exec(self.__name + ':restore', input_stream=None, output_type=None, return_type=None)
class NloLaserHeadLockFhg:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._state = DecopInteger(client, name + ':state')
self._state_txt = DecopString(client, name + ':state-txt')
self._lock_enabled = MutableDecopBoolean(client, name + ':lock-enabled')
self._pid_selection = MutableDecopInteger(client, name + ':pid-selection')
self._setpoint = MutableDecopReal(client, name + ':setpoint')
self._relock = NloLaserHeadRelock(client, name + ':relock')
self._window = NloLaserHeadWindow(client, name + ':window')
self._pid1 = NloLaserHeadPid(client, name + ':pid1')
self._pid2 = NloLaserHeadPid(client, name + ':pid2')
self._local_oscillator = NloLaserHeadLocalOscillatorFhg(client, name + ':local-oscillator')
self._cavity_fast_pzt_voltage = MutableDecopReal(client, name + ':cavity-fast-pzt-voltage')
self._cavity_slow_pzt_voltage = MutableDecopReal(client, name + ':cavity-slow-pzt-voltage')
self._background_trace = DecopBinary(client, name + ':background-trace')
@property
def state(self) -> 'DecopInteger':
return self._state
@property
def state_txt(self) -> 'DecopString':
return self._state_txt
@property
def lock_enabled(self) -> 'MutableDecopBoolean':
return self._lock_enabled
@property
def pid_selection(self) -> 'MutableDecopInteger':
return self._pid_selection
@property
def setpoint(self) -> 'MutableDecopReal':
return self._setpoint
@property
def relock(self) -> 'NloLaserHeadRelock':
return self._relock
@property
def window(self) -> 'NloLaserHeadWindow':
return self._window
@property
def pid1(self) -> 'NloLaserHeadPid':
return self._pid1
@property
def pid2(self) -> 'NloLaserHeadPid':
return self._pid2
@property
def local_oscillator(self) -> 'NloLaserHeadLocalOscillatorFhg':
return self._local_oscillator
@property
def cavity_fast_pzt_voltage(self) -> 'MutableDecopReal':
return self._cavity_fast_pzt_voltage
@property
def cavity_slow_pzt_voltage(self) -> 'MutableDecopReal':
return self._cavity_slow_pzt_voltage
@property
def background_trace(self) -> 'DecopBinary':
return self._background_trace
class NloLaserHeadLocalOscillatorFhg:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._coupled_modulation = MutableDecopBoolean(client, name + ':coupled-modulation')
self._use_fast_oscillator = MutableDecopBoolean(client, name + ':use-fast-oscillator')
self._amplitude = MutableDecopReal(client, name + ':amplitude')
self._attenuation_raw = MutableDecopInteger(client, name + ':attenuation-raw')
self._phase_shift = MutableDecopReal(client, name + ':phase-shift')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def coupled_modulation(self) -> 'MutableDecopBoolean':
return self._coupled_modulation
@property
def use_fast_oscillator(self) -> 'MutableDecopBoolean':
return self._use_fast_oscillator
@property
def amplitude(self) -> 'MutableDecopReal':
return self._amplitude
@property
def attenuation_raw(self) -> 'MutableDecopInteger':
return self._attenuation_raw
@property
def phase_shift(self) -> 'MutableDecopReal':
return self._phase_shift
def auto_pdh(self) -> None:
self.__client.exec(self.__name + ':auto-pdh', input_stream=None, output_type=None, return_type=None)
class FhgFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._modified = DecopBoolean(client, name + ':modified')
self._tc = NloLaserHeadTcFactorySettings(client, name + ':tc')
self._pc = NloLaserHeadPcFactorySettings(client, name + ':pc')
self._pd = NloLaserHeadFhgPhotodiodesFactorySettings(client, name + ':pd')
self._lock = NloLaserHeadLockFactorySettings(client, name + ':lock')
@property
def modified(self) -> 'DecopBoolean':
return self._modified
@property
def tc(self) -> 'NloLaserHeadTcFactorySettings':
return self._tc
@property
def pc(self) -> 'NloLaserHeadPcFactorySettings':
return self._pc
@property
def pd(self) -> 'NloLaserHeadFhgPhotodiodesFactorySettings':
return self._pd
@property
def lock(self) -> 'NloLaserHeadLockFactorySettings':
return self._lock
def apply(self) -> None:
self.__client.exec(self.__name + ':apply', input_stream=None, output_type=None, return_type=None)
def retrieve_now(self) -> None:
self.__client.exec(self.__name + ':retrieve-now', input_stream=None, output_type=None, return_type=None)
class NloLaserHeadFhgPhotodiodesFactorySettings:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._fhg = NloLaserHeadPdFactorySettings(client, name + ':fhg')
self._int = NloLaserHeadPdDigilockFactorySettings(client, name + ':int')
self._pdh_dc = NloLaserHeadPdDigilockFactorySettings(client, name + ':pdh-dc')
self._pdh_rf = NloLaserHeadPdPdhFactorySettings(client, name + ':pdh-rf')
@property
def fhg(self) -> 'NloLaserHeadPdFactorySettings':
return self._fhg
@property
def int(self) -> 'NloLaserHeadPdDigilockFactorySettings':
return self._int
@property
def pdh_dc(self) -> 'NloLaserHeadPdDigilockFactorySettings':
return self._pdh_dc
@property
def pdh_rf(self) -> 'NloLaserHeadPdPdhFactorySettings':
return self._pdh_rf
class PdExt:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._input_channel = MutableDecopInteger(client, name + ':input-channel')
self._photodiode = DecopReal(client, name + ':photodiode')
self._power = DecopReal(client, name + ':power')
self._cal_offset = MutableDecopReal(client, name + ':cal-offset')
self._cal_factor = MutableDecopReal(client, name + ':cal-factor')
@property
def input_channel(self) -> 'MutableDecopInteger':
return self._input_channel
@property
def photodiode(self) -> 'DecopReal':
return self._photodiode
@property
def power(self) -> 'DecopReal':
return self._power
@property
def cal_offset(self) -> 'MutableDecopReal':
return self._cal_offset
@property
def cal_factor(self) -> 'MutableDecopReal':
return self._cal_factor
class PwrStab:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._gain = PwrStabGain(client, name + ':gain')
self._sign = MutableDecopBoolean(client, name + ':sign')
self._input_channel = MutableDecopInteger(client, name + ':input-channel')
self._setpoint = MutableDecopReal(client, name + ':setpoint')
self._window = PwrStabWindow(client, name + ':window')
self._hold_output_on_unlock = MutableDecopBoolean(client, name + ':hold-output-on-unlock')
self._output_channel = DecopInteger(client, name + ':output-channel')
self._input_channel_value_act = DecopReal(client, name + ':input-channel-value-act')
self._state = DecopInteger(client, name + ':state')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def gain(self) -> 'PwrStabGain':
return self._gain
@property
def sign(self) -> 'MutableDecopBoolean':
return self._sign
@property
def input_channel(self) -> 'MutableDecopInteger':
return self._input_channel
@property
def setpoint(self) -> 'MutableDecopReal':
return self._setpoint
@property
def window(self) -> 'PwrStabWindow':
return self._window
@property
def hold_output_on_unlock(self) -> 'MutableDecopBoolean':
return self._hold_output_on_unlock
@property
def output_channel(self) -> 'DecopInteger':
return self._output_channel
@property
def input_channel_value_act(self) -> 'DecopReal':
return self._input_channel_value_act
@property
def state(self) -> 'DecopInteger':
return self._state
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
class PwrStabGain:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._all = MutableDecopReal(client, name + ':all')
self._p = MutableDecopReal(client, name + ':p')
self._i = MutableDecopReal(client, name + ':i')
self._d = MutableDecopReal(client, name + ':d')
@property
def all(self) -> 'MutableDecopReal':
return self._all
@property
def p(self) -> 'MutableDecopReal':
return self._p
@property
def i(self) -> 'MutableDecopReal':
return self._i
@property
def d(self) -> 'MutableDecopReal':
return self._d
class PwrStabWindow:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._level_low = MutableDecopReal(client, name + ':level-low')
self._level_hysteresis = MutableDecopReal(client, name + ':level-hysteresis')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def level_low(self) -> 'MutableDecopReal':
return self._level_low
@property
def level_hysteresis(self) -> 'MutableDecopReal':
return self._level_hysteresis
class CcBoard:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._slot = DecopString(client, name + ':slot')
self._serial_number = DecopString(client, name + ':serial-number')
self._revision = DecopString(client, name + ':revision')
self._fpga_fw_ver = DecopInteger(client, name + ':fpga-fw-ver')
self._board_temp = DecopReal(client, name + ':board-temp')
self._variant = DecopString(client, name + ':variant')
self._parallel_mode = DecopBoolean(client, name + ':parallel-mode')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
self._channel1 = CurrDrv2(client, name + ':channel1')
self._channel2 = CurrDrv2(client, name + ':channel2')
@property
def slot(self) -> 'DecopString':
return self._slot
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def revision(self) -> 'DecopString':
return self._revision
@property
def fpga_fw_ver(self) -> 'DecopInteger':
return self._fpga_fw_ver
@property
def board_temp(self) -> 'DecopReal':
return self._board_temp
@property
def variant(self) -> 'DecopString':
return self._variant
@property
def parallel_mode(self) -> 'DecopBoolean':
return self._parallel_mode
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
@property
def channel1(self) -> 'CurrDrv2':
return self._channel1
@property
def channel2(self) -> 'CurrDrv2':
return self._channel2
class CurrDrv2:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._path = DecopString(client, name + ':path')
self._variant = DecopString(client, name + ':variant')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._emission = DecopBoolean(client, name + ':emission')
self._current_set = MutableDecopReal(client, name + ':current-set')
self._current_offset = MutableDecopReal(client, name + ':current-offset')
self._current_set_dithering = MutableDecopBoolean(client, name + ':current-set-dithering')
self._external_input = ExtInput2(client, name + ':external-input')
self._output_filter = OutputFilter2(client, name + ':output-filter')
self._current_act = DecopReal(client, name + ':current-act')
self._positive_polarity = MutableDecopBoolean(client, name + ':positive-polarity')
self._current_clip = MutableDecopReal(client, name + ':current-clip')
self._current_clip_limit = DecopReal(client, name + ':current-clip-limit')
self._voltage_act = DecopReal(client, name + ':voltage-act')
self._voltage_clip = MutableDecopReal(client, name + ':voltage-clip')
self._feedforward_master = MutableDecopInteger(client, name + ':feedforward-master')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._pd = DecopReal(client, name + ':pd')
self._aux = DecopReal(client, name + ':aux')
self._snubber = MutableDecopBoolean(client, name + ':snubber')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
self._forced_off = MutableDecopBoolean(client, name + ':forced-off')
@property
def path(self) -> 'DecopString':
return self._path
@property
def variant(self) -> 'DecopString':
return self._variant
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def emission(self) -> 'DecopBoolean':
return self._emission
@property
def current_set(self) -> 'MutableDecopReal':
return self._current_set
@property
def current_offset(self) -> 'MutableDecopReal':
return self._current_offset
@property
def current_set_dithering(self) -> 'MutableDecopBoolean':
return self._current_set_dithering
@property
def external_input(self) -> 'ExtInput2':
return self._external_input
@property
def output_filter(self) -> 'OutputFilter2':
return self._output_filter
@property
def current_act(self) -> 'DecopReal':
return self._current_act
@property
def positive_polarity(self) -> 'MutableDecopBoolean':
return self._positive_polarity
@property
def current_clip(self) -> 'MutableDecopReal':
return self._current_clip
@property
def current_clip_limit(self) -> 'DecopReal':
return self._current_clip_limit
@property
def voltage_act(self) -> 'DecopReal':
return self._voltage_act
@property
def voltage_clip(self) -> 'MutableDecopReal':
return self._voltage_clip
@property
def feedforward_master(self) -> 'MutableDecopInteger':
return self._feedforward_master
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def pd(self) -> 'DecopReal':
return self._pd
@property
def aux(self) -> 'DecopReal':
return self._aux
@property
def snubber(self) -> 'MutableDecopBoolean':
return self._snubber
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
@property
def forced_off(self) -> 'MutableDecopBoolean':
return self._forced_off
class ExtInput2:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._signal = MutableDecopInteger(client, name + ':signal')
self._factor = MutableDecopReal(client, name + ':factor')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
@property
def signal(self) -> 'MutableDecopInteger':
return self._signal
@property
def factor(self) -> 'MutableDecopReal':
return self._factor
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
class OutputFilter2:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._slew_rate = MutableDecopReal(client, name + ':slew-rate')
self._slew_rate_enabled = MutableDecopBoolean(client, name + ':slew-rate-enabled')
self._slew_rate_limited = DecopBoolean(client, name + ':slew-rate-limited')
@property
def slew_rate(self) -> 'MutableDecopReal':
return self._slew_rate
@property
def slew_rate_enabled(self) -> 'MutableDecopBoolean':
return self._slew_rate_enabled
@property
def slew_rate_limited(self) -> 'DecopBoolean':
return self._slew_rate_limited
class Cc5000Board:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._slot = DecopString(client, name + ':slot')
self._serial_number = DecopString(client, name + ':serial-number')
self._revision = DecopString(client, name + ':revision')
self._fpga_fw_ver = DecopInteger(client, name + ':fpga-fw-ver')
self._board_temp = DecopReal(client, name + ':board-temp')
self._variant = DecopString(client, name + ':variant')
self._parallel_mode = DecopBoolean(client, name + ':parallel-mode')
self._inverter_temp = DecopReal(client, name + ':inverter-temp')
self._inverter_temp_fuse = DecopReal(client, name + ':inverter-temp-fuse')
self._regulator_temp = DecopReal(client, name + ':regulator-temp')
self._regulator_temp_fuse = DecopReal(client, name + ':regulator-temp-fuse')
self._power_15v = MutableDecopBoolean(client, name + ':power-15v')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
self._channel1 = Cc5000Drv(client, name + ':channel1')
@property
def slot(self) -> 'DecopString':
return self._slot
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def revision(self) -> 'DecopString':
return self._revision
@property
def fpga_fw_ver(self) -> 'DecopInteger':
return self._fpga_fw_ver
@property
def board_temp(self) -> 'DecopReal':
return self._board_temp
@property
def variant(self) -> 'DecopString':
return self._variant
@property
def parallel_mode(self) -> 'DecopBoolean':
return self._parallel_mode
@property
def inverter_temp(self) -> 'DecopReal':
return self._inverter_temp
@property
def inverter_temp_fuse(self) -> 'DecopReal':
return self._inverter_temp_fuse
@property
def regulator_temp(self) -> 'DecopReal':
return self._regulator_temp
@property
def regulator_temp_fuse(self) -> 'DecopReal':
return self._regulator_temp_fuse
@property
def power_15v(self) -> 'MutableDecopBoolean':
return self._power_15v
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
@property
def channel1(self) -> 'Cc5000Drv':
return self._channel1
class PcBoard:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._slot = DecopString(client, name + ':slot')
self._serial_number = DecopString(client, name + ':serial-number')
self._revision = DecopString(client, name + ':revision')
self._fpga_fw_ver = DecopInteger(client, name + ':fpga-fw-ver')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
self._heatsink_temp = DecopReal(client, name + ':heatsink-temp')
self._channel1 = PiezoDrv2(client, name + ':channel1')
@property
def slot(self) -> 'DecopString':
return self._slot
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def revision(self) -> 'DecopString':
return self._revision
@property
def fpga_fw_ver(self) -> 'DecopInteger':
return self._fpga_fw_ver
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
@property
def heatsink_temp(self) -> 'DecopReal':
return self._heatsink_temp
@property
def channel1(self) -> 'PiezoDrv2':
return self._channel1
class PiezoDrv2:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._path = DecopString(client, name + ':path')
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._voltage_set = MutableDecopReal(client, name + ':voltage-set')
self._voltage_min = MutableDecopReal(client, name + ':voltage-min')
self._voltage_max = MutableDecopReal(client, name + ':voltage-max')
self._voltage_set_dithering = MutableDecopBoolean(client, name + ':voltage-set-dithering')
self._external_input = ExtInput2(client, name + ':external-input')
self._output_filter = OutputFilter2(client, name + ':output-filter')
self._voltage_act = DecopReal(client, name + ':voltage-act')
self._feedforward_master = MutableDecopInteger(client, name + ':feedforward-master')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
@property
def path(self) -> 'DecopString':
return self._path
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def voltage_set(self) -> 'MutableDecopReal':
return self._voltage_set
@property
def voltage_min(self) -> 'MutableDecopReal':
return self._voltage_min
@property
def voltage_max(self) -> 'MutableDecopReal':
return self._voltage_max
@property
def voltage_set_dithering(self) -> 'MutableDecopBoolean':
return self._voltage_set_dithering
@property
def external_input(self) -> 'ExtInput2':
return self._external_input
@property
def output_filter(self) -> 'OutputFilter2':
return self._output_filter
@property
def voltage_act(self) -> 'DecopReal':
return self._voltage_act
@property
def feedforward_master(self) -> 'MutableDecopInteger':
return self._feedforward_master
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
class TcBoard:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._slot = DecopString(client, name + ':slot')
self._serial_number = DecopString(client, name + ':serial-number')
self._revision = DecopString(client, name + ':revision')
self._fpga_fw_ver = DecopString(client, name + ':fpga-fw-ver')
self._board_temp = DecopReal(client, name + ':board-temp')
self._channel1 = TcChannel(client, name + ':channel1')
self._channel2 = TcChannel(client, name + ':channel2')
@property
def slot(self) -> 'DecopString':
return self._slot
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def revision(self) -> 'DecopString':
return self._revision
@property
def fpga_fw_ver(self) -> 'DecopString':
return self._fpga_fw_ver
@property
def board_temp(self) -> 'DecopReal':
return self._board_temp
@property
def channel1(self) -> 'TcChannel':
return self._channel1
@property
def channel2(self) -> 'TcChannel':
return self._channel2
class McBoard:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._serial_number = DecopString(client, name + ':serial-number')
self._revision = DecopString(client, name + ':revision')
self._fpga_fw_ver = DecopString(client, name + ':fpga-fw-ver')
self._board_temp = DecopReal(client, name + ':board-temp')
self._relative_humidity = DecopReal(client, name + ':relative-humidity')
self._air_pressure = DecopReal(client, name + ':air-pressure')
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def revision(self) -> 'DecopString':
return self._revision
@property
def fpga_fw_ver(self) -> 'DecopString':
return self._fpga_fw_ver
@property
def board_temp(self) -> 'DecopReal':
return self._board_temp
@property
def relative_humidity(self) -> 'DecopReal':
return self._relative_humidity
@property
def air_pressure(self) -> 'DecopReal':
return self._air_pressure
class IoBoard:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._serial_number = DecopString(client, name + ':serial-number')
self._revision = DecopString(client, name + ':revision')
self._fpga_fw_ver = DecopInteger(client, name + ':fpga-fw-ver')
self._out_a = IoOutputChannel(client, name + ':out-a')
self._out_b = IoOutputChannel(client, name + ':out-b')
self._digital_in0 = IoDigitalInput(client, name + ':digital-in0')
self._digital_in1 = IoDigitalInput(client, name + ':digital-in1')
self._digital_in2 = IoDigitalInput(client, name + ':digital-in2')
self._digital_in3 = IoDigitalInput(client, name + ':digital-in3')
self._digital_out0 = IoDigitalOutput(client, name + ':digital-out0')
self._digital_out1 = IoDigitalOutput(client, name + ':digital-out1')
self._digital_out2 = IoDigitalOutput(client, name + ':digital-out2')
self._digital_out3 = IoDigitalOutput(client, name + ':digital-out3')
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def revision(self) -> 'DecopString':
return self._revision
@property
def fpga_fw_ver(self) -> 'DecopInteger':
return self._fpga_fw_ver
@property
def out_a(self) -> 'IoOutputChannel':
return self._out_a
@property
def out_b(self) -> 'IoOutputChannel':
return self._out_b
@property
def digital_in0(self) -> 'IoDigitalInput':
return self._digital_in0
@property
def digital_in1(self) -> 'IoDigitalInput':
return self._digital_in1
@property
def digital_in2(self) -> 'IoDigitalInput':
return self._digital_in2
@property
def digital_in3(self) -> 'IoDigitalInput':
return self._digital_in3
@property
def digital_out0(self) -> 'IoDigitalOutput':
return self._digital_out0
@property
def digital_out1(self) -> 'IoDigitalOutput':
return self._digital_out1
@property
def digital_out2(self) -> 'IoDigitalOutput':
return self._digital_out2
@property
def digital_out3(self) -> 'IoDigitalOutput':
return self._digital_out3
class IoOutputChannel:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._voltage_set = MutableDecopReal(client, name + ':voltage-set')
self._voltage_offset = MutableDecopReal(client, name + ':voltage-offset')
self._voltage_min = MutableDecopReal(client, name + ':voltage-min')
self._voltage_max = MutableDecopReal(client, name + ':voltage-max')
self._external_input = ExtInput1(client, name + ':external-input')
self._output_filter = OutputFilter1(client, name + ':output-filter')
self._feedforward_master = MutableDecopInteger(client, name + ':feedforward-master')
self._feedforward_enabled = MutableDecopBoolean(client, name + ':feedforward-enabled')
self._feedforward_factor = MutableDecopReal(client, name + ':feedforward-factor')
@property
def voltage_set(self) -> 'MutableDecopReal':
return self._voltage_set
@property
def voltage_offset(self) -> 'MutableDecopReal':
return self._voltage_offset
@property
def voltage_min(self) -> 'MutableDecopReal':
return self._voltage_min
@property
def voltage_max(self) -> 'MutableDecopReal':
return self._voltage_max
@property
def external_input(self) -> 'ExtInput1':
return self._external_input
@property
def output_filter(self) -> 'OutputFilter1':
return self._output_filter
@property
def feedforward_master(self) -> 'MutableDecopInteger':
return self._feedforward_master
@property
def feedforward_enabled(self) -> 'MutableDecopBoolean':
return self._feedforward_enabled
@property
def feedforward_factor(self) -> 'MutableDecopReal':
return self._feedforward_factor
class IoDigitalInput:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._value_act = DecopBoolean(client, name + ':value-act')
@property
def value_act(self) -> 'DecopBoolean':
return self._value_act
class IoDigitalOutput:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._value_act = DecopBoolean(client, name + ':value-act')
self._value_set = MutableDecopBoolean(client, name + ':value-set')
self._mode = MutableDecopInteger(client, name + ':mode')
self._invert = MutableDecopBoolean(client, name + ':invert')
@property
def value_act(self) -> 'DecopBoolean':
return self._value_act
@property
def value_set(self) -> 'MutableDecopBoolean':
return self._value_set
@property
def mode(self) -> 'MutableDecopInteger':
return self._mode
@property
def invert(self) -> 'MutableDecopBoolean':
return self._invert
class PowerSupply:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._serial_number = DecopString(client, name + ':serial-number')
self._revision = DecopString(client, name + ':revision')
self._board_temp = DecopReal(client, name + ':board-temp')
self._heatsink_temp = DecopReal(client, name + ':heatsink-temp')
self._current_5V = DecopReal(client, name + ':current-5V')
self._current_15V = DecopReal(client, name + ':current-15V')
self._current_15Vn = DecopReal(client, name + ':current-15Vn')
self._voltage_5V = DecopReal(client, name + ':voltage-5V')
self._voltage_15V = DecopReal(client, name + ':voltage-15V')
self._voltage_15Vn = DecopReal(client, name + ':voltage-15Vn')
self._voltage_3V3 = DecopReal(client, name + ':voltage-3V3')
self._load = DecopReal(client, name + ':load')
self._status = DecopInteger(client, name + ':status')
self._status_txt = DecopString(client, name + ':status-txt')
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def revision(self) -> 'DecopString':
return self._revision
@property
def board_temp(self) -> 'DecopReal':
return self._board_temp
@property
def heatsink_temp(self) -> 'DecopReal':
return self._heatsink_temp
@property
def current_5V(self) -> 'DecopReal':
return self._current_5V
@property
def current_15V(self) -> 'DecopReal':
return self._current_15V
@property
def current_15Vn(self) -> 'DecopReal':
return self._current_15Vn
@property
def voltage_5V(self) -> 'DecopReal':
return self._voltage_5V
@property
def voltage_15V(self) -> 'DecopReal':
return self._voltage_15V
@property
def voltage_15Vn(self) -> 'DecopReal':
return self._voltage_15Vn
@property
def voltage_3V3(self) -> 'DecopReal':
return self._voltage_3V3
@property
def load(self) -> 'DecopReal':
return self._load
@property
def status(self) -> 'DecopInteger':
return self._status
@property
def status_txt(self) -> 'DecopString':
return self._status_txt
class Buzzer:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._welcome = MutableDecopString(client, name + ':welcome')
@property
def welcome(self) -> 'MutableDecopString':
return self._welcome
def play_welcome(self) -> None:
self.__client.exec(self.__name + ':play-welcome', input_stream=None, output_type=None, return_type=None)
def play(self, melody: str) -> None:
assert isinstance(melody, str), "expected type 'str' for parameter 'melody', got '{}'".format(type(melody))
self.__client.exec(self.__name + ':play', melody, input_stream=None, output_type=None, return_type=None)
class Display:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._brightness = MutableDecopReal(client, name + ':brightness')
self._auto_dark = MutableDecopBoolean(client, name + ':auto-dark')
self._idle_timeout = MutableDecopInteger(client, name + ':idle-timeout')
self._state = DecopInteger(client, name + ':state')
@property
def brightness(self) -> 'MutableDecopReal':
return self._brightness
@property
def auto_dark(self) -> 'MutableDecopBoolean':
return self._auto_dark
@property
def idle_timeout(self) -> 'MutableDecopInteger':
return self._idle_timeout
@property
def state(self) -> 'DecopInteger':
return self._state
def update_state(self, active: bool) -> None:
assert isinstance(active, bool), "expected type 'bool' for parameter 'active', got '{}'".format(type(active))
self.__client.exec(self.__name + ':update-state', active, input_stream=None, output_type=None, return_type=None)
class Standby:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = MutableDecopBoolean(client, name + ':enabled')
self._state = DecopInteger(client, name + ':state')
self._laser1 = StandbyLaser(client, name + ':laser1')
@property
def enabled(self) -> 'MutableDecopBoolean':
return self._enabled
@property
def state(self) -> 'DecopInteger':
return self._state
@property
def laser1(self) -> 'StandbyLaser':
return self._laser1
class StandbyLaser:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._dl = StandbyDl(client, name + ':dl')
self._amp = StandbyAmp(client, name + ':amp')
self._nlo = StandbyShg(client, name + ':nlo')
@property
def dl(self) -> 'StandbyDl':
return self._dl
@property
def amp(self) -> 'StandbyAmp':
return self._amp
@property
def nlo(self) -> 'StandbyShg':
return self._nlo
class StandbyDl:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._disable_pc = MutableDecopBoolean(client, name + ':disable-pc')
self._disable_cc = MutableDecopBoolean(client, name + ':disable-cc')
self._disable_tc = MutableDecopBoolean(client, name + ':disable-tc')
@property
def disable_pc(self) -> 'MutableDecopBoolean':
return self._disable_pc
@property
def disable_cc(self) -> 'MutableDecopBoolean':
return self._disable_cc
@property
def disable_tc(self) -> 'MutableDecopBoolean':
return self._disable_tc
class StandbyAmp:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._disable_cc = MutableDecopBoolean(client, name + ':disable-cc')
self._disable_tc = MutableDecopBoolean(client, name + ':disable-tc')
@property
def disable_cc(self) -> 'MutableDecopBoolean':
return self._disable_cc
@property
def disable_tc(self) -> 'MutableDecopBoolean':
return self._disable_tc
class StandbyShg:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._disable_pc = MutableDecopBoolean(client, name + ':disable-pc')
self._disable_tc = MutableDecopBoolean(client, name + ':disable-tc')
self._disable_servo_subsystem = MutableDecopBoolean(client, name + ':disable-servo-subsystem')
self._disable_power_stabilization = MutableDecopBoolean(client, name + ':disable-power-stabilization')
self._disable_cavity_lock = MutableDecopBoolean(client, name + ':disable-cavity-lock')
@property
def disable_pc(self) -> 'MutableDecopBoolean':
return self._disable_pc
@property
def disable_tc(self) -> 'MutableDecopBoolean':
return self._disable_tc
@property
def disable_servo_subsystem(self) -> 'MutableDecopBoolean':
return self._disable_servo_subsystem
@property
def disable_power_stabilization(self) -> 'MutableDecopBoolean':
return self._disable_power_stabilization
@property
def disable_cavity_lock(self) -> 'MutableDecopBoolean':
return self._disable_cavity_lock
class SystemMessages:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._count = DecopInteger(client, name + ':count')
self._count_new = DecopInteger(client, name + ':count-new')
self._latest_message = DecopString(client, name + ':latest-message')
@property
def count(self) -> 'DecopInteger':
return self._count
@property
def count_new(self) -> 'DecopInteger':
return self._count_new
@property
def latest_message(self) -> 'DecopString':
return self._latest_message
def mark_as_read(self, ID: int) -> None:
assert isinstance(ID, int), "expected type 'int' for parameter 'ID', got '{}'".format(type(ID))
self.__client.exec(self.__name + ':mark-as-read', ID, input_stream=None, output_type=None, return_type=None)
def show_all(self) -> str:
return self.__client.exec(self.__name + ':show-all', input_stream=None, output_type=str, return_type=None)
def show_new(self) -> str:
return self.__client.exec(self.__name + ':show-new', input_stream=None, output_type=str, return_type=None)
def show_log(self) -> str:
return self.__client.exec(self.__name + ':show-log', input_stream=None, output_type=str, return_type=None)
def show_persistent(self) -> str:
return self.__client.exec(self.__name + ':show-persistent', input_stream=None, output_type=str, return_type=None)
class Licenses:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._options = LicenseOptions(client, name + ':options')
self._installed_keys = DecopInteger(client, name + ':installed-keys')
@property
def options(self) -> 'LicenseOptions':
return self._options
@property
def installed_keys(self) -> 'DecopInteger':
return self._installed_keys
def get_key(self, key_number: int) -> str:
assert isinstance(key_number, int), "expected type 'int' for parameter 'key_number', got '{}'".format(type(key_number))
return self.__client.exec(self.__name + ':get-key', key_number, input_stream=None, output_type=None, return_type=str)
def install(self, licensekey: str) -> bool:
assert isinstance(licensekey, str), "expected type 'str' for parameter 'licensekey', got '{}'".format(type(licensekey))
return self.__client.exec(self.__name + ':install', licensekey, input_stream=None, output_type=None, return_type=bool)
class LicenseOptions:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._lock = LicenseOption(client, name + ':lock')
@property
def lock(self) -> 'LicenseOption':
return self._lock
class LicenseOption:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._enabled = DecopBoolean(client, name + ':enabled')
self._licensee = DecopString(client, name + ':licensee')
self._valid_until = DecopString(client, name + ':valid-until')
@property
def enabled(self) -> 'DecopBoolean':
return self._enabled
@property
def licensee(self) -> 'DecopString':
return self._licensee
@property
def valid_until(self) -> 'DecopString':
return self._valid_until
class FwUpdate:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
def upload(self, input_stream: bytes, filename: str) -> None:
assert isinstance(input_stream, bytes), "expected type 'bytes' for parameter 'input_stream', got '{}'".format(type(input_stream))
assert isinstance(filename, str), "expected type 'str' for parameter 'filename', got '{}'".format(type(filename))
self.__client.exec(self.__name + ':upload', filename, input_stream=input_stream, output_type=None, return_type=None)
def show_log(self) -> str:
return self.__client.exec(self.__name + ':show-log', input_stream=None, output_type=str, return_type=None)
def show_history(self) -> str:
return self.__client.exec(self.__name + ':show-history', input_stream=None, output_type=str, return_type=None)
class ServiceReport:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._ready = DecopBoolean(client, name + ':ready')
@property
def ready(self) -> 'DecopBoolean':
return self._ready
def service_report(self) -> bytes:
return self.__client.exec(self.__name + ':service-report', input_stream=None, output_type=bytes, return_type=None)
def request(self) -> None:
self.__client.exec(self.__name + ':request', input_stream=None, output_type=None, return_type=None)
def print(self) -> bytes:
return self.__client.exec(self.__name + ':print', input_stream=None, output_type=bytes, return_type=None)
class BuildInformation:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._build_number = DecopInteger(client, name + ':build-number')
self._build_id = DecopString(client, name + ':build-id')
self._build_tag = DecopString(client, name + ':build-tag')
self._job_name = DecopString(client, name + ':job-name')
self._build_node_name = DecopString(client, name + ':build-node-name')
self._build_url = DecopString(client, name + ':build-url')
self._cxx_compiler_version = DecopString(client, name + ':cxx-compiler-version')
self._c_compiler_version = DecopString(client, name + ':c-compiler-version')
self._cxx_compiler_id = DecopString(client, name + ':cxx-compiler-id')
self._c_compiler_id = DecopString(client, name + ':c-compiler-id')
@property
def build_number(self) -> 'DecopInteger':
return self._build_number
@property
def build_id(self) -> 'DecopString':
return self._build_id
@property
def build_tag(self) -> 'DecopString':
return self._build_tag
@property
def job_name(self) -> 'DecopString':
return self._job_name
@property
def build_node_name(self) -> 'DecopString':
return self._build_node_name
@property
def build_url(self) -> 'DecopString':
return self._build_url
@property
def cxx_compiler_version(self) -> 'DecopString':
return self._cxx_compiler_version
@property
def c_compiler_version(self) -> 'DecopString':
return self._c_compiler_version
@property
def cxx_compiler_id(self) -> 'DecopString':
return self._cxx_compiler_id
@property
def c_compiler_id(self) -> 'DecopString':
return self._c_compiler_id
class Ipconfig:
def __init__(self, client: Client, name: str) -> None:
self.__client = client
self.__name = name
self._ip_addr = DecopString(client, name + ':ip-addr')
self._net_mask = DecopString(client, name + ':net-mask')
self._mac_addr = DecopString(client, name + ':mac-addr')
self._dhcp = DecopBoolean(client, name + ':dhcp')
self._cmd_port = DecopInteger(client, name + ':cmd-port')
self._mon_port = DecopInteger(client, name + ':mon-port')
@property
def ip_addr(self) -> 'DecopString':
return self._ip_addr
@property
def net_mask(self) -> 'DecopString':
return self._net_mask
@property
def mac_addr(self) -> 'DecopString':
return self._mac_addr
@property
def dhcp(self) -> 'DecopBoolean':
return self._dhcp
@property
def cmd_port(self) -> 'DecopInteger':
return self._cmd_port
@property
def mon_port(self) -> 'DecopInteger':
return self._mon_port
def set_dhcp(self) -> None:
self.__client.exec(self.__name + ':set-dhcp', input_stream=None, output_type=None, return_type=None)
def set_ip(self, ip_addr: str, net_mask: str) -> None:
assert isinstance(ip_addr, str), "expected type 'str' for parameter 'ip_addr', got '{}'".format(type(ip_addr))
assert isinstance(net_mask, str), "expected type 'str' for parameter 'net_mask', got '{}'".format(type(net_mask))
self.__client.exec(self.__name + ':set-ip', ip_addr, net_mask, input_stream=None, output_type=None, return_type=None)
def apply(self) -> None:
self.__client.exec(self.__name + ':apply', input_stream=None, output_type=None, return_type=None)
class DLCpro:
def __init__(self, connection: Connection) -> None:
self.__client = Client(connection)
self._interlock_open = DecopBoolean(self.__client, 'interlock-open')
self._frontkey_locked = DecopBoolean(self.__client, 'frontkey-locked')
self._emission = DecopBoolean(self.__client, 'emission')
self._system_health = DecopInteger(self.__client, 'system-health')
self._system_health_txt = DecopString(self.__client, 'system-health-txt')
self._laser1 = Laser(self.__client, 'laser1')
self._cc1 = CcBoard(self.__client, 'cc1')
self._ampcc1 = Cc5000Board(self.__client, 'ampcc1')
self._ampcc2 = Cc5000Board(self.__client, 'ampcc2')
self._pc1 = PcBoard(self.__client, 'pc1')
self._pc2 = PcBoard(self.__client, 'pc2')
self._pc3 = PcBoard(self.__client, 'pc3')
self._tc1 = TcBoard(self.__client, 'tc1')
self._tc2 = TcBoard(self.__client, 'tc2')
self._mc = McBoard(self.__client, 'mc')
self._io = IoBoard(self.__client, 'io')
self._power_supply = PowerSupply(self.__client, 'power-supply')
self._buzzer = Buzzer(self.__client, 'buzzer')
self._display = Display(self.__client, 'display')
self._standby = Standby(self.__client, 'standby')
self._time = MutableDecopString(self.__client, 'time')
self._tan = DecopInteger(self.__client, 'tan')
self._system_messages = SystemMessages(self.__client, 'system-messages')
self._licenses = Licenses(self.__client, 'licenses')
self._fw_update = FwUpdate(self.__client, 'fw-update')
self._system_service_report = ServiceReport(self.__client, 'system-service-report')
self._uptime = DecopInteger(self.__client, 'uptime')
self._uptime_txt = DecopString(self.__client, 'uptime-txt')
self._fw_ver = DecopString(self.__client, 'fw-ver')
self._ssw_ver = DecopString(self.__client, 'ssw-ver')
self._decof_ver = DecopString(self.__client, 'decof-ver')
self._echo = MutableDecopBoolean(self.__client, 'echo')
self._serial_number = DecopString(self.__client, 'serial-number')
self._system_type = DecopString(self.__client, 'system-type')
self._system_model = DecopString(self.__client, 'system-model')
self._system_label = MutableDecopString(self.__client, 'system-label')
self._svn_revision = DecopString(self.__client, 'svn-revision')
self._decof_svn_revision = DecopString(self.__client, 'decof-svn-revision')
self._ssw_svn_revision = DecopString(self.__client, 'ssw-svn-revision')
self._build_information = BuildInformation(self.__client, 'build-information')
self._net_conf = Ipconfig(self.__client, 'net-conf')
self._ul = MutableDecopInteger(self.__client, 'ul')
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def open(self) -> None:
self.__client.open()
def close(self) -> None:
self.__client.close()
def run(self, timeout: int = None) -> None:
self.__client.run(timeout)
def stop(self) -> None:
self.__client.stop()
def poll(self) -> None:
self.__client.poll()
@property
def interlock_open(self) -> 'DecopBoolean':
return self._interlock_open
@property
def frontkey_locked(self) -> 'DecopBoolean':
return self._frontkey_locked
@property
def emission(self) -> 'DecopBoolean':
return self._emission
@property
def system_health(self) -> 'DecopInteger':
return self._system_health
@property
def system_health_txt(self) -> 'DecopString':
return self._system_health_txt
@property
def laser1(self) -> 'Laser':
return self._laser1
@property
def cc1(self) -> 'CcBoard':
return self._cc1
@property
def ampcc1(self) -> 'Cc5000Board':
return self._ampcc1
@property
def ampcc2(self) -> 'Cc5000Board':
return self._ampcc2
@property
def pc1(self) -> 'PcBoard':
return self._pc1
@property
def pc2(self) -> 'PcBoard':
return self._pc2
@property
def pc3(self) -> 'PcBoard':
return self._pc3
@property
def tc1(self) -> 'TcBoard':
return self._tc1
@property
def tc2(self) -> 'TcBoard':
return self._tc2
@property
def mc(self) -> 'McBoard':
return self._mc
@property
def io(self) -> 'IoBoard':
return self._io
@property
def power_supply(self) -> 'PowerSupply':
return self._power_supply
@property
def buzzer(self) -> 'Buzzer':
return self._buzzer
@property
def display(self) -> 'Display':
return self._display
@property
def standby(self) -> 'Standby':
return self._standby
@property
def time(self) -> 'MutableDecopString':
return self._time
@property
def tan(self) -> 'DecopInteger':
return self._tan
@property
def system_messages(self) -> 'SystemMessages':
return self._system_messages
@property
def licenses(self) -> 'Licenses':
return self._licenses
@property
def fw_update(self) -> 'FwUpdate':
return self._fw_update
@property
def system_service_report(self) -> 'ServiceReport':
return self._system_service_report
@property
def uptime(self) -> 'DecopInteger':
return self._uptime
@property
def uptime_txt(self) -> 'DecopString':
return self._uptime_txt
@property
def fw_ver(self) -> 'DecopString':
return self._fw_ver
@property
def ssw_ver(self) -> 'DecopString':
return self._ssw_ver
@property
def decof_ver(self) -> 'DecopString':
return self._decof_ver
@property
def echo(self) -> 'MutableDecopBoolean':
return self._echo
@property
def serial_number(self) -> 'DecopString':
return self._serial_number
@property
def system_type(self) -> 'DecopString':
return self._system_type
@property
def system_model(self) -> 'DecopString':
return self._system_model
@property
def system_label(self) -> 'MutableDecopString':
return self._system_label
@property
def svn_revision(self) -> 'DecopString':
return self._svn_revision
@property
def decof_svn_revision(self) -> 'DecopString':
return self._decof_svn_revision
@property
def ssw_svn_revision(self) -> 'DecopString':
return self._ssw_svn_revision
@property
def build_information(self) -> 'BuildInformation':
return self._build_information
@property
def net_conf(self) -> 'Ipconfig':
return self._net_conf
@property
def ul(self) -> 'MutableDecopInteger':
return self._ul
def system_connections(self) -> Tuple[str, int]:
return self.__client.exec('system-connections', input_stream=None, output_type=str, return_type=int)
def debug_log(self) -> str:
return self.__client.exec('debug-log', input_stream=None, output_type=str, return_type=None)
def error_log(self) -> str:
return self.__client.exec('error-log', input_stream=None, output_type=str, return_type=None)
def service_log(self) -> str:
return self.__client.exec('service-log', input_stream=None, output_type=str, return_type=None)
def service_script(self, input_stream: bytes) -> None:
assert isinstance(input_stream, bytes), "expected type 'bytes' for parameter 'input_stream', got '{}'".format(type(input_stream))
self.__client.exec('service-script', input_stream=input_stream, output_type=None, return_type=None)
def service_report(self) -> bytes:
return self.__client.exec('service-report', input_stream=None, output_type=bytes, return_type=None)
def system_summary(self) -> str:
return self.__client.exec('system-summary', input_stream=None, output_type=str, return_type=None)
def change_ul(self, ul: UserLevel, passwd: str) -> int:
assert isinstance(ul, UserLevel), "expected type 'UserLevel' for parameter 'ul', got '{}'".format(type(ul))
assert isinstance(passwd, str), "expected type 'str' for parameter 'passwd', got '{}'".format(type(passwd))
return self.__client.change_ul(ul, passwd)
def change_password(self, password: str) -> None:
assert isinstance(password, str), "expected type 'str' for parameter 'password', got '{}'".format(type(password))
self.__client.exec('change-password', password, input_stream=None, output_type=None, return_type=None)
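# Example (illustrative sketch, commented out): a minimal read-only session
# with a DLC pro. This assumes a suitable Connection implementation (for
# example a network connection class provided elsewhere in this SDK) and
# assumes the Decop* accessors expose get() as in the rest of the SDK; the
# address below is a placeholder.
#
#     connection = NetworkConnection('192.168.1.32')
#     with DLCpro(connection) as dlc:
#         print(dlc.serial_number.get())
#         print(dlc.system_health_txt.get())
#         print(dlc.uptime_txt.get())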
|
py
|
1a58f0a3f820e40e177ce31f79b8812c4925ce85
|
from pgshovel.interfaces.replication_pb2 import (
State,
StreamState,
)
from pgshovel.replication.validation.bootstrap import validate_bootstrap_state
from pgshovel.replication.validation.consumers import validate_consumer_state
from pgshovel.replication.validation.transactions import validate_transaction_state
class MultipleStateValidator(object):
def __init__(self, message, validators):
self.message = message
self.validators = validators
def __call__(self, state, *args, **kwargs):
states = {}
for name, validator in self.validators.items():
if state is not None and state.HasField(name):
value = getattr(state, name)
else:
value = None
result = validator(value, *args, **kwargs)
if result is not None:
states[name] = result
return self.message(**states)
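# validate_state composes the per-field validators below into a single
# validator for the top-level State message: each optional sub-message
# (bootstrap_state, stream_state, and within stream_state the consumer and
# transaction states) is validated on its own, and any non-None results are
# reassembled into a fresh State instance by MultipleStateValidator.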
validate_state = MultipleStateValidator(State, {
'bootstrap_state': validate_bootstrap_state,
'stream_state': MultipleStateValidator(StreamState, {
'consumer_state': validate_consumer_state,
'transaction_state': validate_transaction_state,
})
})
#: The expected types of event for a stream of transactions when there is no
#: existing ``TransactionState``.
TRANSACTION_START_EVENT_TYPES = validate_state.validators['stream_state'].validators['transaction_state'].receivers[None].keys() # noqa
|
py
|
1a58f1c1b951a92e1c96a0a963d68dd15824fffe
|
"""This module provides file I/O for Quake BSP2 map files.
Example:
bsp_file = bsp.Bsp.open('ad_sepulcher.bsp')
"""
import struct
from .bsp29 import Bsp as Bsp29
__all__ = ['is_bspfile', 'Bsp']
IDENTITY = b'BSP2'
def _check_bspfile(fp):
fp.seek(0)
data = fp.read(struct.calcsize('<4s'))
identity = struct.unpack('<4s', data)[0]
fp.seek(0)
return identity == IDENTITY
def is_bspfile(filename):
"""Quickly see if a file is a bsp file by checking the magic number.
The filename argument may be a filename string or a file-like object.
Args:
filename: File to check as string or file-like object.
Returns:
True if given file's magic number is correct.
"""
try:
if hasattr(filename, 'read'):
return _check_bspfile(fp=filename)
else:
with open(filename, 'rb') as fp:
return _check_bspfile(fp)
except Exception:
return False
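# Example (illustrative sketch, commented out): is_bspfile accepts either a
# path or an already-open binary file object; the path below is a placeholder.
#
#     is_bspfile('maps/ad_sepulcher.bsp')
#     with open('maps/ad_sepulcher.bsp', 'rb') as fp:
#         is_bspfile(fp)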
class Node(Bsp29.factory.Node):
format = '<i8i2I'
size = struct.calcsize(format)
class Face(Bsp29.factory.Face):
format = '<2ii2i4Bi'
size = struct.calcsize(format)
class ClipNode(Bsp29.factory.ClipNode):
format = '<i2i'
size = struct.calcsize(format)
class Leaf(Bsp29.factory.Leaf):
format = '<2i6i2I4B'
size = struct.calcsize(format)
class Edge(Bsp29.factory.Edge):
format = '<2I'
size = struct.calcsize(format)
class Bsp(Bsp29):
"""Class for working with Bsp files
Example:
Basic usage::
from vgio.quake.bsp.bsp29a import Bsp
b = Bsp.open('ad_sepulcher.bsp')
Attributes:
version: Version of the map file. Vanilla Quake is 29.
entities: A string containing the entity definitions.
planes: A sequence of Planes used by the bsp tree data structure.
miptextures: A sequence of Miptextures.
vertexes: A sequence of Vertexes.
visibilities: A sequence of ints representing visibility data.
nodes: A sequence of Nodes used by the bsp tree data structure.
texture_infos: A sequence of TextureInfo objects.
faces: A sequence of Faces.
lighting: A sequence of ints representing lighting data.
clip_nodes: A sequence of ClipNodes used by the bsp tree data structure.
leafs: A sequence of Leafs used by the bsp tree data structure.
mark_surfaces: A sequence of ints representing lists of consecutive faces
used by the Node objects.
edges: A sequence of Edges.
surf_edges: A sequence of ints representing list of consecutive edges used
by the Face objects.
models: A sequence of Models.
Note:
The first model is the entire level.
fp: The file-like object to read data from.
mode: The file mode for the file-like object.
"""
class factory(Bsp29.factory):
Node = Node
Face = Face
ClipNode = ClipNode
Leaf = Leaf
Edge = Edge
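# Example (illustrative sketch, commented out): guard loading on the magic
# number check before opening a BSP2 file; the path is a placeholder and the
# attributes used are those documented on the Bsp class above.
#
#     if is_bspfile('maps/ad_sepulcher.bsp'):
#         bsp_file = Bsp.open('maps/ad_sepulcher.bsp')
#         print(len(bsp_file.faces), 'faces')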
|
py
|
1a58f1c6d315109172c4b0515072043f078d0d58
|
import os
import setuptools
dir_repo = os.path.abspath(os.path.dirname(__file__))
# read the contents of REQUIREMENTS file
with open(os.path.join(dir_repo, "requirements.txt"), "r") as f:
requirements = f.read().splitlines()
# read the contents of README file
with open(os.path.join(dir_repo, "README.md"), encoding="utf-8") as f:
readme = f.read()
setuptools.setup(
name="neuralprophet",
version="0.2.5",
description="A simple yet customizable forecaster",
author="Oskar Triebe",
author_email='[email protected]',
url="https://github.com/ourownstory/neural_prophet",
license="MIT",
packages=setuptools.find_packages(),
python_requires=">=3.7",
install_requires=requirements,
extras_require={
"dev": ["livelossplot>=0.5.3", "black"],
"live": ["livelossplot>=0.5.3"],
},
# setup_requires=[""],
scripts=["scripts/neuralprophet_dev_setup"],
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
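# Example (illustrative): installing from a source checkout with the optional
# extras declared above (commands shown for reference only):
#
#     pip install -e ".[dev]"     # development extras: livelossplot, black
#     pip install -e ".[live]"    # live plotting extra: livelossplot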
|
py
|
1a58f27b4c433647d0fb7334f418d96f3c8934f8
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING  #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
The included functions supplement the logical operations currently provided
in numpy in order to provide a complete set of logical operations.
"""
from __future__ import absolute_import, division, print_function
from numpy import (logical_and, logical_or, logical_not, logical_xor, add,
subtract, multiply, divide)
__all__ = ["add", "subtract", "multiply", "divide", "logical_and",
"logical_or", "logical_nor", "logical_xor", "logical_not",
"logical_sub", "logical_nand"]
def logical_nand(x1, x2, out=None):
"""Computes the truth value of NOT (x1 AND x2) element wise.
This function enables the computation of the LOGICAL_NAND of two image or
volume data sets. This function enables easy isolation of all data points
NOT INCLUDED IN BOTH SOURCE DATA SETS. This function can be used for data
comparison, material isolation, noise removal, or mask
application/generation.
Parameters
----------
x1, x2 : array-like
Input arrays. `x1` and `x2` must be of the same shape.
out : ndarray, optional
Array into which the result is placed. It must have the same shape as
`x1` and `x2` and a boolean dtype.
Returns
-------
output : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
NAND operation on corresponding elements of `x1` and `x2`.
Example
-------
>>> x1 = [[0,0,1,0,0], [2,1,1,1,2], [2,0,1,0,2]]
>>> x2 = [[0,0,0,0,0], [2,1,1,1,2], [0,0,0,0,0]]
>>> logical_nand(x1, x2)
array([[ True, True, True, True, True],
[False, False, False, False, False],
[ True, True, True, True, True]], dtype=bool)
"""
return logical_not(logical_and(x1, x2, out), out)
def logical_nor(x1, x2, out=None):
"""Compute truth value of NOT (x1 OR x2)) element wise.
This function enables the computation of the LOGICAL_NOR of two image or
volume data sets. This function enables easy isolation of all data points
NOT INCLUDED IN EITHER OF THE SOURCE DATA SETS. This function can be used
for data comparison, material isolation, noise removal, or mask
application/generation.
Parameters
----------
x1, x2 : array-like
Input arrays. `x1` and `x2` must be of the same shape.
out : ndarray, optional
Array into which the result is placed. It must have the same shape as
`x1` and `x2` and a boolean dtype.
Returns
-------
output : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
NOR operation on corresponding elements of `x1` and `x2`.
Example
-------
>>> x1 = [[0,0,1,0,0], [2,1,1,1,2], [2,0,1,0,2]]
>>> x2 = [[0,0,0,0,0], [2,1,1,1,2], [0,0,0,0,0]]
>>> logical_nor(x1, x2)
array([[ True, True, False, True, True],
[False, False, False, False, False],
[False, True, False, True, False]], dtype=bool)
"""
return logical_not(logical_or(x1, x2, out), out)
def logical_sub(x1, x2, out=None):
"""Compute truth value of x1 AND (NOT (x1 AND x2)) element wise.
This function enables LOGICAL SUBTRACTION of one binary image or volume
data set from another. This function can be used to remove phase
information, interface boundaries, or noise, present in two data sets,
without having to worry about mislabeling of pixels which would result
from arithmetic subtraction. This function will evaluate as true for all
"true" voxels present ONLY in Source Dataset 1. This function can be used
for data cleanup, or boundary/interface analysis.
Parameters
----------
x1, x2 : array-like
Input arrays. `x1` and `x2` must be of the same shape.
out : ndarray, optional
Array into which the result is placed. It must have the same shape as
`x1` and `x2` and a boolean dtype.
Returns
-------
output : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
SUBTRACT operation on corresponding elements of `x1` and `x2`.
Example
-------
>>> x1 = [[0,0,1,0,0], [2,1,1,1,2], [2,0,1,0,2]]
>>> x2 = [[0,0,0,0,0], [2,1,1,1,2], [0,0,0,0,0]]
>>> logical_sub(x1, x2)
array([[False, False, True, False, False],
[False, False, False, False, False],
[ True, False, True, False, True]], dtype=bool)
"""
return logical_and(x1, logical_not(logical_and(x1, x2, out), out), out)
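# Minimal usage sketch (illustrative; assumes numpy arrays like the doctests
# above). Guarded under __main__ so importing this module is unaffected.
if __name__ == "__main__":
    import numpy as np
    x1 = np.array([[0, 0, 1, 0, 0], [2, 1, 1, 1, 2], [2, 0, 1, 0, 2]])
    x2 = np.array([[0, 0, 0, 0, 0], [2, 1, 1, 1, 2], [0, 0, 0, 0, 0]])
    # Mask generation: keep only the voxels present in x1 but not in both sets.
    print(logical_sub(x1, x2))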
|
py
|
1a58f2e44bbfb40dc203e807c3d115aacf4b0193
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from . import _utils, _io, _logger
from ._graph_execution_manager import GraphExecutionManager, _RunStateInfo
from ._execution_agent import InferenceAgent
from .debug_options import DebugOptions
from ._fallback import ORTModuleFallbackException, _FallbackPolicy, _FallbackManager
from onnxruntime.capi import _pybind_state as C
import onnx
import torch
import warnings
class InferenceManager(GraphExecutionManager):
"""Concrete instance of GraphExecutionManager that is able to manage the inference model
InferenceManager is resposible for building and running the forward graph of the inference model
"""
def __init__(self, model, debug_options: DebugOptions, fallback_manager: _FallbackManager):
super().__init__(model, debug_options, fallback_manager)
self._export_mode = torch.onnx.TrainingMode.EVAL
@staticmethod
def execution_session_run_forward(execution_session, onnx_model, device, *inputs):
"""Runs the forward graph on execution_session with given model inputs and device"""
# Assert that the input and model device match
_utils._check_same_device(device, "Input argument to forward", *inputs)
# TODO: Try to reuse the output buffers as some of the output tensors are same sizes,
# especially the backward graph outputs.
# REVIEW(codemzs): Consolidate Training Agent with InferenceAgent on C++ side to not
# have the need for passing IOBinding.
io_binding = execution_session.io_binding()
run_options = C.RunOptions()
# Use IO binding
_utils._create_iobinding(io_binding, inputs, onnx_model, device)
# Run and return module outputs.
ort_output = execution_session.run_forward(io_binding, run_options)
forward_outputs, run_id = ort_output.ortvalues, ort_output.run_id
user_outputs = tuple(_utils._ortvalue_to_torch_tensor(
forward_output._ortvalue) for forward_output in forward_outputs)
state = None
# Assert that the outputs and model device match
_utils._check_same_device(
device, "Output argument from forward", *user_outputs)
output_info = [(output.shape, output.device, output.dtype)
for output in user_outputs]
run_info = _RunStateInfo(state, output_info)
# Return user outputs and forward run information
return user_outputs, run_info
def forward(self, *inputs, **kwargs):
'''Forward pass of the inference model
ONNX model is exported the first time this method is executed.
Next, we build an optimized inference graph with module_graph_builder.
Finally, we instantiate the ONNX Runtime InferenceSession through the InferenceAgent.
'''
# Fallback to PyTorch due to failures *external* to forward(),
# typically from initialization
if self._fallback_manager.is_pending():
return self._fallback_manager.fallback(self._original_module, self._debug_options.logging.log_level, *inputs, **kwargs)
try:
# Exporting module to ONNX for the first time
build_graph = self._export_model(*inputs, **kwargs)
if build_graph:
# If model was exported, then initialize the graph builder
self._initialize_graph_builder(training=False)
# Build the inference graph
if build_graph:
self._build_graph()
module_device = _utils.get_device_from_module(
self._original_module)
# The inference session should be created every time
# the graph was built or if the device changed between calls to forward
create_execution_session = build_graph or self._device != module_device
if self._device != module_device:
self._device = module_device
if create_execution_session:
# Create execution session creates the inference_session
self._create_execution_agent()
user_outputs, _ = InferenceManager.execution_session_run_forward(self._execution_agent,
self._onnx_models.optimized_model,
self._device,
*_io._combine_input_buffers_initializers(
self._graph_initializers,
self._graph_info.user_input_names,
self._input_info,
self._flattened_module.named_buffers(),
inputs,
kwargs,
self._device))
return _io.unflatten_user_output(self._module_output_schema,
user_outputs)
except ORTModuleFallbackException as e:
# Exceptions subject to fallback are handled here
self._fallback_manager.handle_exception(exception=e,
log_level=self._debug_options.logging.log_level)
except Exception as e:
# Catch-all FALLBACK_FORCE_TORCH_FORWARD fallback is handled here
self._fallback_manager.handle_exception(exception=e,
log_level=self._debug_options.logging.log_level,
override_policy=_FallbackPolicy.FALLBACK_FORCE_TORCH_FORWARD)
# Fallback to PyTorch due to failures *during* forward(),
# (e.g. export, model/input post-processing, forward, output processing, etc)
if self._fallback_manager.is_pending():
return self._fallback_manager.fallback(self._original_module, self._debug_options.logging.log_level, *inputs, **kwargs)
def _build_graph(self):
"""Build an optimized inference graph using the module_graph_builder"""
super()._build_graph()
if self._debug_options.save_onnx_models.save:
self._onnx_models.save_optimized_model(self._debug_options.save_onnx_models.path,
self._debug_options.save_onnx_models.name_prefix,
self._export_mode)
def _create_execution_agent(self):
"""Creates an InferenceAgent that can run forward graph on an inference model"""
session_options, providers, provider_options = self._get_session_config()
self._execution_agent = InferenceAgent(self._onnx_models.optimized_model.SerializeToString(),
session_options, providers, provider_options)
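# Illustrative call path that reaches InferenceManager.forward (a sketch, not
# part of this module; 'my_module' and 'inputs' are placeholders, and the
# ORTModule import path may vary between onnxruntime-training releases):
#   from onnxruntime.training.ortmodule import ORTModule
#   model = ORTModule(my_module).eval()
#   with torch.no_grad():
#       outputs = model(inputs)   # dispatched to InferenceManager.forward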
|
py
|
1a58f3311683c3427730706c018a7c9b77f92f1f
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Sum(function.Function):
"""Summation over all elements."""
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype == numpy.float32
)
def forward_cpu(self, x):
return numpy.array(x[0].sum()),
def forward_gpu(self, x):
return x[0].sum(),
def backward_cpu(self, x, gy):
return numpy.full_like(x[0], gy[0]),
def backward_gpu(self, x, gy):
# TODO(beam2d): Make it async
return cuda.full_like(x[0], gy[0].get()),
def sum(x):
"""Computes sum of all elements."""
return Sum()(x)
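# Minimal usage sketch (illustrative, old function-style chainer API assumed):
#   x = chainer.Variable(numpy.arange(6, dtype=numpy.float32))
#   y = sum(x)       # the 'sum' defined above, not the builtin; y.data == 15.0
#   y.backward()     # x.grad is filled with ones of the same shape as x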
|
py
|
1a58f48803a9b5a09a55703048d259f03eadc254
|
# Copyright (C) 2011-2020 Airbus, [email protected]
import sys, os
import logging
log = logging.getLogger("plasmasm")
try:
# Check amoco dependency on OrderedDict
from collections import OrderedDict
del OrderedDict
except ImportError:
log.error('amoco backend needs Python 2.7 or later, with collections.OrderedDict')
raise ImportError('amoco backend needs Python 2.7 or later, with collections.OrderedDict')
try:
# Check amoco dependency on pyparsing
import pyparsing
del pyparsing
except ImportError:
log.error('amoco backend needs pyparsing to be installed')
raise ImportError('amoco backend needs pyparsing to be installed')
# If amoco is not installed system-wide, it is recommended to install it
# in the parent directory of plasmasm.
basedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
if basedir == '': basedir = '.'
sys.path.append(basedir+'/amoco')
sys.path.append(basedir+'/crysp')
sys.path.append(basedir+'/grandalf')
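# Expected checkout layout when amoco is not installed system-wide
# (illustrative, derived from the sys.path entries above):
#   <parent>/plasmasm/   this package
#   <parent>/amoco/
#   <parent>/crysp/
#   <parent>/grandalf/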
import amoco
from amoco.logger import Log
Log.progress = lambda count, total=0, pfx='': None
try:
from amoco.arch.core import type_data_processing, type_control_flow, type_other, type_cpu_state, type_undefined
except ImportError:
log.error("PATH %s", sys.path)
e = 'amoco backend not well installed: %s' % sys.exc_info()[1]
log.error(e)
raise ImportError(e)
from amoco.cas.mapper import mapper
from amoco.arch.x86 import env
from amoco.arch.x86 import cpu_x86 as cpu_amoco
try:
# Newer amoco
from amoco.arch.x86.cpu_x86 import instruction_x86 as instruction
except ImportError:
# Older amoco
from amoco.arch.core import instruction
env.internals['keep_order'] = True
cpu_addrsize = 32
from amoco.arch.x86.formats import default_mnemo_name, default_eqn_parser, \
mnemo_string_rep, \
IA32_Binutils_ATT, IA32_Binutils_Intel, IA32_MacOSX_ATT
#NON_REGRESSION_FOUND = True # Define this variable to avoid raising errors
# Encapsulation of internals
class API_AMOCO(object):
# API to access opname or prefix
def opname(self):
return default_mnemo_name(self.amoco)[-1][1]
opname = property(opname)
def prefix(self):
if self.amoco.misc.get('pfx') is None:
return []
pfx = []
if self.amoco.misc['pfx'][0] is not None:
pfx.append({
'lock': 0xf0,
'repne': 0xf2,
'rep': 0xf3,
}[self.amoco.misc['pfx'][0]])
if self.amoco.misc['pfx'][1] is not None:
assert 'segreg' == self.amoco.misc['pfx'][1]
pfx.append({
env.es: 0x26,
env.cs: 0x2e,
env.ss: 0x36,
env.ds: 0x3e,
env.fs: 0x64,
env.gs: 0x65,
}[self.amoco.misc['segreg']])
if self.amoco.misc['pfx'][2] is not None:
pfx.append({
'opdsz': 0x66,
}[self.amoco.misc['pfx'][2]])
if self.amoco.misc['pfx'][3] is not None:
pfx.append({
'adrsz': 0x67,
}[self.amoco.misc['pfx'][3]])
return pfx
prefix = property(prefix)
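# Illustrative prefix values (derived from the byte tables above): an
# instruction carrying a 'rep' prefix yields [0xf3], 'lock' yields [0xf0],
# and a 16-bit operand-size override contributes 0x66 to the list.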
#
# API to access the arguments
def api_nb_arg(self):
''' How many arguments for this instruction '''
return len(self.amoco.operands)
def api_arg_txt(self, pos, asm_format=None):
''' Text representation of argument 'pos' '''
if asm_format == 'att_syntax':
from amoco.arch.x86.formats import att_opers
return list(reversed(att_opers(self.amoco)))[pos*2][1]
else:
from amoco.arch.x86.formats import intel_opers
res = intel_opers(self.amoco)[pos*2][1]
if res.startswith('DWORD PTR '): res = res[10:]
return res
def api_get_cst(self, pos):
''' If the argument 'pos' is numeric,
then get its value as an 'int' '''
arg = self.amoco.operands[pos]
if arg._is_cst:
return int(int(arg))
return None
def api_get_imm(self, pos):
''' If the argument 'pos' contains an immediate value / displacement
then get its value as an 'int' '''
arg = self.amoco.operands[pos]
if arg._is_cst:
return int(int(arg))
elif arg._is_mem and arg.a.base._is_cst:
return int(int(arg.a.base))
elif arg._is_mem and not hasattr(arg.a.disp, '_is_cst'):
return int(int(arg.a.disp))
elif arg._is_eqn and arg.op.symbol == '+' and arg.r._is_cst:
return arg.r.value
elif arg._is_eqn and arg.op.symbol == '-' and arg.r._is_cst:
return (-arg.r).value
return None
def api_get_symbol(self, pos):
''' Gets the argument 'pos' in the form of a symbol if it is a label '''
arg = self.amoco.operands[pos]
if arg._is_lab:
return arg.ref
return None
def api_get_label(self, pos):
''' Gets a label if present in the argument 'pos'.
Gets two labels if it is a label difference. '''
arg = self.amoco.operands[pos]
if arg._is_mem and not hasattr(arg.a.disp, '_is_lab'):
label, label_dif, cste = default_eqn_parser(arg.a.base)
return label, label_dif
elif arg._is_mem:
label, label_dif, cste = default_eqn_parser(arg.a.disp)
return label, label_dif
else:
label, label_dif, cste = default_eqn_parser(arg)
return label, label_dif
def api_is_address(self, pos):
''' True if the argument 'pos' is an address '''
arg = self.amoco.operands[pos]
return arg is not None and arg._is_mem
def api_is_arg_size(self, pos, size):
''' True if the argument 'pos' is a size-bit argument '''
arg = self.amoco.operands[pos]
if arg.size != size: return False
return True
def api_is_reg_size(self, pos, size=None):
''' True if the argument 'pos' is a size-bit register '''
arg = self.amoco.operands[pos]
if expr.get_reg(arg) is None: return False
if size is not None and arg.size != size: return False
return True
def api_is_reg_in_arg(self, pos, reg):
''' True if the argument 'pos' contains a reference to a given register '''
arg = self.amoco.operands[pos]
log.debug("(DEBUG:api_is_reg_in_arg) %s %s", arg, reg)
return str(reg) in str(arg)
def api_same_base_reg(self, pos, instr):
''' Checks that arguments at position 'pos' in 'self' and 'instr'
have the same base register (they may have different disp) '''
arg = expr.get_reg(self.amoco.operands[pos].a.base)
return arg is not None and arg == expr.get_reg(instr.amoco.operands[pos].a.base)
def api_set_imm_label(self, pos, value, label=None, label_dif=None):
''' If the argument 'pos' contains an immediate value / displacement
then subtract 'value' and add the symbol 'label'.
If the argument is an absolute address, then 'label' should be at
address 'value'; if it is a relative address, then 'label' should
be at 'value' bytes of the current instruction.
'label_dif' is used for Mach-O binaries to represent LOC_DIF
relocations.
If 'label' is None, we only change the immediate.
If 'label' is False, we remove the label. '''
arg = self.amoco.operands[pos]
sym = 0
if label is False:
# Delete label
assert arg._is_mem
if arg.a.base._is_lab:
_, _, cste = default_eqn_parser(arg.a.base)
arg.a.base = expressions.cst(cste, size=cpu_addrsize)
elif not hasattr(arg.a.disp, '_is_lab'):
_, _, cste = default_eqn_parser(arg.a.base)
arg.a.base = expressions.cst(cste, size=cpu_addrsize)
elif arg.a.disp._is_lab:
arg.a.disp = 0
elif arg.a.disp._is_eqn:
_, _, cste = default_eqn_parser(arg.a.disp)
arg.a.disp = cste
else:
NEVER
elif label is not None:
sym = expressions.lab(label, size=cpu_addrsize)
if label_dif is not None:
sym -= expressions.lab(label_dif, size=cpu_addrsize)
if arg._is_cst:
self.amoco.operands[pos] -= value
self.amoco.operands[pos] += sym
elif arg._is_mem and arg.a.base._is_cst:
arg.a.base -= value
arg.a.base += sym
elif arg._is_mem and (arg.a.base._is_reg or arg.a.base._is_eqn):
arg.a.disp -= value
arg.a.disp += sym
if hasattr(arg.a.disp, '_is_cst') and arg.a.disp._is_cst:
arg.a.disp = arg.a.disp.value
else:
NEVER
def reg_from_name(reg):
if reg == 'eflag': reg = 'eflags'
return env.__dict__[reg]
reg_from_name = staticmethod(reg_from_name)
def api_add_reg(self, pos, reg, last=False):
arg = self.amoco.operands[pos]
reg = self.reg_from_name(reg)
assert arg._is_mem
if arg.a.base._is_cst:
arg.a.disp += arg.a.base.value
arg.a.base = reg
elif arg.a.base._is_lab:
arg.a.disp += arg.a.base
arg.a.base = reg
elif arg.a.base._is_eqn \
and not (arg.a.base.l._is_reg and not arg.a.base.l._is_lab) \
and not (arg.a.base.r._is_reg and not arg.a.base.r._is_lab):
# No register in arg.a.base => becomes a displacement
arg.a.disp += arg.a.base
arg.a.base = reg
elif arg.a.base._is_reg or arg.a.base._is_eqn:
# Replace 'reg+reg' with '2*reg'
if arg.a.base._is_eqn and arg.a.base.op.symbol == '+' \
and arg.a.base.l is arg.a.base.r:
arg.a.base = expressions.op('*',
arg.a.base.l,
expressions.cst(2,size=arg.a.base.l.size))
# Force the order of operands
if last: # reg is last
arg.a.base = expressions.op('+', arg.a.base, reg)
else: # reg is first
arg.a.base = expressions.op('+', reg, arg.a.base)
if env.internals.get('keep_order'): arg.a.base.prop |= 16
else:
NEVER
def api_replace_reg(self, src, dst):
''' In all arguments, replace register 'src' with 'dst'. '''
src = self.reg_from_name(src)
dst = self.reg_from_name(dst)
for pos, arg in enumerate(self.amoco.operands):
if arg._is_cst: pass
elif arg._is_eqn: pass
elif arg._is_reg:
if arg is src: self.amoco.operands[pos] = dst
elif arg._is_mem and arg.a.base._is_reg:
if arg.a.base is src: arg.a.base = dst
elif arg._is_mem and arg.a.base._is_eqn and \
arg.a.base.op.symbol == '*' and \
arg.a.base.l._is_reg:
if arg.a.base.l is src: arg.a.base.l = dst
elif arg._is_mem and arg.a.base._is_eqn and \
arg.a.base.op.symbol == '+' and \
arg.a.base.l._is_reg and \
arg.a.base.r._is_reg:
if arg.a.base.l is src: arg.a.base.l = dst
if arg.a.base.r is src: arg.a.base.r = dst
elif arg._is_mem and arg.a.base._is_eqn and \
arg.a.base.op.symbol == '+' and \
arg.a.base.l._is_reg and \
arg.a.base.r._is_eqn and \
arg.a.base.r.op.symbol == '*' and \
arg.a.base.r.l._is_reg:
if arg.a.base.l is src: arg.a.base.l = dst
if arg.a.base.r.l is src: arg.a.base.r.l = dst
else:
log.error("ARG=%s", arg)
NEVER
class StubNone(object):
''' When amoco fails to disassemble the data '''
def __str__(self, asm_format=None): return "NoneASM"
def __init__(self, offset, bytes):
self.length = len(bytes)
self.bytes = bytes
mnemonic = 'NoneASM'
type = None
operands = []
misc = {}
def __call__(self, m):
# Calling a mapper
pass
def clang_bug_test(self):
if self.amoco.mnemonic == 'TEST' \
and self.symbols.meta.get('compiler') == 'clang' \
and self.symbols.meta.get('os_minversion', (0,0,0))[1] < 14 \
and self.api_is_address(0) \
and self.api_is_reg_size(1) \
:
# Clang-LLVM on MacOSX sometimes use Intel argument order
# it is the case for
# Apple LLVM version 6.0 (clang-600.0.54)
# Apple LLVM version 7.0.2 (clang-700.1.81)
# Apple LLVM version 9.0.0 (clang-900.0.39.2)
# not for
# Apple clang version 11.0.0 (clang-1100.0.33.17)
instr = self.amoco.__class__(b"")
instr.mnemonic = self.amoco.mnemonic
instr.operands = list(reversed(self.amoco.operands))
instr.spec = self.amoco.spec
return instr
else:
return self.amoco
def att_bug_fsub_fdiv(instr):
if not instr.mnemonic[:4] in [ 'FSUB', 'FDIV' ]:
return
for _ in instr.operands:
if _._is_mem:
return
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=372528
# The binutils mix fsubp/fdivp with fsubrp/fdivrp
if instr.mnemonic[4:] == 'P':
instr.mnemonic = instr.mnemonic[:4] + 'RP'
elif instr.mnemonic[4:] == 'RP':
instr.mnemonic = instr.mnemonic[:4] + 'P'
elif len(instr.operands) == 2 and str(instr.operands[0]) != 'st0':
if instr.mnemonic[4:] == '':
instr.mnemonic = instr.mnemonic + 'R'
elif instr.mnemonic[4:] == 'R':
instr.mnemonic = instr.mnemonic[:4]
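# Illustrative effect of the fix-up above for register-only operand forms:
# FSUBP <-> FSUBRP and FDIVP <-> FDIVRP are swapped, so that re-assembling
# the generated AT&T text with binutils reproduces the original opcode bytes.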
spec_table = {}
for spec in amoco.arch.x86.spec_ia32.ISPECS \
+ amoco.arch.x86.spec_fpu.ISPECS \
+ amoco.arch.x86.spec_sse.ISPECS:
mnemo = spec.iattr.get('mnemonic', None)
if not mnemo in spec_table:
spec_table[mnemo] = [spec]
elif not spec in spec_table[mnemo]:
spec_table[mnemo].append(spec)
del spec
def set_spec(i, spec_table):
log.debug("%s %s", i.mnemonic, [_.size for _ in reversed(i.operands)])
spec_collision = {
'CBW': 'CWDE',
'CWD': 'CDQ',
'IRET': 'IRETD',
'CDQE': 'CWDE',
'CQO': 'CDQ',
'LFENCE': 'XRSTOR',
'MFENCE': 'XSAVEOPT',
'SFENCE': 'CLFLUSH',
'PEXTRQ': 'PEXTRD',
'PINSRQ': 'PINSRD',
'CMPXCHG16B': 'CMPXCHG8B',
}
if i.mnemonic in spec_collision:
spec_list = spec_table[spec_collision[i.mnemonic]]
elif i.mnemonic[:-1].lower() in mnemo_string_rep:
spec_list = spec_table[i.mnemonic[:-1]+'D']
else:
spec_list = spec_table[i.mnemonic]
if len(spec_list) > 1:
log.debug("Many possible spec for %s", i.mnemonic)
for spec in spec_list:
log.debug("... %s", spec.hook)
log.debug(" misc: %s", i.misc)
ispec_idx = 0
if i.mnemonic in ('CALL','JMP'):
if i.operands[0]._is_mem:
ispec_idx = 0
elif i.operands[0]._is_reg and not i.operands[0]._is_lab:
ispec_idx = 0
else:
ispec_idx = 1
if i.mnemonic.lower()[:-1] in mnemo_string_rep:
if not len(i.operands):
ispec_idx = -1
i.spec = spec_list[ispec_idx]
if 'type' in i.spec.iattr:
i.type = i.spec.iattr['type']
else:
i.type = type_data_processing
import re
def replace_names_with_symbols(symbols, args):
for e in args:
for _ in expressions.symbols_of(e):
if _._is_lab:
symbol = _.ref
r = re.match(r'(\d+)([bf])', symbol)
if r:
symbol, direction = r.groups()
idx = symbols.meta['local_labels'][symbol]
if direction == 'f': idx += 1
symbol = '.L%s\02%d'%(symbol,idx)
_.ref = symbols.find_symbol(name = symbol)
from plasmasm.symbols import Line
from plasmasm.compilers import \
switch_detection_x86_update, \
switch_detection_gcc463m32opt, \
switch_detection_gcc346m32opt, \
gcc_label_for_inlined_memcpy
from amoco.arch.x86.parsers import att_syntax
class Instruction(Line, API_AMOCO):
__slots__ = ('section', 'offset', 'bytelen',
'amoco')
CPU = 'I386'
def from_txt(self, txt):
''' text input, in assembly format '''
log.debug("> %s", txt)
if txt.startswith('rep; ret'): txt = 'rep ret'
instr = att_syntax.instr.parseString(txt, True)[0]
att_bug_fsub_fdiv(instr)
set_spec(instr, spec_table)
replace_names_with_symbols(self.symbols, instr.operands)
self.amoco = instr
return self
def from_bin(self, in_str, section):
''' binary input, in assembly format '''
self.section = section
self.offset = in_str.offset
from plasmasm.parse_bin import endofsection_address
end_of_section = endofsection_address(self.symbols, section)
end_of_instr = in_str.offset+cpu_amoco.disassemble.maxlen
if end_of_instr > end_of_section:
end_of_instr = end_of_section
instr = cpu_amoco.disassemble(in_str[self.offset:end_of_instr])
if instr is None:
instr = StubNone(self.offset, in_str[self.offset:self.offset+1])
self.bytelen = instr.length
in_str.offset = self.offset + self.bytelen
self.amoco = instr
return self
def pack(self):
''' binary representation '''
return self.amoco.bytes # Only if unchanged
def txt(self, asm_format=None):
''' text output, to be used by an assembler '''
if asm_format is not None:
asm_format_orig = self.asm_format
self.set_asm_format(asm_format)
if self.asm_format == 'raw' and str(self.amoco) == 'nop ':
txt = 'nop [%r]' % self.amoco.bytes
elif self.asm_format == 'raw':
txt = '%s [%s]' % (self.amoco, self.amoco.spec.hook.__name__)
else:
txt = str(clang_bug_test(self))
if asm_format is not None:
self.set_asm_format(asm_format_orig)
return txt
def labels(self):
''' labels that are referenced in the line '''
res = set()
for arg in self.amoco.operands:
if arg._is_lab:
res.add(arg)
if arg._is_eqn and arg.l._is_lab:
res.add(arg.l)
if arg._is_eqn and arg.r._is_lab:
res.add(arg.r)
if arg._is_mem and hasattr(arg.a.disp, '_is_lab') and arg.a.disp._is_lab:
res.add(arg.a.disp)
if arg._is_mem and arg.a.base._is_lab:
res.add(arg.a.base)
if arg._is_mem and arg.a.base._is_eqn and arg.a.base.l._is_lab:
res.add(arg.a.base.l)
if arg._is_mem and arg.a.base._is_eqn and arg.a.base.r._is_lab:
res.add(arg.a.base.r)
return set([_.ref for _ in res if hasattr(_.ref, 'name')])
def set_asm_format(self, asm_format):
if asm_format is None or asm_format.startswith('att_syntax'):
if asm_format == 'att_syntax clang':
instruction.set_formatter(IA32_MacOSX_ATT)
else:
instruction.set_formatter(IA32_Binutils_ATT)
# AT&T syntax is buggy, and depends on whether it is used by
# binutils or clang, cf. att_bug_fsub_fdiv
elif asm_format.startswith('intel_syntax'):
instruction.set_formatter(IA32_Binutils_Intel)
# Intel syntax is ambiguous, e.g. call eax
# when there is a global variable eax
self.asm_format = asm_format
set_asm_format = classmethod(set_asm_format)
asm_format = None
def _create_reloc(self, a):
''' needed to be able to pack an instruction '''
TODO
def _extract_symbols(self, a):
# Parsing the argument 'a', find if there is a relocation
# to be made, extract the symbols
# Output: relocation type (None/False/True), label(s)
TODO
def list_relocs(self):
''' needed to create a relocatable ELF '''
TODO
# Methods for binary parser
def create_label_imm(self):
''' Replace immediate values that may be labels '''
from plasmasm.parse_bin import label_for_address
if switch_detection_x86_update(self):
return
address = switch_detection_gcc463m32opt(self)
if address is not None:
section = self.symbols.get_sectionname(address)
label = self.symbols.find_symbol(section = section, address = address)
log.debug("... TABLE(imm) %r", label)
self.api_set_imm_label(1, address, label)
return
for idx in range(self.api_nb_arg()):
value = self.api_get_imm(idx)
label = label_for_address(self.symbols, value)
if label is not None:
assert label.address == value
self.api_set_imm_label(idx, value, label)
gcc_label_for_inlined_memcpy(self)
def create_label_rel(self):
''' Replace relative addresses for call/jmp/jcc '''
if self.opname == 'call' or self.opname.startswith('j'):
idx = 0
value = self.api_get_cst(idx)
else:
return
if value is None:
return
props = { 'address': (self.offset+self.bytelen+value)%(1<<cpu_addrsize),
'section': self.section }
label_imm = self.symbols.find_symbol(**props)
if label_imm is None:
NON_REGRESSION_FOUND
return
if label_imm.is_symbol() and self.bytelen < 5:
# If the argument is not 4 bytes long, create a new label
# and keep the same stack; if we don't do this, then instead
# of generating a relative jump, the assembler will generate
# a jump with relocation; it is the same semantics, but breaks
# non-regression tests asking that the generated .o is the same
# as the original one
# Non-regression: jcmarker.o from libjpeg-6b / gcc 4.6.3
old_stack = label_imm.stack
props['name'] = self.symbols.new_name(**props)
label_imm = self.symbols.find_symbol(**props)
label_imm.stack = old_stack
self.api_set_imm_label(idx, value, label_imm)
def apply_reloc(self, pos, reloc):
''' 'reloc' is a relocation at offset 'pos'
This function modifies the argument impacted by the relocation '''
# Step 1: find which arg is impacted
pos -= self.offset
b, = struct.unpack("B", self.amoco.bytes[pos:pos+1])
b = struct.pack("B", (1+b)%256)
o = cpu_amoco.disassemble(self.amoco.bytes)
patched = self.amoco.bytes[:pos] + b + self.amoco.bytes[pos+1:]
p = cpu_amoco.disassemble(patched)
if o is None or p is None or o.mnemonic != p.mnemonic:
log.error("Relocation changes instruction! %s => %s", o, p)
log.error(" at offset %r with reloc %r", pos, reloc)
log.error(" for '%s' at %s, address=%s",
self, self.section, self.offset)
return
# To find if an argument has changed, we compute the difference
# and test if it is non-zero
argpos = None
for idx, (oa, na) in enumerate(zip(o.operands, p.operands)):
try:
d = na - oa
except ValueError:
log.error("Invalid relocation effect")
log.error(" incompatible sizes %s %s", na, oa)
log.error(" reloc %r for '%s'", reloc, self)
return
if d._is_cst and int(d) == 0:
# Not changed
continue
if argpos is not None:
log.error("Relocation touches many arguments")
log.error(" reloc %r for '%s'", reloc, self)
return
argpos = idx
if argpos is None:
log.error("Relocation touches no argument")
log.error(" reloc %r for '%s'", reloc, self)
log.error("ARGPOS %s", argpos)
return
# Step 2: modify the argument by using the reloc data
address = switch_detection_gcc463m32opt(self)
if address is None:
address = switch_detection_gcc346m32opt(self)
if self.amoco.operands[argpos]._is_cst:
offset = int(self.amoco.operands[argpos])
if offset >= (1<<(cpu_addrsize-1)):
offset -= 1<<cpu_addrsize # Signed
self.amoco.operands[argpos] -= offset
elif self.amoco.operands[argpos]._is_mem:
base = self.amoco.operands[argpos].a.base
if base._is_cst:
offset = int(base)
self.amoco.operands[argpos].a.base -= offset
else:
if base._is_eqn and base.op.symbol == '+':
pass
# We may want to extract the constant from an operation
# (reg+imm), but normally it is stored as (base+disp)
offset = self.amoco.operands[argpos].a.disp
self.amoco.operands[argpos].a.disp -= offset
else:
log.error("Arg of type %s", self.amoco.operands[argpos].__class__)
return
if address is None:
from plasmasm.get_symbols import analyze_reloc
label, label_dif, offset, size = analyze_reloc(self,
reloc, offset, pos, self.bytelen)
else:
# Special case: offset to a switch table
r_type, data = reloc
# Some coherency checks
from elfesteem import elf, pe
if r_type == ('ELF', elf.EM_386, elf.R_386_32):
assert data['section'] == '.rodata'
elif r_type == ('COFF', pe.IMAGE_FILE_MACHINE_I386,
pe.IMAGE_REL_I386_DIR32):
assert data['section'] == '.rdata'
else:
log.error("Unknown reloc type: %s", reloc)
log.error("for: %s", self)
return
label = self.symbols.find_symbol(
section=data['section'], address=address)
label_dif = None
offset -= address
size = cpu_addrsize
log.debug("... TABLE(rel) %r", label)
self.dst = [[label]]
ext_label = expressions.lab(label, size=size)
if label_dif is not None:
ext_label -= expressions.lab(label_dif, size=size)
if offset != 0:
ext_label = ext_label + offset
if self.amoco.operands[argpos]._is_cst:
self.amoco.operands[argpos] += ext_label
elif self.amoco.operands[argpos]._is_mem and self.amoco.operands[argpos].a.base._is_cst:
self.amoco.operands[argpos].a.base += ext_label
elif self.amoco.operands[argpos]._is_mem:
self.amoco.operands[argpos].a.disp += ext_label
else:
NEVER
#if self.amoco.operands[argpos]._is_lab and \
# self.opname in [ 'call', 'jmp' ]:
# self.amoco.misc['dst'] = label
class InstructionCFG(Instruction):
__slots__ = ('flow', 'dst')
def _set_flow(self):
if self.opname == 'call': self.flow = 'sub'
elif self.opname == 'ret': self.flow = 'ret'
elif self.opname == 'retn': self.flow = 'ret'
elif self.opname == 'ud2': self.flow = 'ret'
elif self.opname == 'jmp': self.flow = 'jmp'
elif self.opname.startswith('j'): self.flow = 'jcc'
elif self.opname == 'loop': TODO
elif self.opname == 'iret': TODO
elif self.opname == 'int': TODO
else: self.flow = None
def _set_dst(self):
if hasattr(self, 'dst'):
# Already set by switch detection
return
if self.flow is None:
self.dst = []
elif self.flow == 'ret':
self.dst = [None]
elif self.flow in [ 'sub', 'jmp', 'jcc' ]:
self.dst = [ self.api_get_symbol(0) ]
else:
raise ValueError("Flow %s unknown"%self.flow)
if self.flow == 'sub' and len(self.dst) == 1 \
and hasattr(self, 'offset') \
and getattr(self.dst[0], 'address', None) == self.offset+self.bytelen:
# Detection of clang or gcc 3.x computation of GOT offset
# "call Ln" and "Ln: pop reg" and "add GOT"
self.flow = 'PIC'
def evaluate_lines(self, lines, in_str):
return evaluate_lines(self, lines, in_str)
def get_touched(e, indirect=False):
# If indirect==True, registers read to determine addresses in e
# If indirect==False, other registers read/written when e is read/written
t = set()
if e._is_def == 0: # top
# some flags may have undetermined values, e.g. for sar edx, 31
# some semantics are not implemented, e.g. shld edi, ebx, cl
pass
elif e._is_slc:
t.update(get_touched(e.x, indirect))
elif e._is_cmp:
for s in e.parts:
t.update(get_touched(e.parts[s], indirect))
elif e._is_cst:
pass
elif e._is_lab:
pass
elif e._is_reg:
if not indirect:
t.update([e])
elif e._is_mem:
t.update(get_touched(e.a, indirect))
elif e._is_ptr:
if not indirect:
t.update(['MEM'])
else:
t.update(get_touched(e.base, False))
elif e._is_tst:
t.update(get_touched(e.tst, False))
t.update(get_touched(e.l, indirect))
t.update(get_touched(e.r, indirect))
elif e._is_eqn:
if e.l is not None:
t.update(get_touched(e.l, indirect))
if e.r is not None:
t.update(get_touched(e.r, indirect))
else:
raise ValueError("in get_touched %s %s"%(type(e),e))
return t
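# Illustrative sketch: for a memory operand e equivalent to M32(eax+8),
#   get_touched(e, indirect=False) -> {'MEM'}   (the memory access itself)
#   get_touched(e, indirect=True)  -> {eax}     (register read to form the address)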
def get_rw(m):
r = set()
w = set()
for dst, src in m:
w.update(get_touched(dst, False))
r.update(get_touched(src, False))
r.update(get_touched(dst, True))
r.update(get_touched(src, True))
return r, w
def is_mmx(line, env):
if line.opname.startswith('cvt'):
return True
for reg in env.mmregs + env.xmmregs: # Loop, and use 'is', because
for arg in line.amoco.operands: # membership test with 'in' uses
if arg is reg: return True # '==' which is redefined and buggy
return False
def add_semantics_missing(line, r, w, env, get_touched):
# Some bugs of amoco emulation; we modify r and w
reg_flags = list(get_touched(env.cf))[0] # eflags/rflags in 32/64-bit mode
# flags are not read, for these instructions
if line.opname in ('cmp', 'test', 'inc', 'dec', 'add', 'sub', 'mul', 'imul',
'neg', 'and', 'or', 'bsf', 'bsr',
'aaa', 'aad', 'aam', 'aas', 'daa', 'das',
'bt'):
r.remove(reg_flags)
if line.opname in ('rol', 'ror'):
r.discard(reg_flags)
# No semantics for div in amoco
if line.opname in ('div', 'idiv'):
r.update(get_touched(env.eax))
w.update(get_touched(env.eax))
arg = line.amoco.operands[0]
if not (arg._is_slc and arg.size == 8):
# not 8-bit
r.update(get_touched(env.edx))
w.update(get_touched(env.edx))
r.update(get_touched(arg, False))
r.update(get_touched(arg, True))
# Incomplete semantics
if line.opname == 'bt':
for arg in line.amoco.operands:
r.update(get_touched(arg, False))
r.update(get_touched(arg, True))
if line.opname in ('shld', 'shrd') and line.amoco.operands[2]._is_slc:
dst = line.amoco.operands[0]
src = line.amoco.operands[1]
w.update(get_touched(dst, False))
r.update(get_touched(dst, False))
r.update(get_touched(src, False))
r.update(get_touched(dst, True))
r.update(get_touched(src, True))
r.update(get_touched(env.ecx))
if line.opname in ('ldmxcsr', 'stmxcsr', 'xsave', 'xrstor', 'xsaveopt',
'clflush', 'lfence', 'mfence', 'sfence'):
# reads or writes registers that are not in amoco's model,
# e.g. Processor Extended States
pass
# No semantics for fpu operations in amoco
if line.opname.startswith('f'):
fpu_s = env.fpu_status
fpu_c = env.fpu_control
# NB: we don't include in the following table the modification of C1
# when there is a FPU stack overflow, because it depends on the value
# of other status flags
fpu_table = { # stack, read, written
(2,'fcomi'): (0, (env.st(0),1), (reg_flags,)),
(1,'fcomi'): (0, (env.st(0),0), (reg_flags,)),
(2,'fcomip'): (1, (env.st(0),1), (reg_flags,)),
(1,'fcomip'): (1, (env.st(0),0), (reg_flags,)),
(2,'fucomi'): (0, (env.st(0),1), (reg_flags,)),
(1,'fucomi'): (0, (env.st(0),0), (reg_flags,)),
(2,'fucomip'): (1, (env.st(0),1), (reg_flags,)),
(1,'fucomip'): (1, (env.st(0),0), (reg_flags,)),
(1,'fcom'): (0, (env.st(0),0), (fpu_s,)),
(1,'fcomp'): (0, (env.st(0),0), (fpu_s,)),
(0,'fcompp'): (2, (env.st(0),env.st(1)), (fpu_s,)),
(1,'fucom'): (0, (env.st(0),0), (fpu_s,)),
(1,'fucomp'): (1, (env.st(0),0), (fpu_s,)),
(0,'fucompp'): (2, (env.st(0),env.st(1)), (fpu_s,)),
(0,'fldz'): (0, (fpu_c,), (env.st(0),)),
(0,'fld1'): (0, (fpu_c,), (env.st(0),)),
(0,'fldl2t'): (0, (fpu_c,), (env.st(0),)),
(0,'fldl2e'): (0, (fpu_c,), (env.st(0),)),
(0,'fldpi'): (0, (fpu_c,), (env.st(0),)),
(0,'fldlg2'): (0, (fpu_c,), (env.st(0),)),
(0,'fldln2'): (0, (fpu_c,), (env.st(0),)),
(0,'fxam'): (0, (env.st(0),), (fpu_s,)),
(0,'fabs'): (0, (env.st(0),), (env.st(0),)),
(0,'frndint'): (0, (env.st(0),fpu_c), (env.st(0),)),
(0,'fsqrt'): (0, (env.st(0),), (env.st(0),)),
(0,'fchs'): (0, (env.st(0),), (env.st(0),)),
(0,'fptan'): (-1, (env.st(0),), (env.st(0),)),
(0,'fpatan'): (1, (env.st(0),), (env.st(0),)),
(0,'fprem'): (0, (env.st(0),env.st(1)), (env.st(0),fpu_s)),
(0,'fprem1'): (0, (env.st(0),env.st(1)), (env.st(0),fpu_s)),
(1,'fld'): (-1, (0,), (env.st(0),)),
(1,'fild'): (-1, (0,), (env.st(0),)),
(1,'fst'): (0, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fstp'): (1, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fist'): (0, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fistp'): (1, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fisttp'): (1, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fxch'): (0, (env.st(0),0), (env.st(0),0)),
(1,'fiadd'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fisub'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fisubr'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fimul'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fidiv'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fidivr'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fadd'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fsub'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fsubr'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fmul'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fdiv'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fdivr'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(2,'fadd'): (0, (0,1), (fpu_s,0,)),
(2,'fsub'): (0, (0,1), (fpu_s,0,)),
(2,'fsubr'): (0, (0,1), (fpu_s,0,)),
(2,'fmul'): (0, (0,1), (fpu_s,0,)),
(2,'fdiv'): (0, (0,1), (fpu_s,0,)),
(2,'fdivr'): (0, (0,1), (fpu_s,0,)),
(2,'faddp'): (1, (0,1), (fpu_s,0,)),
(2,'fsubp'): (1, (0,1), (fpu_s,0,)),
(2,'fsubrp'): (1, (0,1), (fpu_s,0,)),
(2,'fmulp'): (1, (0,1), (fpu_s,0,)),
(2,'fdivp'): (1, (0,1), (fpu_s,0,)),
(2,'fdivrp'): (1, (0,1), (fpu_s,0,)),
(1,'fbstp'): (0, (env.st(0),fpu_c), (0,)),
(1,'fldcw'): (0, (0,), (fpu_c,)),
(1,'fnstcw'): (0, (fpu_c,), (0,)),
(1,'fnstsw'): (0, (fpu_s,), (0,)),
(0,'fsave'): (0, (fpu_c,fpu_s), ()),
(0,'fnsave'): (0, (fpu_c,fpu_s), ()),
(1,'fstenv'): (0, (fpu_c,fpu_s), (0,)),
(1,'fnstenv'): (0, (fpu_c,fpu_s), (0,)),
(0,'finit'): (0, (), (fpu_c,fpu_s)),
(0,'frstor'): (0, (), (fpu_c,fpu_s)),
(0,'fnclex'): (0, (), (fpu_s,)),
(1,'fxsave'): (0, (fpu_s,), ()),
(1,'fxrstor'): (0, (), (fpu_s,)),
}
try:
key = (len(line.amoco.operands), line.opname)
stack_pop, reg_r, reg_w = fpu_table[key]
except KeyError:
if line.opname.startswith('fcmov'):
stack_pop, reg_r, reg_w = 0, (reg_flags,1), (fpu_s,env.st(0),)
else:
stack_pop, reg_r, reg_w = 0, (), ()
log.error("fpu_table: %r missing",key)
if stack_pop == -1: # push on FPU stack
r.update([env.st(_) for _ in range(7)])
w.update([env.st(1+_) for _ in range(7)])
elif stack_pop == 1: # pop on FPU stack
# bug for faddp %st(7) and similar: because of stack_pop
# the register %st(6) is written instead of %st(7)
r.update([env.st(1+_) for _ in range(7)])
w.update([env.st(_) for _ in range(7)])
elif stack_pop == 2: # pop twice on FPU stack
r.update([env.st(2+_) for _ in range(6)])
w.update([env.st(_) for _ in range(6)])
for reg in reg_r:
if isinstance(reg, int):
r.update(get_touched(line.amoco.operands[reg],False))
r.update(get_touched(line.amoco.operands[reg],True))
else: r.add(reg)
for reg in reg_w:
if isinstance(reg, int):
w.update(get_touched(line.amoco.operands[reg],False))
r.update(get_touched(line.amoco.operands[reg],True))
else: w.add(reg)
# No semantics for MMX/SSE operations in amoco
if is_mmx(line, env):
dst = line.amoco.operands[0]
src = line.amoco.operands[1]
w.update(get_touched(dst, False))
r.update(get_touched(dst, False)) # Not for all MMX operations
r.update(get_touched(src, False))
r.update(get_touched(dst, True))
r.update(get_touched(src, True))
if line.opname.startswith('ucomi'): w.add(reg_flags)
elif 0xF2 in line.prefix or (0xF3 in line.prefix and line.opname != 'ret'):
# True rep/repz/repnz
r.update(get_touched(env.ecx))
w.update(get_touched(env.ecx))
class InstructionRW(InstructionCFG):
__slots__ = ('rw',)
def _set_rw(self):
m = mapper()
self.amoco(m)
r, w = get_rw(m)
add_semantics_missing(self, r, w, env, get_touched)
self.rw = r, w
def reg_name(r):
return str(r)
reg_name = staticmethod(reg_name)
class InstructionDEAD(InstructionRW):
__slots__ = ('pic', 'stack', 'dead', 'immutable')
from amoco.cas import expressions
def evaluate_lines(instr, lines, in_str):
# Run the emulation of the basic bloc
machine = mapper()
def print_machine(machine):
return sorted(str(machine).split("\n"))
for line in lines:
# eip is the next instruction: basic bloc may have been merged,
# but conditional jumps are not taken
machine[env.eip] = env.cst(line.offset,cpu_addrsize)
try:
line.amoco(machine)
except NotImplementedError:
return (('Not implemented', line, print_machine(machine)), [None])
except NameError:
return (('Cannot be emulated (name)', line, print_machine(machine)), [None])
except amoco.arch.core.InstructionError:
return (('Cannot be emulated', line, print_machine(machine)), [None])
except TypeError:
return (('Not callable', line, print_machine(machine)), [None])
if line.opname == 'call':
# amoco emulation pushes eip+i.length
# we prefer to push the label of the next basic bloc
label = instr.symbols.find_symbol(
section=line.section,
address=line.offset+line.amoco.length)
machine[env.mem(env.esp,cpu_addrsize)] = expressions.lab(label, size=cpu_addrsize)
retval = machine[env.eip]
msg, val = evaluate(retval,
machine, instr.symbols.find_symbols, instr, in_str)
if val is None:
return ((str(retval.__class__), retval, print_machine(machine)), [None])
elif val == [None]:
return ((msg, retval, print_machine(machine)), [None])
else:
return (msg, val)
# Interface for expressions
class expr(object):
def get_cst(e):
NON_REGRESSION_FOUND
if e is not None and e._is_cst:
return int(e)
get_cst = staticmethod(get_cst)
def get_lab(e):
if e is not None and e._is_lab:
return e.ref
get_lab = staticmethod(get_lab)
def get_lab_imm(e):
if e is not None and e._is_cst:
return None, int(e)
if e is not None and e._is_lab:
return e.ref, 0
if e is not None and e._is_eqn and e.op.symbol == '+' \
and e.l._is_lab \
and e.r._is_cst:
return e.l.ref, int(e.r)
return None, None
get_lab_imm = staticmethod(get_lab_imm)
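# Illustrative mapping implemented by the branches above ('lbl' is a symbol):
#   cst(8)                  -> (None, 8)
#   lab(lbl)                -> (lbl, 0)
#   eqn(lab(lbl) + cst(4))  -> (lbl, 4)
#   anything else           -> (None, None)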
def get_reg(e):
if e is not None and e._is_reg and not e._is_lab:
return e.ref
get_reg = staticmethod(get_reg)
def get_mem(e):
if e is None:
return None
if not e._is_mem:
return None
return e.a.base+e.a.disp
get_mem = staticmethod(get_mem)
def get_eqn(e):
NON_REGRESSION_FOUND
if e is not None and e._is_eqn:
return True
get_eqn = staticmethod(get_eqn)
def get_tst(e):
if e is not None and e._is_tst:
return e.l, e.r
get_tst = staticmethod(get_tst)
def evaluate(address, machine, find, instr, in_str):
# Generates a list of labels, each label being a possible value
# for the expression 'address'
log.debug("EVALUATE %s\n\t%s", address.__class__.__name__, address)
address = remove_got(address, instr.symbols)
v = expr.get_reg(address)
if v is not None: return 'REG', [ None ]
v = expr.get_lab(address)
if v is not None: return 'ID', [ v ]
v = expr.get_mem(address)
if v is not None:
# Lookup at some address
return evaluate_mem(v, machine, find, instr, in_str)
v = test_clang_switch_array(address)
if v is not None:
L1, L2 = v
L1 = expr.get_lab(L1)
if not hasattr(L1, 'lines'):
# Switch table needs to be parsed later
# Switch table already detected by pattern matching in compilers.py
NON_REGRESSION_FOUND
log.debug("Parse switch table later %r", L1)
pic_base, ptr_size, tbl_size = L1.switch_table
assert ptr_size == 4
assert L2 == "-%s"%pic_base
return 'SWITCH', 'TABLE'
msg = 'ARRAY'
lines = [ _.value[0] for _ in L1.lines ]
table = []
for s in lines:
if not hasattr(s, 'name'):
s = False
elif not s.name.endswith(L2):
msg = 'INCOHERENT'
continue
else:
s = find(name = s.name[:-len(L2)])[0]
if not s in table: table.append(s)
if not hasattr(L1, 'size'):
# Switch table not complete
if not None in table: table.append(None)
return msg, table
v = expr.get_tst(address)
if v is not None:
msg_l, res_l = evaluate(v[0], machine, find, instr, in_str)
msg_r, res_r = evaluate(v[1], machine, find, instr, in_str)
if res_l is None or res_r is None:
return None, None
return "%s+%s"%(msg_l,msg_r), res_l+res_r
log.debug("Need better analysis of %s:%s", address.__class__.__name__, address)
return None, None
def evaluate_mem(address, machine, find, instr, in_str):
log.debug("EVALUATE_MEM %s\n\t%s", address.__class__.__name__, address)
v = expr.get_reg(address)
if v is not None:
return 'MEM_REG', [ None ]
table, offset = expr.get_lab_imm(address)
if offset is not None:
msg, val = deref_table(table, offset, instr, in_str)
if val is not None:
return msg, val
v = expr.get_mem(address)
if v is not None:
return 'MEM_MEM', [ None ]
return array_detection(address, machine, find, instr, in_str)
def array_detection(input, machine, find, instr, in_str):
log.debug("ARRAY_DETECT %s\n\t%s", input.__class__.__name__, input)
dst_lst = []
# Is it an element of an array?
# Find the multiplication, replace it by 'index_in_array'
index_var = env.ext('index_in_array',size=cpu_addrsize)
item_len = 0
if input.op.symbol == '+' and input.l._is_eqn:
if input.l.op.symbol == '+' and input.l.l._is_eqn and \
input.l.l.op.symbol == '*' and input.l.l.r._is_cst:
item_len = int(input.l.l.r)
input.l.l = index_var
elif input.l.op.symbol == '+' and input.l.r._is_eqn and \
input.l.r.op.symbol == '*' and input.l.r.r._is_cst:
item_len = int(input.l.r.r)
input.l.r = index_var
elif input.l.op.symbol == '*' and input.l.r._is_cst:
item_len = int(input.l.r)
input.l = index_var
elif input.l.op.symbol == '<<':
item_len = 1 << int(input.l.r)
input.l = index_var
elif input.op.symbol == '+' and input.r._is_eqn:
if input.r.op.symbol == '*' and input.r.r._is_cst:
item_len = int(input.r.r)
if input.r.l._is_ptr and input.r.l.disp == 0 and \
input.r.l.base._is_eqn and input.r.l.base.op.symbol == '+' and \
input.r.l.base.r._is_eqn and input.r.l.base.r.op.symbol == '*' \
and input.r.l.base.r.r._is_cst \
and input.r.l.base.l == input.r.l.base.r.l:
item_len *= 1 + int(input.r.l.base.r.r)
input.r = index_var
elif input.r.op.symbol == '<<':
NON_REGRESSION_FOUND
item_len = 1 << int(input.r.r)
input.r = index_var
if item_len == 0:
msg = 'MEM_EXP - NOT AN ARRAY'
return msg, [None]
log.debug(" ARRAY of %d-byte items", item_len)
# Usually 4-byte items
# Can be 8-byte items e.g. for ceval.o from python2.4.5 / gcc 4.6.3
# Can be 12-byte items e.g. for deflate.o from zlib 1.2.8 / gcc 4.6.3
invalid_indexes = 0
index_in_array = -item_len
while invalid_indexes < 4:
index_in_array += item_len
m2 = mapper()
m2[index_var] = env.cst(index_in_array, size=cpu_addrsize)
address_in_array = input.eval(m2)
log.debug(" x[%d] at %s:%s",
index_in_array//item_len,
address_in_array.__class__.__name__,
address_in_array)
msg, val = 'NOT FOUND', None
table, offset = expr.get_lab_imm(address_in_array)
if val is None and offset is not None:
msg, val = deref_table(table, offset, instr, in_str)
if val is None:
mapper.assume_no_aliasing = True
offset = machine.M(env.mem(address_in_array))
mapper.assume_no_aliasing = False
offset = remove_got(offset, instr.symbols)
v = expr.get_lab(offset)
if v:
msg, val = 'MEM', [ v ]
table, offset = expr.get_lab_imm(expr.get_mem(offset))
if offset is not None:
msg, val = deref_table(table, offset, instr, in_str)
if val == 'TABLE':
return msg, val
if val in (None, [None]):
log.debug(" ----> %s", msg)
invalid_indexes += 1
continue
for label in val:
if label.name.endswith('@GOTOFF'):
# to make this work also with executables, we will need to
# change our API and get the offset value that will have to
# be substracted; removing @GOTOFF is not enough!
label = find(name = label.name[:-7])[0]
log.debug(" => %s", label)
if not label in dst_lst:
dst_lst.append(label)
if dst_lst == []:
return 'MEM_EXP', [None]
return 'ARRAY', dst_lst
def deref_table(table, offset, instr, in_str):
pool = instr.symbols
log.debug("DEREF %s at %s", table, offset)
if table is None:
return deref_address(offset, pool, in_str)
if getattr(table, 'section', None) in ['.got.plt', '.got', '.idata']:
assert offset == 0
return 'GOT_PLT', [ table ]
if not hasattr(table, 'lines'):
# 'table' has not been parsed; will be later
return 'MEM_TABLE %s not parsed (offset %d)' % (table, offset), 'TABLE'
if offset < table.bytelen:
# Offset in a table
sz = 0
for line in table.lines:
if sz == offset: break
sz += line.bytelen
else:
line = None
if getattr(line, 'type', None) == 'long' and \
hasattr(line.value[0], 'name'):
label = line.value[0]
if label.name.startswith('_GLOBAL_OFFSET_TABLE_+[.-'):
label = label.reference
return 'MEM_ID', [ label ]
else:
return 'MEM_TABLE %s[%d]=%s' % (table, offset, line), [ None ]
if not hasattr(table, 'address'):
# Non-regression: gp.o from pari-2.5.5 / gcc 4.6.3
NON_REGRESSION_FOUND
return 'MEM_LAB_IMM %r offset=%s' % (table, offset), [ None ]
return deref_address(table.address + offset, pool, in_str)
import struct
def deref_address(offset, pool, in_str):
log.debug("DEREF_ADDRESS %#x", offset)
if offset == 0:
# Non-regression: cjpeg.o from libjpeg-6b / gcc 3.2.3
# relocated value
NON_REGRESSION_FOUND
return 'NULL', [ None ]
# Read from file (mapped in memory)
# Should not happen, the data sections should have been parsed and
# labels should have been created
# However, compilers sometimes generate (idx*4)+(label-4) rather than
# ((idx-1)*4)+label, and therefore 'label' is hidden
section = pool.get_sectionname(offset)
if section in [".data"]:
address = struct.unpack("I", in_str[offset:offset+4])[0]
a_section = pool.get_sectionname(address)
if a_section in [".text", ".plt"]:
label_list = pool.find_symbols(address = address)
if len(label_list): return 'MEM_VAL', label_list
if section in [".got"]:
label = pool.find_symbols(address = offset)
if label == []:
NON_REGRESSION_FOUND
return 'MEM_LAB_IMM address=%#x' % offset, [ None ]
if label[0].name.startswith('.rel.dyn.'):
label = pool.find_symbol(name = label[0].name[9:])
else:
label = label[0]
NON_REGRESSION_FOUND
return 'MEM_INT GOT', [ label ]
if section in [".idata"]:
NON_REGRESSION_FOUND
label = pool.find_symbol(address = offset)
if label.name.startswith('msvcrt.dll'):
return 'MSVCRT', [ label ]
return 'MEM_INT', [ label ]
return 'NOT IN TABLE [%s:%#x]' % (section, offset), None
def remove_got(address, pool):
if '@GOT' in str(address):
# When the expression contains @GOT or @GOTOFF, one should cancel
# the PIC offset
# This trick works only for relocatable objects :-(
v = remove_pic_offset(address, pool)
if v is not None:
log.debug("REMOVE GOT => %s", v)
return v
return address
def remove_pic_offset(e, pool):
log.debug("DETECT PIC FROM %s:%s", e.__class__.__name__, e)
if e._is_tst:
label_l = remove_pic_offset(e.l, pool)
label_r = remove_pic_offset(e.r, pool)
if label_l is None or label_r is None:
return None
return env.tst(e.tst, label_l, label_r)
# M32[M32[M32[PIC_OFFSET+toto@GOT]]+cte]
# => M32[M32[toto]+cte]
if e._is_mem \
and e.a.base._is_mem \
and e.a.base.a.disp == 0 \
and e.a.base.a.base._is_mem:
label = remove_pic_offset(e.a.base.a.base, pool)
if label is None:
return None
return env.mem(env.mem(label), disp=e.a.disp)
# M32[M32[PIC_OFFSET+toto@GOT]+cte]
# => M32[toto+cte]
if e._is_mem and e.a.base._is_mem:
label = remove_pic_offset(e.a.base, pool)
if label is None:
return None
return env.mem(label, disp=e.a.disp)
# M32[M32[PIC_OFFSET+toto@GOT]+formula]
# => M32[toto+formula]
if e._is_mem \
and e.a.base._is_eqn \
and e.a.base.op.symbol == '+' \
and e.a.base.l._is_mem:
label = remove_pic_offset(e.a.base.l, pool)
if label is None: return
return env.mem(label+e.a.base.r, disp=e.a.disp)
if e._is_mem and not hasattr(e.a.disp, '_is_lab'):
log.debug("BASE %s; DISP %s; TODO", e.a.base, e.a.disp)
return None
# M32[PIC_OFFSET+toto@GOT]
# => toto
if e._is_mem \
and e.a.disp._is_lab \
and e.a.disp.ref.name.endswith('@GOT'):
label_name = e.a.disp.ref.name[:-4]
pic_data = e.a.base
if not check_pic_data(pic_data):
NON_REGRESSION_FOUND
log.debug("PIC OFFSET [%s] LABEL %s", pic_data, label_name)
return None
return env.lab(pool.find_symbol(name = label_name), size=cpu_addrsize)
# M32[PIC_OFFSET+toto@GOTOFF]
# => M32[toto]
if e._is_mem \
and e.a.disp._is_lab \
and e.a.disp.ref.name.endswith('@GOTOFF'):
label = remove_pic_offset(e.a, pool)
if label is None: return
# Not sound: usually is a reference to somewhere in a data section
# that may change at runtime
return env.mem(label)
# (M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET)
if e._is_ptr \
and e.disp == 0:
return remove_pic_offset(e.base, pool)
# (PIC_OFFSET+toto@GOTOFF)
# => toto
if e._is_ptr \
and e.disp._is_lab \
and e.disp.ref.name.endswith('@GOTOFF'):
label_name = e.disp.ref.name[:-7]
pic_data = e.base
if not check_pic_data(pic_data):
log.debug("PIC OFFSET [%s] LABEL %s", pic_data, label_name)
return None
return env.lab(pool.find_symbol(name = label_name), size=cpu_addrsize)
# (PIC_OFFSET+M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF])
# (M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET)
# (-M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET)
# => M32[toto+INDEX_IN_TABLE]
# @GOTOFF will be removed later from the deref value
# to make this work also with executables, we will need to change
# our API and return the offset that will have to be subtracted
if e._is_eqn and e.op.symbol == '+':
base, index, pic_data, pic_data_dup = extract_base_index(e)
if base is None:
log.error("Unknown base %s", e)
return None
if pic_data != pic_data_dup:
log.error("Inconsistent PIC %s != %s", pic_data, pic_data_dup)
return None
label_name = base.disp.ref.name[:-7]
if not check_pic_data(pic_data):
log.error("PIC OFFSET [%s] LABEL %s", pic_data, label_name)
# Don't abort, for now, improvement of pic_tracking needed
label = env.lab(pool.find_symbol(name = label_name), size=cpu_addrsize)
return env.mem(index, disp=label)
def extract_base_index(e):
# M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET
# e.l.a.base.l e.l.a.base.r e.l.a.disp + e.r
if (e.l._is_mem and
e.l.a.disp._is_lab and
e.l.a.base._is_eqn and
e.l.a.base.op.symbol == '+'):
return e.l.a, e.l.a.base.l, e.l.a.base.r, e.r
# PIC_OFFSET+M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]
# e.l + e.r.a.base.l e.r.a.base.r e.r.a.disp
if (e.r._is_mem and
e.r.a.disp._is_lab and
e.r.a.base._is_eqn and
e.r.a.base.op.symbol == '+'):
return e.r.a, e.r.a.base.l, e.r.a.base.r, e.l
# (-M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET)
if (e.l._is_eqn and
e.l.op.symbol == '-' and
e.l.l is None and
e.l.r._is_mem and
e.l.r.a.disp._is_lab and
e.l.r.a.base._is_eqn and
e.l.r.a.base.op.symbol == '+'
):
return e.l.r.a, e.l.r.a.base.l, e.l.r.a.base.r, e.r
return None, None, None, None
def check_pic_data(pic):
pic = str(pic)
if pic == '(@_GLOBAL_OFFSET_TABLE_+M32(esp))':
# gcc 4.x PIC
# The backtracking went back to the start of the function, where
# the PIC offset is computed as @_GLOBAL_OFFSET_TABLE_+M32(esp)
# after a call to __i686.get_pc_thunk.?x
return True
if pic == '(@_GLOBAL_OFFSET_TABLE_+ebx)':
# gcc 4.x PIC
# The backtracking went after returning from __i686.get_pc_thunk.bx
return True
if pic == 'ebx':
# gcc 4.x PIC
# The backtracking went not far but ebx may contain the PIC offset
# This is a risky hypothesis, yet it seems to work
return True
if pic == 'ecx':
# gcc 4.x PIC
# The backtracking went not far but ecx may contain the PIC offset
# This is a risky hypothesis, yet it seems to work
return True
return False
def test_clang_switch_array(address):
# Expression of the form M32[L1-L2+r2+(r1*4)]+r2
# L1 is the label of the table
# r1 is the index in the table (register, sometimes shifted)
# r2 stores the address of label L2 (register, immediate, ...)
if not address._is_eqn:
return None
if not address.op.symbol == '+':
return None
if address.l._is_mem and getattr(address.l.a.disp, '_is_eqn', False):
mem_expr, r2 = address.l, address.r
elif address.r._is_mem and getattr(address.r.a.disp, '_is_eqn', False):
r2, mem_expr = address.l, address.r
else:
return None
if mem_expr.a.base._is_eqn and mem_expr.a.base.op.symbol == '+' and \
mem_expr.a.base.r == r2:
r1_4 = mem_expr.a.base.l
elif mem_expr.a.base._is_eqn and mem_expr.a.base.op.symbol == '+' and \
mem_expr.a.base.l == r2:
r1_4 = mem_expr.a.base.r
else:
return None
if not r1_4._is_eqn or not r1_4.r._is_cst or r1_4.r != 4:
return None
if mem_expr.a.disp._is_eqn and mem_expr.a.disp.op.symbol == '+' and \
mem_expr.a.disp.r._is_lab and mem_expr.a.disp.l._is_eqn and \
mem_expr.a.disp.l.op.symbol == '-' and \
mem_expr.a.disp.l.l is None and mem_expr.a.disp.l.r._is_lab:
L1 = mem_expr.a.disp.r
L2 = '-%s' % mem_expr.a.disp.l.r.ref
else:
return None
# Now that everything has been verified, we scan the array
log.debug("CLANG SWITCH %s%s %s %s", L1, L2, r1_4, r2)
return L1, L2
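# Illustrative sketch (placeholder register/label names): a clang jump table
# address typically looks like
#   M32[L1-L2+ebx+(eax*4)]+ebx
# with r1 = eax (table index) and r2 = ebx (holding the address of L2); for such
# an expression test_clang_switch_array returns (L1, '-L2'), and the caller is
# expected to scan the table starting at L1.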
|
py
|
1a58f5169277912f4392ee4045fe8759473e375a
|
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import itertools
import json
import os
import pickle
import re
import shutil
import tempfile
import unittest
from collections import OrderedDict
from itertools import takewhile
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union
from huggingface_hub import HfApi
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AlbertTokenizerFast,
BertTokenizer,
BertTokenizerFast,
PreTrainedTokenizer,
PreTrainedTokenizerBase,
PreTrainedTokenizerFast,
SpecialTokensMixin,
Trainer,
TrainingArguments,
is_tf_available,
is_torch_available,
)
from transformers.testing_utils import (
ENDPOINT_STAGING,
PASS,
USER,
get_tests_dir,
is_pt_tf_cross_test,
is_staging_test,
require_tf,
require_tokenizers,
require_torch,
slow,
)
from transformers.tokenization_utils import AddedToken, Trie
if is_torch_available():
import torch.nn as nn
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel, TFPreTrainedModel
NON_ENGLISH_TAGS = ["chinese", "dutch", "french", "finnish", "german", "multilingual"]
SMALL_TRAINING_CORPUS = [
["This is the first sentence.", "This is the second one."],
["This sentence (contains #) over symbols and numbers 12 3.", "But not this one."],
]
def filter_non_english(_, pretrained_name: str):
"""Filter all the model for non-english language"""
return not any([lang in pretrained_name for lang in NON_ENGLISH_TAGS])
def filter_roberta_detectors(_, pretrained_name: str):
return "detector" not in pretrained_name
def merge_model_tokenizer_mappings(
model_mapping: Dict["PretrainedConfig", Union["PreTrainedModel", "TFPreTrainedModel"]],
tokenizer_mapping: Dict["PretrainedConfig", Tuple["PreTrainedTokenizer", "PreTrainedTokenizerFast"]],
) -> Dict[
Union["PreTrainedTokenizer", "PreTrainedTokenizerFast"],
Tuple["PretrainedConfig", Union["PreTrainedModel", "TFPreTrainedModel"]],
]:
configurations = list(model_mapping.keys())
model_tokenizer_mapping = OrderedDict([])
for configuration in configurations:
if configuration in model_mapping and configuration in tokenizer_mapping:
model = model_mapping[configuration]
tokenizer = tokenizer_mapping[configuration][0]
tokenizer_fast = tokenizer_mapping[configuration][1]
model_tokenizer_mapping.update({tokenizer: (configuration, model)})
if tokenizer_fast is not None:
model_tokenizer_mapping.update({tokenizer_fast: (configuration, model)})
return model_tokenizer_mapping
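# Rough shape of the merged mapping (illustrative example; BertConfig/BertModel
# are only named for concreteness and are not imported in this module):
#   model_mapping     = {BertConfig: BertModel}
#   tokenizer_mapping = {BertConfig: (BertTokenizer, BertTokenizerFast)}
#   merge_model_tokenizer_mappings(model_mapping, tokenizer_mapping)
#   => {BertTokenizer: (BertConfig, BertModel), BertTokenizerFast: (BertConfig, BertModel)}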
class TokenizerTesterMixin:
tokenizer_class = None
rust_tokenizer_class = None
test_slow_tokenizer = True
test_rust_tokenizer = True
space_between_special_tokens = False
from_pretrained_kwargs = None
from_pretrained_filter = None
from_pretrained_vocab_key = "vocab_file"
test_seq2seq = True
# set to True to test a sentencepiece tokenizer
test_sentencepiece = False
# set to True to ignore casing when testing a sentencepiece tokenizer
# test_sentencepiece must also be set to True
test_sentencepiece_ignore_case = False
def setUp(self) -> None:
# Tokenizer.filter makes it possible to filter which Tokenizer to test based on all the
# information available in Tokenizer (name, rust class, python class, vocab key name)
if self.test_rust_tokenizer:
tokenizers_list = [
(
self.rust_tokenizer_class,
pretrained_name,
self.from_pretrained_kwargs if self.from_pretrained_kwargs is not None else {},
)
for pretrained_name in self.rust_tokenizer_class.pretrained_vocab_files_map[
self.from_pretrained_vocab_key
].keys()
if self.from_pretrained_filter is None
or (self.from_pretrained_filter is not None and self.from_pretrained_filter(pretrained_name))
]
self.tokenizers_list = tokenizers_list[:1] # Let's just test the first pretrained vocab for speed
else:
self.tokenizers_list = []
with open(f"{get_tests_dir()}/fixtures/sample_text.txt", encoding="utf-8") as f_data:
self._data = f_data.read().replace("\n\n", "\n").strip()
self.tmpdirname = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_input_output_texts(self, tokenizer):
input_txt = self.get_clean_sequence(tokenizer)[0]
return input_txt, input_txt
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
return output_txt, output_ids
def get_tokenizers(self, fast=True, **kwargs) -> List[PreTrainedTokenizerBase]:
if fast and self.test_rust_tokenizer and self.test_slow_tokenizer:
return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
elif fast and self.test_rust_tokenizer:
return [self.get_rust_tokenizer(**kwargs)]
elif self.test_slow_tokenizer:
return [self.get_tokenizer(**kwargs)]
else:
raise ValueError("This tokenizer class has no tokenizer to be tested.")
def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def tokenizer_integration_test_util(
self,
expected_encoding: Dict,
model_name: str,
revision: str = None,
sequences: List[str] = None,
decode_kwargs: Dict[str, Any] = None,
padding: bool = True,
):
"""
Utility for integration tests.
Text is tokenized and then decoded back to text; both results are then checked.
Args:
expected_encoding:
The expected result of the tokenizer output.
model_name:
The model name of the tokenizer to load and use.
revision:
The full git revision number of the model. This is to pin the
tokenizer config and to avoid that tests start to fail if the
config gets changed upstream.
sequences:
Can overwrite the texts that are used to check the tokenizer.
This is useful if the tokenizer supports non-English languages
like French.
decode_kwargs:
Additional args for the ``decode`` function which reverts the
tokenized text back to a string.
padding:
Activates and controls padding of the tokenizer.
"""
decode_kwargs = {} if decode_kwargs is None else decode_kwargs
if sequences is None:
sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained "
"models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
if self.test_sentencepiece_ignore_case:
sequences = [sequence.lower() for sequence in sequences]
tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
tokenizer = tokenizer_class.from_pretrained(
model_name,
revision=revision, # to pin the tokenizer version
)
encoding = tokenizer(sequences, padding=padding)
decoded_sequences = [
tokenizer.decode(seq, skip_special_tokens=True, **decode_kwargs) for seq in encoding["input_ids"]
]
encoding_data = encoding.data
self.assertDictEqual(encoding_data, expected_encoding)
for expected, decoded in zip(sequences, decoded_sequences):
if self.test_sentencepiece_ignore_case:
expected = expected.lower()
self.assertEqual(expected, decoded)
def assert_padded_input_match(self, input_r: list, input_p: list, max_length: int, pad_token_id: int):
# Ensure we match max_length
self.assertEqual(len(input_r), max_length)
self.assertEqual(len(input_p), max_length)
# Ensure the number of padded tokens is the same
padded_tokens_r = list(takewhile(lambda i: i == pad_token_id, reversed(input_r)))
padded_tokens_p = list(takewhile(lambda i: i == pad_token_id, reversed(input_p)))
self.assertSequenceEqual(padded_tokens_r, padded_tokens_p)
def assert_batch_padded_input_match(
self,
input_r: dict,
input_p: dict,
max_length: int,
pad_token_id: int,
model_main_input_name: str = "input_ids",
):
for i_r in input_r.values():
self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual(
len(i_r[1]), max_length
)
for i_r, i_p in zip(input_r[model_main_input_name], input_p[model_main_input_name]):
self.assert_padded_input_match(i_r, i_p, max_length, pad_token_id)
for i_r, i_p in zip(input_r["attention_mask"], input_p["attention_mask"]):
self.assertSequenceEqual(i_r, i_p)
@staticmethod
def convert_batch_encode_plus_format_to_encode_plus(batch_encode_plus_sequences):
# Switch from batch_encode_plus format: {'input_ids': [[...], [...]], ...}
# to the list of examples/ encode_plus format: [{'input_ids': [...], ...}, {'input_ids': [...], ...}]
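# For example (hypothetical values):
#   {"input_ids": [[1, 2], [3, 4]], "attention_mask": [[1, 1], [1, 0]]}
# becomes
#   [{"input_ids": [1, 2], "attention_mask": [1, 1]},
#    {"input_ids": [3, 4], "attention_mask": [1, 0]}]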
return [
{value: batch_encode_plus_sequences[value][i] for value in batch_encode_plus_sequences.keys()}
for i in range(len(batch_encode_plus_sequences["input_ids"]))
]
# TODO: this test can be combined with `test_sentencepiece_tokenize_and_convert_tokens_to_string` after the latter is extended to all tokenizers.
def test_tokenize_special_tokens(self):
"""Test `tokenize` with special tokens."""
tokenizers = self.get_tokenizers(fast=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
SPECIAL_TOKEN_1 = "[SPECIAL_TOKEN_1]"
SPECIAL_TOKEN_2 = "[SPECIAL_TOKEN_2]"
# TODO:
# Can we combine `unique_no_split_tokens` and `all_special_tokens` (and related properties)
# into one variable (property) for better maintainability?
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})
token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
self.assertEqual(len(token_1), 1)
self.assertEqual(len(token_2), 1)
self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
# TODO: this test could be extended to all tokenizers - not just the sentencepiece
def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
"""Test ``_tokenize`` and ``convert_tokens_to_string``."""
if not self.test_sentencepiece:
return
tokenizer = self.get_tokenizer()
text = "This is text to test the tokenizer."
if self.test_sentencepiece_ignore_case:
text = text.lower()
tokens = tokenizer.tokenize(text)
self.assertTrue(len(tokens) > 0)
# check if converting back to original text works
reverse_text = tokenizer.convert_tokens_to_string(tokens)
if self.test_sentencepiece_ignore_case:
reverse_text = reverse_text.lower()
self.assertEqual(reverse_text, text)
def test_subword_regularization_tokenizer(self) -> None:
if not self.test_sentencepiece:
return
# Subword regularization is only available for the slow tokenizer.
sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1}
tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs)
self.assertTrue(hasattr(tokenizer, "sp_model_kwargs"))
self.assertIsNotNone(tokenizer.sp_model_kwargs)
self.assertTrue(isinstance(tokenizer.sp_model_kwargs, dict))
self.assertEqual(tokenizer.sp_model_kwargs, sp_model_kwargs)
self.check_subword_sampling(tokenizer)
def test_pickle_subword_regularization_tokenizer(self) -> None:
"""Google "pickle __getstate__ __setstate__" if you are struggling with this."""
if not self.test_sentencepiece:
return
# Subword regularization is only available for the slow tokenizer.
sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1}
tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs)
tokenizer_bin = pickle.dumps(tokenizer)
del tokenizer
tokenizer_new = pickle.loads(tokenizer_bin)
self.assertTrue(hasattr(tokenizer_new, "sp_model_kwargs"))
self.assertIsNotNone(tokenizer_new.sp_model_kwargs)
self.assertTrue(isinstance(tokenizer_new.sp_model_kwargs, dict))
self.assertEqual(tokenizer_new.sp_model_kwargs, sp_model_kwargs)
self.check_subword_sampling(tokenizer_new)
def test_model_input_names_signature(self):
accepted_model_main_input_names = [
"input_ids", # nlp models
"input_values", # speech models
]
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
# first name of model_input_names has to correspond to main model input name
# to make sure `tokenizer.pad(...)` works correctly
self.assertTrue(tokenizer.model_input_names[0] in accepted_model_main_input_names)
def test_rust_tokenizer_signature(self):
if not self.test_rust_tokenizer:
return
signature = inspect.signature(self.rust_tokenizer_class.__init__)
self.assertIn("tokenizer_file", signature.parameters)
self.assertIsNone(signature.parameters["tokenizer_file"].default)
def test_tokenizer_slow_store_full_signature(self):
if not self.test_slow_tokenizer:
return
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_tokenizer_fast_store_full_signature(self):
if not self.test_rust_tokenizer:
return
signature = inspect.signature(self.rust_tokenizer_class.__init__)
tokenizer = self.get_rust_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty and parameter_name not in [
"vocab_file",
"merges_file",
"tokenizer_file",
]:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence, _ = self.get_input_output_texts(tokenizer)
# We don't have an exact equivalence on `tokenize()` between Rust and Slow
# Slow tokenizers only split tokens, Rust tokenizers will replace unknown tokens with <unk>
# tokens = tokenizer.tokenize(sequence)
# rust_tokens = rust_tokenizer.tokenize(sequence)
# self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
ids = tokenizer.encode(sequence, add_special_tokens=True)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=True)
self.assertListEqual(ids, rust_ids)
def test_tokenizers_common_properties(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
attributes_list = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
for attr in attributes_list:
self.assertTrue(hasattr(tokenizer, attr))
self.assertTrue(hasattr(tokenizer, attr + "_id"))
self.assertTrue(hasattr(tokenizer, "additional_special_tokens"))
self.assertTrue(hasattr(tokenizer, "additional_special_tokens_ids"))
attributes_list = [
"model_max_length",
"init_inputs",
"init_kwargs",
]
if not isinstance(tokenizer, PreTrainedTokenizerFast):
attributes_list += [
"added_tokens_encoder",
"added_tokens_decoder",
]
for attr in attributes_list:
self.assertTrue(hasattr(tokenizer, attr))
def test_save_and_load_tokenizer(self):
# safety check on max_len default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00E9d,running"
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
tokenizers = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"])
additional_special_tokens = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token")
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
self.assertIn("bim", after_vocab)
self.assertIn("bambam", after_vocab)
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(tmpdirname)
# Test that we can also use the non-legacy saving format for fast tokenizers
tokenizers = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"])
additional_special_tokens = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token")
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
self.assertIn("bim", after_vocab)
self.assertIn("bambam", after_vocab)
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(tmpdirname)
def test_pickle_tokenizer(self):
"""Google pickle __getstate__ __setstate__ if you are struggling with this."""
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertIsNotNone(tokenizer)
text = "Munich and Berlin are nice cities"
subwords = tokenizer.tokenize(text)
filename = os.path.join(self.tmpdirname, "tokenizer.bin")
with open(filename, "wb") as handle:
pickle.dump(tokenizer, handle)
with open(filename, "rb") as handle:
tokenizer_new = pickle.load(handle)
subwords_loaded = tokenizer_new.tokenize(text)
self.assertListEqual(subwords, subwords_loaded)
@require_tokenizers
def test_pickle_added_tokens(self):
tok1 = AddedToken("<s>", rstrip=True, lstrip=True, normalized=False, single_word=True)
tok2 = pickle.loads(pickle.dumps(tok1))
self.assertEqual(tok1.__getstate__(), tok2.__getstate__())
def test_added_tokens_do_lower_case(self):
# TODO(thom) activate fast tokenizer tests once Rust tokenizers accept whitespace in added tokens.
tokenizers = [self.get_tokenizer(do_lower_case=True)] if self.test_slow_tokenizer else []
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if not hasattr(tokenizer, "do_lower_case") or not tokenizer.do_lower_case:
continue
special_token = tokenizer.all_special_tokens[0]
text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token
text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token
toks0 = tokenizer.tokenize(text) # toks before adding new_toks
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"]
added = tokenizer.add_tokens(new_toks)
self.assertEqual(added, 2)
toks = tokenizer.tokenize(text)
toks2 = tokenizer.tokenize(text2)
self.assertEqual(len(toks), len(toks2))
self.assertListEqual(toks, toks2)
if not isinstance(tokenizer, PreTrainedTokenizerFast):
# Python tokenizers can have added tokens with spaces inside them
# cf https://github.com/huggingface/tokenizers/issues/302
self.assertNotEqual(len(toks), len(toks0)) # toks0 should be longer
# Check that none of the special tokens are lowercased
sequence_with_special_tokens = "A " + " yEs ".join(tokenizer.all_special_tokens) + " B"
tokenized_sequence = tokenizer.tokenize(sequence_with_special_tokens)
for special_token in tokenizer.all_special_tokens:
self.assertTrue(special_token in tokenized_sequence)
tokenizers = [self.get_tokenizer(do_lower_case=True)] if self.test_slow_tokenizer else []
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if hasattr(tokenizer, "do_lower_case") and tokenizer.do_lower_case:
continue
special_token = tokenizer.all_special_tokens[0]
text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token
text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"]
toks0 = tokenizer.tokenize(text) # toks before adding new_toks
added = tokenizer.add_tokens(new_toks)
self.assertIn(added, [2, 4])
toks = tokenizer.tokenize(text)
toks2 = tokenizer.tokenize(text2)
self.assertEqual(len(toks), len(toks2)) # Length should still be the same
self.assertNotEqual(toks[1], toks2[1]) # But at least the first non-special tokens should differ
if not isinstance(tokenizer, PreTrainedTokenizerFast):
# Python tokenizers can have added tokens with spaces inside them
# cf https://github.com/huggingface/tokenizers/issues/302
self.assertNotEqual(len(toks), len(toks0)) # toks0 should be longer
def test_add_tokens_tokenizer(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
def test_add_special_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
input_text, ids = self.get_clean_sequence(tokenizer)
special_token = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
self.assertEqual(len(encoded_special_token), 1)
text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
encoded = tokenizer.encode(text, add_special_tokens=False)
input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
self.assertEqual(encoded, input_encoded + special_token_id)
decoded = tokenizer.decode(encoded, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
input_text, output_text = self.get_input_output_texts(tokenizer)
tokens = tokenizer.tokenize(input_text)
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
self.assertEqual(text_2, output_text)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# new_toks = ["[ABC]", "[DEF]"] # TODO(thom) add this one back when Rust toks are ready: , "GHI IHG"]
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]" # TODO(thom) add back cf above: "[ABC] [DEF] [ABC] GHI IHG [DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(input, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
def test_pretrained_model_lists(self):
# We should have at least one default checkpoint for each tokenizer
# We should specify the max input length as well (used in some part to list the pretrained checkpoints)
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
self.assertEqual(
len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]),
len(self.tokenizer_class.max_model_input_sizes),
)
weights_list = list(self.tokenizer_class.max_model_input_sizes.keys())
weights_lists_2 = []
for file_id, map_list in self.tokenizer_class.pretrained_vocab_files_map.items():
weights_lists_2.append(list(map_list.keys()))
for weights_list_2 in weights_lists_2:
self.assertListEqual(weights_list, weights_list_2)
def test_mask_output(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
seq_0 = "Test this method."
seq_1 = "With these inputs."
information = tokenizer.encode_plus(seq_0, seq_1, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
# We want sequence 0 and sequence 1 to be tagged with
# token type ids 0 and 1 respectively
# (regardless of whether the model uses token type ids)
# We rely on this assumption in the QA pipeline among other places
output = tokenizer(seq_0, return_token_type_ids=True)
self.assertIn(0, output["token_type_ids"])
def test_sequence_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = "With these inputs."
# We want sequence 0 and sequence 1 to be tagged with
# token type ids 0 and 1 respectively
# (regardless of whether the model uses token type ids)
# We rely on this assumption in the QA pipeline among other places
output = tokenizer(seq_0)
self.assertIn(0, output.sequence_ids())
output = tokenizer(seq_0, seq_1)
self.assertIn(0, output.sequence_ids())
self.assertIn(1, output.sequence_ids())
if tokenizer.num_special_tokens_to_add(pair=True):
self.assertIn(None, output.sequence_ids())
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = "With these inputs."
sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=False)
attached_sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_maximum_encoding_length_single_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
sequence = tokenizer.encode(seq_0, add_special_tokens=False)
total_length = len(sequence)
assert total_length > 4, "Issue with the testing sequence, please update it, it's too short"
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_1 = seq_0 * model_max_length
sequence1 = tokenizer(seq_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
assert (
total_length1 > model_max_length
), "Issue with the testing sequence, please update it it's too short"
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"Truncation: {truncation_state}"):
output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state)
self.assertEqual(len(output["input_ids"]), model_max_length)
output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(seq_1, padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer([seq_1], padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
# Overflowing tokens
stride = 2
information = tokenizer(
seq_0,
max_length=total_length - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence[:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :])
else:
truncated_sequence = information["input_ids"]
overflowing_tokens = information["overflowing_tokens"]
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence[:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :])
def test_maximum_encoding_length_pair_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Build a sequence from our model's vocabulary
stride = 2
seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
if len(ids) <= 2 + stride:
seq_0 = (seq_0 + " ") * (2 + stride)
ids = None
seq0_tokens = tokenizer.encode(seq_0, add_special_tokens=False)
assert len(seq0_tokens) > 2 + stride
seq_1 = "This is another sentence to be encoded."
seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
if abs(len(seq0_tokens) - len(seq1_tokens)) <= 2:
seq1_tokens = seq1_tokens + seq1_tokens
seq_1 = tokenizer.decode(seq1_tokens, clean_up_tokenization_spaces=False)
seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
assert len(seq1_tokens) > 2 + stride
smallest = seq1_tokens if len(seq0_tokens) > len(seq1_tokens) else seq0_tokens
# We are not using the special tokens - a bit too hard to test all the tokenizers with this
# TODO try this again later
sequence = tokenizer.encode(seq_0, seq_1, add_special_tokens=False) # , add_prefix_space=False)
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_2 = seq_0 * model_max_length
assert len(seq_2) > model_max_length
sequence1 = tokenizer(seq_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
sequence2 = tokenizer(seq_2, seq_1, add_special_tokens=False)
total_length2 = len(sequence2["input_ids"])
assert total_length1 < model_max_length - 10, "Issue with the testing sequence, please update it."
assert total_length2 > model_max_length, "Issue with the testing sequence, please update it."
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"):
output = tokenizer(seq_2, seq_1, padding=padding_state, truncation=truncation_state)
self.assertEqual(len(output["input_ids"]), model_max_length)
output = tokenizer(
[seq_2], [seq_1], padding=padding_state, truncation=truncation_state
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
# Simple
output = tokenizer(seq_1, seq_2, padding=padding_state, truncation="only_second")
self.assertEqual(len(output["input_ids"]), model_max_length)
output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation="only_second")
self.assertEqual(len(output["input_ids"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(seq_1, seq_2, padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
truncated_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[:-2] + tokenizer.encode(
seq_1, add_special_tokens=False
)
truncated_second_sequence = (
tokenizer.encode(seq_0, add_special_tokens=False)
+ tokenizer.encode(seq_1, add_special_tokens=False)[:-2]
)
truncated_longest_sequence = (
truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence
)
overflow_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[
-(2 + stride) :
] + tokenizer.encode(seq_1, add_special_tokens=False)
overflow_second_sequence = (
tokenizer.encode(seq_0, add_special_tokens=False)
+ tokenizer.encode(seq_1, add_special_tokens=False)[-(2 + stride) :]
)
overflow_longest_sequence = (
overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
information = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
information = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
information_first_truncated = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
truncated_sequence = information_first_truncated["input_ids"][0]
overflowing_tokens = information_first_truncated["input_ids"][1]
self.assertEqual(len(information_first_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens))
self.assertEqual(overflowing_tokens, overflow_first_sequence)
else:
truncated_sequence = information_first_truncated["input_ids"]
overflowing_tokens = information_first_truncated["overflowing_tokens"]
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq0_tokens[-(2 + stride) :])
information_second_truncated = tokenizer(
seq_0,
seq_1,
max_length=len(sequence) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_second",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, PreTrainedTokenizerFast):
truncated_sequence = information_second_truncated["input_ids"][0]
overflowing_tokens = information_second_truncated["input_ids"][1]
self.assertEqual(len(information_second_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens))
self.assertEqual(overflowing_tokens, overflow_second_sequence)
else:
truncated_sequence = information_second_truncated["input_ids"]
overflowing_tokens = information_second_truncated["overflowing_tokens"]
self.assertEqual(len(truncated_sequence), len(sequence) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq1_tokens[-(2 + stride) :])
# def test_encode_input_type(self):
# tokenizers = self.get_tokenizers(do_lower_case=False)
# for tokenizer in tokenizers:
# with self.subTest(f"{tokenizer.__class__.__name__}"):
# sequence = "Let's encode this sequence"
# tokens = sequence.split() # tokenizer.tokenize(sequence)
# # input_ids = tokenizer.convert_tokens_to_ids(tokens)
# formatted_input = tokenizer.encode(sequence, add_special_tokens=True, add_prefix_space=False)
# self.assertEqual(
# tokenizer.encode(tokens, is_split_into_words=True, add_special_tokens=True), formatted_input
# )
# # This is not supported with the Rust tokenizers
# # self.assertEqual(tokenizer.encode(input_ids, add_special_tokens=True), formatted_input)
# def test_swap_special_token(self):
# tokenizers = self.get_tokenizers(do_lower_case=False)
# for tokenizer in tokenizers:
# with self.subTest(f"{tokenizer.__class__.__name__}"):
# # Our mask token
# mask = "<mask>"
# # We take a single word in the middle of the vocabulary
# all_tokens = sorted(tokenizer.get_vocab().keys())
# word = tokenizer.decode(tokenizer.encode(all_tokens[len(all_tokens)//2], add_special_tokens=False)[:1])
# sequence_0 = "Encode " + word + " sequence"
# sequence_masked_0 = "Encode " + mask + " sequence"
# sequence_1 = word + " this sequence"
# sequence_masked_1 = mask + " this sequence"
# # Add tokens so that masked token isn't split
# # tokens = [AddedToken(t, lstrip=True, normalized=False) for t in sequence.split()]
# # tokenizer.add_tokens(tokens)
# tokenizer.add_special_tokens(
# {"mask_token": AddedToken(mask, normalized=False)}
# ) # Eat left space on Byte-level BPE tokenizers
# mask_ind = tokenizer.convert_tokens_to_ids(mask)
# # Test first masked sequence
# encoded_0 = tokenizer.encode(sequence_0, add_special_tokens=False)
# encoded_masked = tokenizer.encode(sequence_masked_0, add_special_tokens=False)
# assert len(encoded_masked) == len(encoded_0)
# mask_loc = encoded_masked.index(mask_ind)
# encoded_masked[mask_loc] = encoded_0[mask_loc]
# self.assertEqual(encoded_masked, encoded_0)
# # Test second masked sequence
# encoded_1 = tokenizer.encode(sequence_1, add_special_tokens=False)
# encoded_masked = tokenizer.encode(sequence_masked_1, add_special_tokens=False)
# assert len(encoded_masked) == len(encoded_1)
# mask_loc = encoded_masked.index(mask_ind)
# encoded_masked[mask_loc] = encoded_1[mask_loc]
# self.assertEqual(encoded_masked, encoded_1)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence_0 = "Encode this."
# Testing single inputs
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
sequence_0, add_special_tokens=True, return_special_tokens_mask=True # , add_prefix_space=False
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence_0 = "Encode this."
sequence_1 = "This one too please."
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
sequence_0,
sequence_1,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(sequence, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(sequence, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(sequence)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(sequence, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
def test_padding_to_max_length(self):
"""We keep this test for backward compatibility but it should be remove when `pad_to_max_length` will e deprecated"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(sequence)
sequence_length = len(encoded_sequence)
# FIXME: the next line should use padding="max_length" to avoid the warning
padded_sequence = tokenizer.encode(
sequence, max_length=sequence_length + padding_size, pad_to_max_length=True
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# Check that nothing is done when a maximum length is not specified
encoded_sequence = tokenizer.encode(sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(sequence, pad_to_max_length=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest("No padding token.")
else:
empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8)
for key, value in empty_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not a multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not a multiple of 8")
normal_tokens = tokenizer("This", pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is unexpectedly a multiple of 8")
# Should also work with truncation
normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not a multiple of 8")
# truncation to something which is not a multiple of pad_to_multiple_of raises an error
self.assertRaises(
ValueError,
tokenizer.__call__,
"This",
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
def test_encode_plus_with_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence = "Sequence"
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_size = 10
padding_idx = tokenizer.pad_token_id
token_type_padding_idx = tokenizer.pad_token_type_id
encoded_sequence = tokenizer.encode_plus(sequence, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus(
sequence,
padding=True,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
assert sequence_length == not_padded_sequence_length
assert input_ids == not_padded_input_ids
assert special_tokens_mask == not_padded_special_tokens_mask
not_padded_sequence = tokenizer.encode_plus(
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
assert sequence_length == not_padded_sequence_length
assert input_ids == not_padded_input_ids
assert special_tokens_mask == not_padded_special_tokens_mask
# Test right padding
tokenizer.padding_side = "right"
right_padded_sequence = tokenizer.encode_plus(
sequence,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
assert sequence_length + padding_size == right_padded_sequence_length
assert input_ids + [padding_idx] * padding_size == right_padded_input_ids
assert special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask
# Test left padding
tokenizer.padding_side = "left"
left_padded_sequence = tokenizer.encode_plus(
sequence,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
assert sequence_length + padding_size == left_padded_sequence_length
assert [padding_idx] * padding_size + input_ids == left_padded_input_ids
assert [1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert token_type_ids + [token_type_padding_idx] * padding_size == right_padded_token_type_ids
assert [token_type_padding_idx] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
assert attention_mask + [0] * padding_size == right_padded_attention_mask
assert [0] * padding_size + attention_mask == left_padded_attention_mask
def test_separate_tokenizers(self):
# This tests that tokenizers don't impact others. Unfortunately the case where it fails is when
# we're loading an S3 configuration from a pre-trained identifier, and we have no way of testing those today.
tokenizers = self.get_tokenizers(random_argument=True)
new_tokenizers = self.get_tokenizers(random_argument=False)
for tokenizer, new_tokenizer in zip(tokenizers, new_tokenizers):
with self.subTest(f"{tokenizer.__class__.__name__}"):
                assert tokenizer.init_kwargs["random_argument"] is True
assert new_tokenizer.init_kwargs["random_argument"] is False
def test_get_vocab(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_dict = tokenizer.get_vocab()
self.assertIsInstance(vocab_dict, dict)
self.assertGreaterEqual(len(tokenizer), len(vocab_dict))
vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
self.assertEqual(len(vocab), len(tokenizer))
tokenizer.add_tokens(["asdfasdfasdfasdf"])
vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
self.assertEqual(len(vocab), len(tokenizer))
def test_conversion_reversible(self):
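        # Every (token, id) pair in get_vocab() should round-trip through
        # convert_tokens_to_ids and convert_ids_to_tokens; the unk token is skipped since
        # it may not round-trip consistently in every vocabulary.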
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab = tokenizer.get_vocab()
for word, ind in vocab.items():
if word == tokenizer.unk_token:
continue
self.assertEqual(tokenizer.convert_tokens_to_ids(word), ind)
self.assertEqual(tokenizer.convert_ids_to_tokens(ind), word)
def test_call(self):
        # Tests that calling the tokenizer (__call__) wraps encode_plus and batch_encode_plus
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
# Test not batched
encoded_sequences_1 = tokenizer.encode_plus(sequences[0])
encoded_sequences_2 = tokenizer(sequences[0])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test not batched pairs
encoded_sequences_1 = tokenizer.encode_plus(sequences[0], sequences[1])
encoded_sequences_2 = tokenizer(sequences[0], sequences[1])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
encoded_sequences_1 = tokenizer.batch_encode_plus(sequences)
encoded_sequences_2 = tokenizer(sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched pairs
encoded_sequences_1 = tokenizer.batch_encode_plus(list(zip(sequences, sequences)))
encoded_sequences_2 = tokenizer(sequences, sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
# Tests that all encoded values have the correct size
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
encoded_sequences = [tokenizer.encode_plus(sequence) for sequence in sequences]
encoded_sequences_batch = tokenizer.batch_encode_plus(sequences, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences_padded = [
tokenizer.encode_plus(sequence, max_length=maximum_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(sequences, padding=True)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
                # check 'longest' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=True)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
sequences, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
                # check 'no_padding' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=False)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
sequences, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@require_tokenizers
def test_added_token_are_matched_longest_first(self):
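        # When several added tokens share a prefix (here "extra_id_1" and "extra_id_100"),
        # tokenization should match the longest one first, regardless of the order in which
        # the tokens were added.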
if not self.test_slow_tokenizer:
self.skipTest("This test is only for slow tokenizers")
return
tokenizers = self.get_tokenizers(fast=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
try:
tokenizer.add_tokens([AddedToken("extra_id_1")])
tokenizer.add_tokens([AddedToken("extra_id_100")])
except Exception:
# Canine cannot add tokens which are not codepoints
self.skipTest("Cannot add those Added tokens")
                    # XXX: This used to split on `extra_id_1` first; we now match the
                    # longest added token first.
tokens = tokenizer.tokenize("This is some extra_id_100")
self.assertIn("extra_id_100", tokens)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.add_tokens([AddedToken("extra_id_100")])
tokenizer.add_tokens([AddedToken("extra_id_1")])
tokens = tokenizer.tokenize("This is some extra_id_100")
self.assertIn("extra_id_100", tokens)
@require_tokenizers
def test_added_token_serializable(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
new_token = AddedToken("new_token", lstrip=True)
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(tmp_dir_name)
tokenizer.from_pretrained(tmp_dir_name)
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch_encode_plus and encode_plus
# Right padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
# Left padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
                # check correct behaviour if no pad_token_id exists and add it if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_pretokenized_inputs(self):
# Test when inputs are pretokenized
tokenizers = self.get_tokenizers(do_lower_case=False) # , add_prefix_space=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if hasattr(tokenizer, "add_prefix_space") and not tokenizer.add_prefix_space:
continue
# Prepare a sequence from our tokenizer vocabulary
sequence, ids = self.get_clean_sequence(tokenizer, with_prefix_space=True, max_length=20)
# sequence = " " + sequence # To be sure the byte-level tokenizers are feeling good
token_sequence = sequence.split()
# sequence_no_prefix_space = sequence.strip()
# Test encode for pretokenized inputs
output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=False)
output_sequence = tokenizer.encode(sequence, add_special_tokens=False)
self.assertEqual(output, output_sequence)
output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=True)
output_sequence = tokenizer.encode(sequence, add_special_tokens=True)
self.assertEqual(output, output_sequence)
# Test encode_plus for pretokenized inputs
output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=False)
output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=False)
for key in output.keys():
self.assertEqual(output[key], output_sequence[key])
output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=True)
output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=True)
for key in output.keys():
self.assertEqual(output[key], output_sequence[key])
# Test batch_encode_plus for pretokenized inputs
sequence_batch = [sequence.strip()] * 2 + [sequence.strip() + " " + sequence.strip()]
token_sequence_batch = [s.split() for s in sequence_batch]
sequence_batch_cleaned_up_spaces = [" " + " ".join(s) for s in token_sequence_batch]
output = tokenizer.batch_encode_plus(
token_sequence_batch, is_split_into_words=True, add_special_tokens=False
)
output_sequence = tokenizer.batch_encode_plus(
sequence_batch_cleaned_up_spaces, add_special_tokens=False
)
for key in output.keys():
self.assertEqual(output[key], output_sequence[key])
output = tokenizer.batch_encode_plus(
token_sequence_batch, is_split_into_words=True, add_special_tokens=True
)
output_sequence = tokenizer.batch_encode_plus(
sequence_batch_cleaned_up_spaces, add_special_tokens=True
)
for key in output.keys():
self.assertEqual(output[key], output_sequence[key])
# Test encode for pretokenized inputs pairs
output = tokenizer.encode(
token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False
)
output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=False)
self.assertEqual(output, output_sequence)
output = tokenizer.encode(
token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True
)
output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=True)
self.assertEqual(output, output_sequence)
# Test encode_plus for pretokenized inputs pairs
output = tokenizer.encode_plus(
token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False
)
output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=False)
for key in output.keys():
self.assertEqual(output[key], output_sequence[key])
output = tokenizer.encode_plus(
token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True
)
output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=True)
for key in output.keys():
self.assertEqual(output[key], output_sequence[key])
# Test batch_encode_plus for pretokenized inputs pairs
sequence_pair_batch = [(sequence.strip(), sequence.strip())] * 2 + [
(sequence.strip() + " " + sequence.strip(), sequence.strip())
]
token_sequence_pair_batch = [tuple(s.split() for s in pair) for pair in sequence_pair_batch]
sequence_pair_batch_cleaned_up_spaces = [
tuple(" " + " ".join(s) for s in pair) for pair in token_sequence_pair_batch
]
output = tokenizer.batch_encode_plus(
token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=False
)
output_sequence = tokenizer.batch_encode_plus(
sequence_pair_batch_cleaned_up_spaces, add_special_tokens=False
)
for key in output.keys():
self.assertEqual(output[key], output_sequence[key])
output = tokenizer.batch_encode_plus(
token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=True
)
output_sequence = tokenizer.batch_encode_plus(
sequence_pair_batch_cleaned_up_spaces, add_special_tokens=True
)
for key in output.keys():
self.assertEqual(output[key], output_sequence[key])
def test_prepare_for_model(self):
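        # prepare_for_model on pre-computed ids should yield the same dictionary as
        # calling encode_plus directly on the raw string.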
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
string_sequence = "Testing the prepare_for_model method."
ids = tokenizer.encode(string_sequence, add_special_tokens=False)
prepared_input_dict = tokenizer.prepare_for_model(ids, add_special_tokens=True)
input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
self.assertEqual(input_dict, prepared_input_dict)
def test_batch_encode_plus_overflowing_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
string_sequences = ["Testing the prepare_for_model method.", "Test"]
if tokenizer.pad_token is None:
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
tokenizer.batch_encode_plus(
string_sequences, return_overflowing_tokens=True, truncation=True, padding=True, max_length=3
)
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
                # A tensor cannot be built from sequences which are not all the same size
self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors="pt")
self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors="tf")
if tokenizer.pad_token_id is None:
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
sequences,
padding=True,
return_tensors="pt",
)
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
sequences,
padding="longest",
return_tensors="tf",
)
else:
pytorch_tensor = tokenizer.batch_encode_plus(sequences, padding=True, return_tensors="pt")
tensorflow_tensor = tokenizer.batch_encode_plus(sequences, padding="longest", return_tensors="tf")
encoded_sequences = tokenizer.batch_encode_plus(sequences, padding=True)
for key in encoded_sequences.keys():
pytorch_value = pytorch_tensor[key].tolist()
tensorflow_value = tensorflow_tensor[key].numpy().tolist()
encoded_value = encoded_sequences[key]
self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def _check_no_pad_token_padding(self, tokenizer, sequences):
# if tokenizer does not have pad_token_id, an error should be thrown
if tokenizer.pad_token_id is None:
with self.assertRaises(ValueError):
if isinstance(sequences, list):
tokenizer.batch_encode_plus(sequences, padding="longest")
else:
tokenizer.encode_plus(sequences, padding=True)
# add pad_token_id to pass subsequent tests
tokenizer.add_special_tokens({"pad_token": "<PAD>"})
def check_subword_sampling(
self,
tokenizer: PreTrainedTokenizer,
text: str = None,
) -> None:
"""
Check if the tokenizer generates different results when subword regularization is enabled.
Subword regularization augments training data with subword sampling.
This has a random component.
Args:
tokenizer: The tokenizer to check.
text: The text to use for the checks.
"""
text = "This is a test for subword regularization." if text is None else text
if self.test_sentencepiece_ignore_case:
text = text.lower()
tokens_list = []
for _ in range(5):
tokens_list.append(tokenizer.tokenize(text))
# the list of different pairs of tokens_list
combinations = itertools.combinations(tokens_list, 2)
        # check that sampling actually happened (at least two tokenizations differ)
subword_sampling_found = False
for combination in combinations:
if combination[0] != combination[1]:
subword_sampling_found = True
self.assertTrue(subword_sampling_found)
# check if converting back to original text works
for tokens in tokens_list:
if self.test_sentencepiece_ignore_case:
self.assertEqual(text, tokenizer.convert_tokens_to_string(tokens).lower())
else:
self.assertEqual(text, tokenizer.convert_tokens_to_string(tokens))
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
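        # Smoke test: encodings returned with return_tensors="pt" should be directly
        # consumable by a freshly initialized model of the matching architecture, both
        # for a single sequence and for a batch.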
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
assert (
(model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
if is_using_common_embeddings
else True
)
# Build sequence
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="pt")
# Ensure that the BatchEncoding.to() method works.
encoded_sequence.to(model.device)
batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
# This should not fail
with torch.no_grad(): # saves some time
model(**encoded_sequence)
model(**batch_encoded_sequence)
# if self.test_rust_tokenizer:
# fast_tokenizer = self.get_rust_tokenizer()
# encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors="pt")
# batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
# # This should not fail
# model(**encoded_sequence_fast)
# model(**batch_encoded_sequence_fast)
@require_tf
@slow
def test_tf_encode_plus_sent_to_model(self):
from transformers import TF_MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(TF_MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
assert model.config.vocab_size >= len(tokenizer)
# Build sequence
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="tf")
batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="tf")
# This should not fail
model(encoded_sequence)
model(batch_encoded_sequence)
# TODO: Check if require_torch is the best to test for numpy here ... Maybe move to require_flax when available
@require_torch
@slow
def test_np_encode_plus_sent_to_model(self):
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
# Build sequence
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="np")
batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="np")
# TODO: add forward through JAX/Flax when PR is merged
# This is currently here to make flake8 happy !
if encoded_sequence is None:
raise ValueError("Cannot convert list to numpy tensor on encode_plus()")
if batch_encoded_sequence is None:
raise ValueError("Cannot convert list to numpy tensor on batch_encode_plus()")
if self.test_rust_tokenizer:
fast_tokenizer = self.get_rust_tokenizer()
encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors="np")
batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus(
[sequence, sequence], return_tensors="np"
)
# TODO: add forward through JAX/Flax when PR is merged
# This is currently here to make flake8 happy !
if encoded_sequence_fast is None:
raise ValueError("Cannot convert list to numpy tensor on encode_plus() (fast)")
if batch_encoded_sequence_fast is None:
raise ValueError("Cannot convert list to numpy tensor on batch_encode_plus() (fast)")
@require_torch
def test_prepare_seq2seq_batch(self):
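        # prepare_seq2seq_batch should truncate source and target texts independently
        # (max_length vs. max_target_length) and default max_target_length to max_length
        # when it is not given.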
if not self.test_seq2seq:
return
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Longer text that will definitely require truncation.
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei "
'pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu '
"vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
batch = tokenizer.prepare_seq2seq_batch(
src_texts=src_text,
tgt_texts=tgt_text,
max_length=3,
max_target_length=10,
return_tensors="pt",
src_lang="en_XX", # this should be ignored (for all but mbart) but not cause an error
)
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 10)
# max_target_length will default to max_length if not specified
batch = tokenizer.prepare_seq2seq_batch(
src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.labels.shape[1], 3)
batch_encoder_only = tokenizer.prepare_seq2seq_batch(
src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
)
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
self.assertNotIn("decoder_input_ids", batch_encoder_only)
def test_is_fast(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Check is_fast is set correctly
self.assertTrue(tokenizer_r.is_fast)
if self.test_slow_tokenizer:
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertFalse(tokenizer_p.is_fast)
def test_fast_only_inputs(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Ensure None raises an error
self.assertRaises(TypeError, tokenizer_r.tokenize, None)
self.assertRaises(TypeError, tokenizer_r.encode, None)
self.assertRaises(TypeError, tokenizer_r.encode_plus, None)
self.assertRaises(TypeError, tokenizer_r.batch_encode_plus, None)
def test_alignement_methods(self):
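        # Exercise the fast-tokenizer alignment helpers (token_to_word, word_to_tokens,
        # token_to_chars, char_to_token, char_to_word, word_to_chars, token_to_sequence)
        # on single sequences, batches and sequence pairs, checking the boundary indices.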
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
text = " ".join(words)
batch_size = 3
encoding = tokenizer_r.encode_plus(text, add_special_tokens=False)
batch_encoding = tokenizer_r.batch_encode_plus([text] * batch_size, add_special_tokens=False)
num_tokens = len(encoding["input_ids"])
last_word_index = len(words) - 1
last_token_index = num_tokens - 1
last_batch_index = batch_size - 1
last_char_index = len(text) - 1
# words, tokens
self.assertEqual(len(encoding.words(0)), num_tokens)
self.assertEqual(max(encoding.words(0)), last_word_index)
self.assertEqual(min(encoding.words(0)), 0)
self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens)
self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index)
self.assertEqual(min(batch_encoding.words(last_batch_index)), 0)
self.assertEqual(len(encoding.tokens(0)), num_tokens)
# Assert token_to_word
self.assertEqual(encoding.token_to_word(0), 0)
self.assertEqual(encoding.token_to_word(0, 0), 0)
self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)
# Assert word_to_tokens
self.assertEqual(encoding.word_to_tokens(0).start, 0)
self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1)
self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
self.assertEqual(
batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1
)
# Assert token_to_chars
self.assertEqual(encoding.token_to_chars(0).start, 0)
self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1)
self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
self.assertEqual(
batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1
)
# Assert char_to_token
self.assertEqual(encoding.char_to_token(0), 0)
self.assertEqual(encoding.char_to_token(0, 0), 0)
self.assertEqual(encoding.char_to_token(last_char_index), last_token_index)
self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index)
self.assertEqual(batch_encoding.char_to_token(1, 0), 0)
self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index)
self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index)
# Assert char_to_word
self.assertEqual(encoding.char_to_word(0), 0)
self.assertEqual(encoding.char_to_word(0, 0), 0)
self.assertEqual(encoding.char_to_word(last_char_index), last_word_index)
self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index)
self.assertEqual(batch_encoding.char_to_word(1, 0), 0)
self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index)
self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index)
# Assert word_to_chars
self.assertEqual(encoding.word_to_chars(0).start, 0)
self.assertEqual(encoding.word_to_chars(0, 0).start, 0)
self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1)
self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0)
self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
self.assertEqual(
batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1
)
# Assert token_to_sequence
self.assertEqual(encoding.token_to_sequence(num_tokens // 2), 0)
self.assertEqual(encoding.token_to_sequence(0, num_tokens // 2), 0)
self.assertEqual(batch_encoding.token_to_sequence(1, num_tokens // 2), 0)
self.assertEqual(batch_encoding.token_to_sequence(0, num_tokens // 2), 0)
self.assertEqual(batch_encoding.token_to_sequence(last_batch_index, num_tokens // 2), 0)
# Pair of input sequences
words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
text = " ".join(words)
pair_words = ["Amazing", "example", "full", "of", "inspiration"]
pair_text = " ".join(pair_words)
batch_size = 3
index_word_in_first_seq = words.index("inspiration")
index_word_in_pair_seq = pair_words.index("inspiration")
index_char_in_first_seq = text.find("inspiration")
index_char_in_pair_seq = pair_text.find("inspiration")
pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=False)
pair_batch_encoding = tokenizer_r.batch_encode_plus(
[(text, pair_text)] * batch_size, add_special_tokens=False
)
num_tokens = len(encoding["input_ids"])
last_word_index = len(words) - 1
last_token_index = num_tokens - 1
last_batch_index = batch_size - 1
last_char_index = len(text) - 1
# Assert word_to_tokens
self.assertNotEqual(
pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start,
pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start,
)
self.assertEqual(
pair_encoding["input_ids"][
pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start
],
pair_encoding["input_ids"][
pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start
],
)
self.assertNotEqual(
pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start,
pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start,
)
self.assertEqual(
pair_batch_encoding["input_ids"][1][
pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start
],
pair_batch_encoding["input_ids"][1][
pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start
],
)
# Assert char_to_token
self.assertNotEqual(
pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0),
pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1),
)
self.assertEqual(
pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0)],
pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1)],
)
self.assertNotEqual(
pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0),
pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1),
)
self.assertEqual(
pair_batch_encoding["input_ids"][1][
pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0)
],
pair_batch_encoding["input_ids"][1][
pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1)
],
)
# Assert char_to_word
self.assertNotEqual(
pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0),
pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1),
)
self.assertEqual(
words[pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0)],
pair_words[pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1)],
)
self.assertNotEqual(
pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0),
pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1),
)
self.assertEqual(
words[pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0)],
pair_words[pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1)],
)
# Assert word_to_chars
self.assertNotEqual(
pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start,
pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start,
)
self.assertEqual(
text[pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start],
pair_text[pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start],
)
self.assertNotEqual(
pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start,
pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start,
)
self.assertEqual(
text[pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start],
pair_text[pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start],
)
# Assert token_to_sequence
pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=True)
pair_sequence_ids = [
pair_encoding.token_to_sequence(i) for i in range(len(pair_encoding["input_ids"]))
]
self.assertIn(0, pair_sequence_ids)
self.assertIn(1, pair_sequence_ids)
if tokenizer_r.num_special_tokens_to_add(pair=True):
self.assertIn(None, pair_sequence_ids)
pair_batch_encoding = tokenizer_r.batch_encode_plus(
[(text, pair_text)] * batch_size, add_special_tokens=True
)
pair_batch_sequence_ids = [
pair_batch_encoding.token_to_sequence(1, i)
for i in range(len(pair_batch_encoding["input_ids"][0]))
]
self.assertIn(0, pair_batch_sequence_ids)
self.assertIn(1, pair_batch_sequence_ids)
if tokenizer_r.num_special_tokens_to_add(pair=True):
self.assertIn(None, pair_batch_sequence_ids)
def test_tokenization_python_rust_equals(self):
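        # The slow (Python) and fast (Rust) tokenizers should produce identical input_ids,
        # token_type_ids and attention_mask on the same data, including under truncation
        # and when returning overflowing tokens.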
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Ensure basic input match
input_p = tokenizer_p.encode_plus(self._data)
input_r = tokenizer_r.encode_plus(self._data)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key])
input_pairs_p = tokenizer_p.encode_plus(self._data, self._data)
input_pairs_r = tokenizer_r.encode_plus(self._data, self._data)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
# Ensure truncation match
input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True)
input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key])
# Ensure truncation with stride match
input_p = tokenizer_p.encode_plus(
self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
input_r = tokenizer_r.encode_plus(
self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_num_special_tokens_to_add_equal(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(
tokenizer_r.num_special_tokens_to_add(False), tokenizer_p.num_special_tokens_to_add(False)
)
self.assertEqual(
tokenizer_r.num_special_tokens_to_add(True), tokenizer_p.num_special_tokens_to_add(True)
)
def test_max_length_equal(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer_r.max_len_single_sentence, tokenizer_p.max_len_single_sentence)
self.assertEqual(tokenizer_r.max_len_sentences_pair, tokenizer_p.max_len_sentences_pair)
def test_special_tokens_map_equal(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Assert the set of special tokens match.
self.assertSequenceEqual(
tokenizer_p.special_tokens_map.items(),
tokenizer_r.special_tokens_map.items(),
)
def test_add_tokens(self):
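        # add_tokens / add_special_tokens should report how many entries were actually added;
        # here 3 regular tokens plus 5 special tokens ([BOS], [EOS] and three
        # additional_special_tokens) grow the vocabulary by 8 entries in total.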
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
vocab_size = len(tokenizer_r)
self.assertEqual(tokenizer_r.add_tokens(""), 0)
self.assertEqual(tokenizer_r.add_tokens("testoken"), 1)
self.assertEqual(tokenizer_r.add_tokens(["testoken1", "testtoken2"]), 2)
self.assertEqual(len(tokenizer_r), vocab_size + 3)
self.assertEqual(tokenizer_r.add_special_tokens({}), 0)
self.assertEqual(tokenizer_r.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2)
self.assertRaises(
AssertionError, tokenizer_r.add_special_tokens, {"additional_special_tokens": "<testtoken1>"}
)
self.assertEqual(tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1)
self.assertEqual(
tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2
)
self.assertIn("<testtoken3>", tokenizer_r.special_tokens_map["additional_special_tokens"])
self.assertIsInstance(tokenizer_r.special_tokens_map["additional_special_tokens"], list)
self.assertGreaterEqual(len(tokenizer_r.special_tokens_map["additional_special_tokens"]), 2)
self.assertEqual(len(tokenizer_r), vocab_size + 8)
def test_offsets_mapping(self):
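        # return_offsets_mapping (fast tokenizers only) should yield one (start, end)
        # character span per token, and the special_tokens_mask should flag exactly
        # num_special_tokens_to_add special tokens.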
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
text = "Wonderful no inspiration example with subtoken"
pair = "Along with an awesome pair"
# No pair
tokens_with_offsets = tokenizer_r.encode_plus(
text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
)
added_tokens = tokenizer_r.num_special_tokens_to_add(False)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
                # Assert the special_tokens_mask flags exactly `added_tokens` special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
# Pairs
tokens_with_offsets = tokenizer_r.encode_plus(
text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
)
added_tokens = tokenizer_r.num_special_tokens_to_add(True)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
                # Assert the special_tokens_mask flags exactly `added_tokens` special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequence it can returns different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
if not tokenizer.pad_token or tokenizer.pad_token_id < 0:
return
tokens = tokenizer.encode_plus(
"HuggingFace is solving NLP one commit at a time",
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
# Mono sample
tokens = tokenizer.batch_encode_plus(
["HuggingFace is solving NLP one commit at a time"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
# Multi sample
tokens = tokenizer.batch_encode_plus(
["HuggingFace is solving NLP one commit at a time", "Very tiny input"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
def test_compare_pretokenized_inputs(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
if hasattr(tokenizer_p, "add_prefix_space") and not tokenizer_p.add_prefix_space:
continue # Too hard to test for now
# Input string
pretokenized_input_simple = "This is a sample input".split()
pretokenized_input_pair = "This is a sample pair".split()
# Test encode for pretokenized inputs
output_r = tokenizer_r.encode(
pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False
)
output_p = tokenizer_p.encode(
pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False
)
self.assertEqual(output_p, output_r)
kwargs = {
"is_split_into_words": True,
# "return_token_type_ids": True, # Use the defaults for each tokenizers
# "return_attention_mask": True, # Use the defaults for each tokenizers
"return_overflowing_tokens": False,
"return_special_tokens_mask": True,
"return_offsets_mapping": False, # Not implemented in python tokenizers
# "add_special_tokens": False,
}
batch_kwargs = {
"is_split_into_words": True,
# "return_token_type_ids": True, # Use the defaults for each tokenizers
# "return_attention_mask": True, # Use the defaults for each tokenizers
"return_overflowing_tokens": False,
"return_special_tokens_mask": True,
"return_offsets_mapping": False, # Not implemented in python tokenizers
# "add_special_tokens": False,
}
# Test encode_plus for pretokenized inputs
output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **kwargs)
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test batch_encode_plus for pretokenized inputs
input_batch = ([pretokenized_input_simple] * 2) + [pretokenized_input_simple + pretokenized_input_pair]
output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs)
output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test encode for pretokenized inputs pairs
output_r = tokenizer_r.encode(
pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True
)
output_p = tokenizer_p.encode(
pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True
)
self.assertEqual(output_p, output_r)
# Test encode_plus for pretokenized inputs
output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test batch_encode_plus for pretokenized inputs
input_batch_pair = ([pretokenized_input_simple, pretokenized_input_pair] * 2) + [
pretokenized_input_simple + pretokenized_input_pair,
pretokenized_input_pair,
]
output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs)
output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
def test_create_token_type_ids(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
input_simple = [1, 2, 3]
input_pair = [1, 2, 3]
# Generate output
output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple)
output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple, input_pair)
output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def test_build_inputs_with_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# # Input string
# input_simple = tokenizer_p.tokenize("This is a sample input", add_special_tokens=False)
# input_pair = tokenizer_p.tokenize("This is a sample pair", add_special_tokens=False)
# # Generate output
# output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
# output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
# self.assertEqual(output_p, output_r)
# # Generate pair output
# output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
# output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
# self.assertEqual(output_p, output_r)
# Input tokens id
input_simple = tokenizer_p.encode("This is a sample input", add_special_tokens=False)
input_pair = tokenizer_p.encode("This is a sample pair", add_special_tokens=False)
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def test_padding(self, max_length=50):
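        # Compare the slow and fast tokenizers across padding strategies
        # (pad_to_max_length, padding="max_length", padding="longest"/True) for encode,
        # encode_plus, batch_encode_plus and the standalone pad() method.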
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
# Encode - Simple input
input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode("This is a simple input", padding="longest")
input_p = tokenizer_p.encode("This is a simple input", padding=True)
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode - Pair input
input_r = tokenizer_r.encode(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True)
input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest")
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode_plus - Simple input
input_r = tokenizer_r.encode_plus(
"This is a simple input", max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(
"This is a simple input", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest")
input_p = tokenizer_p.encode_plus("This is a simple input", padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Encode_plus - Pair input
input_r = tokenizer_r.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", "This is a pair", padding="longest")
input_p = tokenizer_p.encode_plus("This is a simple input", "This is a pair", padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Batch_encode_plus - Simple input
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
pad_to_max_length=True,
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
pad_to_max_length=True,
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"],
max_length=max_length,
padding=True,
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], padding="longest"
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], padding=True
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Batch_encode_plus - Pair input
input_r = tokenizer_r.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
max_length=max_length,
truncation=True,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
padding="longest",
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus("This is a input 1")
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.encode_plus("This is a input 1")
input_p = tokenizer_r.pad(input_p)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus("This is a input 1")
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.encode_plus("This is a input 1")
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
# Using pad after tokenization
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_r.pad(input_p)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad after tokenization
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_padding_different_model_input_name(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
# rename encoded batch to "inputs"
input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]]
del input_r[tokenizer_r.model_input_names[0]]
input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]]
del input_p[tokenizer_p.model_input_names[0]]
# Renaming `input_ids` to `inputs`
tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:]
tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:]
input_r = tokenizer_r.pad(input_r, padding="longest")
input_p = tokenizer_r.pad(input_p, padding="longest")
max_length = len(input_p["inputs"][0])
self.assert_batch_padded_input_match(
input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs"
)
def test_save_pretrained(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(tmpdirname2)
# Save tokenizer rust, legacy_format=True
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# Checks it saves with the same files
self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2)
# Save tokenizer rust, legacy_format=False
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2)
def test_embeded_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = "A, <mask> AllenNLP sentence."
tokens_r = tokenizer_r.encode_plus(
sentence,
add_special_tokens=True,
)
tokens_p = tokenizer_p.encode_plus(
sentence,
add_special_tokens=True,
)
for key in tokens_p.keys():
self.assertEqual(tokens_r[key], tokens_p[key])
if "token_type_ids" in tokens_r:
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, tokens_p)
def test_compare_add_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
# pair_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=True)
for text in ["", " "]:
# tokenize()
no_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=True)
self.assertEqual(
len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add
)
# encode()
no_special_tokens = tokenizer_r.encode(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode(text, add_special_tokens=True)
self.assertEqual(
len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add
)
# encode_plus()
no_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=True)
for key in no_special_tokens.keys():
self.assertEqual(
len(no_special_tokens[key]),
len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
)
# # batch_encode_plus
no_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=False)
with_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=True)
for key in no_special_tokens.keys():
for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
def test_compare_prepare_for_model(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
string_sequence = "Asserting that both tokenizers are equal"
python_output = tokenizer_p.prepare_for_model(
tokenizer_p.encode(string_sequence, add_special_tokens=False)
)
rust_output = tokenizer_r.prepare_for_model(
tokenizer_r.encode(string_sequence, add_special_tokens=False)
)
for key in python_output:
self.assertEqual(python_output[key], rust_output[key])
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
r_output = tokenizer_r.encode("Hey this is a <special> token")
special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
)
tokenizer_p = self.tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
p_output = tokenizer_p.encode("Hey this is a <special> token")
cr_output = tokenizer_cr.encode("Hey this is a <special> token")
self.assertEqual(p_output, r_output)
self.assertEqual(cr_output, r_output)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir)
with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
special_tokens_map = json.load(json_file)
with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
tokenizer_config = json.load(json_file)
special_tokens_map["additional_special_tokens"] = ["an_additional_special_token"]
tokenizer_config["additional_special_tokens"] = ["an_additional_special_token"]
with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
json.dump(special_tokens_map, outfile)
with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
json.dump(tokenizer_config, outfile)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
tmp_dir,
)
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
)
self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab())
self.assertEqual(
["an_additional_special_token"],
tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
),
)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
new_added_tokens = [AddedToken("a_new_additional_special_token", lstrip=True)]
tokenizer = tokenizer_class.from_pretrained(
tmp_dir,
additional_special_tokens=new_added_tokens,
)
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
self.assertEqual(
["a_new_additional_special_token"],
tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
),
)
def test_training_new_tokenizer(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
# Test we can use the new tokenizer with something not seen during training
inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."])
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "This is the first sentence"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
# We check that the parameters of the tokenizer remained the same
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
# Assert the set of special tokens match as we didn't ask to change them
self.assertSequenceEqual(
tokenizer.all_special_tokens_extended,
new_tokenizer.all_special_tokens_extended,
)
self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
# Test with a special tokens map
class_signature = inspect.signature(tokenizer.__class__)
if "cls_token" in class_signature.parameters:
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
)
cls_id = new_tokenizer.get_vocab()["<cls>"]
self.assertEqual(new_tokenizer.cls_token, "<cls>")
self.assertEqual(new_tokenizer.cls_token_id, cls_id)
# Create a new mapping from the special tokens defined in the original tokenizer
special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
special_tokens_list.remove("additional_special_tokens")
special_tokens_map = {}
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is not None:
special_token = getattr(tokenizer, token)
special_tokens_map[special_token] = f"{special_token}a"
# Train new tokenizer
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
)
# Check the changes
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is None:
continue
special_token = getattr(tokenizer, token)
if special_token in special_tokens_map:
new_special_token = getattr(new_tokenizer, token)
self.assertEqual(special_tokens_map[special_token], new_special_token)
new_id = new_tokenizer.get_vocab()[new_special_token]
self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)
# Check if the AddedToken / string format has been kept
for special_token in tokenizer.all_special_tokens_extended:
if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
elif isinstance(special_token, AddedToken):
# The special token must appear in the list of the new tokenizer as an object of type AddedToken with
# the same parameters as the old AddedToken except the content that the user has requested to change.
special_token_str = special_token.content
new_special_token_str = special_tokens_map[special_token_str]
find = False
for candidate in new_tokenizer.all_special_tokens_extended:
if (
isinstance(candidate, AddedToken)
and candidate.content == new_special_token_str
and candidate.lstrip == special_token.lstrip
and candidate.rstrip == special_token.rstrip
and candidate.normalized == special_token.normalized
and candidate.single_word == special_token.single_word
):
find = True
break
self.assertTrue(
find,
(
f"'{new_special_token_str}' doesn't appear in the list "
f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as "
f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}"
),
)
elif special_token not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
else:
# The special token must appear in the list of the new tokenizer as an object of type string.
self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended)
# Test we can use the new tokenizer with something not seen during training
inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."])
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "This is the first sentence"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
def test_tokenizer_mismatch_warning(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
with self.assertLogs("transformers", level="WARNING") as cm:
try:
if self.tokenizer_class == BertTokenizer:
AlbertTokenizer.from_pretrained(pretrained_name)
else:
BertTokenizer.from_pretrained(pretrained_name)
except (TypeError, AttributeError):
# Some tokenizers cannot be loaded into the target tokenizer at all and errors are returned,
# here we just check that the warning has been logged before the error is raised
pass
finally:
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function is called from."
)
)
try:
if self.rust_tokenizer_class == BertTokenizerFast:
AlbertTokenizerFast.from_pretrained(pretrained_name)
else:
BertTokenizerFast.from_pretrained(pretrained_name)
except (TypeError, AttributeError):
# Some tokenizers cannot be loaded into the target tokenizer at all and errors are returned,
# here we just check that the warning has been logged before the error is raised
pass
finally:
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function is called from."
)
)
@require_torch
def test_saving_tokenizer_trainer(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
with tempfile.TemporaryDirectory() as tmp_dir:
# Save the fast tokenizer files in a temporary directory
tokenizer_old = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs, use_fast=True)
tokenizer_old.save_pretrained(tmp_dir, legacy_format=False) # save only fast version
# Initialize toy model for the trainer
model = nn.Module()
# Load tokenizer from a folder without legacy files
tokenizer = self.rust_tokenizer_class.from_pretrained(tmp_dir)
training_args = TrainingArguments(output_dir=tmp_dir, do_train=True, no_cuda=True)
trainer = Trainer(model=model, args=training_args, tokenizer=tokenizer)
# Should not raise an error
trainer.save_model(os.path.join(tmp_dir, "checkpoint"))
self.assertIn("tokenizer.json", os.listdir(os.path.join(tmp_dir, "checkpoint")))
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def setUpClass(cls):
cls._api = HfApi(endpoint=ENDPOINT_STAGING)
cls._token = cls._api.login(username=USER, password=PASS)
@classmethod
def tearDownClass(cls):
try:
cls._api.delete_repo(token=cls._token, name="test-tokenizer")
except HTTPError:
pass
try:
cls._api.delete_repo(token=cls._token, name="test-tokenizer-org", organization="valid_org")
except HTTPError:
pass
def test_push_to_hub(self):
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
tokenizer = BertTokenizer(vocab_file)
tokenizer.save_pretrained(
os.path.join(tmp_dir, "test-tokenizer"), push_to_hub=True, use_auth_token=self._token
)
new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
def test_push_to_hub_in_organization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
tokenizer = BertTokenizer(vocab_file)
tokenizer.save_pretrained(
os.path.join(tmp_dir, "test-tokenizer-org"),
push_to_hub=True,
use_auth_token=self._token,
organization="valid_org",
)
new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
class TrieTest(unittest.TestCase):
def test_trie(self):
trie = Trie()
trie.add("Hello 友達")
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
trie.add("Hello")
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})
def test_trie_split(self):
trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])
def test_trie_single(self):
trie = Trie()
trie.add("A")
self.assertEqual(trie.split("ABC"), ["A", "BC"])
self.assertEqual(trie.split("BCA"), ["BC", "A"])
def test_trie_final(self):
trie = Trie()
trie.add("TOKEN]")
trie.add("[SPECIAL_TOKEN]")
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
|
py
|
1a58f5360574e681e9f43391027b6e6934e5cad1
|
from passrotate.provider import Provider, ProviderOption, PromptType, register_provider
from passrotate.forms import get_form
import requests
from urllib.parse import urlparse
class Twitter(Provider):
"""
[twitter.com]
username=Your Twitter username
"""
name = "Twitter"
domains = [
"twitter.com",
"m.twitter.com"
]
options = {
"username": ProviderOption(str, "Your Twitter username")
}
def __init__(self, options):
self.username = options["username"]
def prepare(self, old_password):
self._session = requests.Session()
r = self._session.get("https://mobile.twitter.com/login")
tk = self._session.cookies.get("_mb_tk")
if not tk or r.status_code != 200:
return False
r = self._session.post("https://mobile.twitter.com/sessions", data={
"authenticity_token": tk,
"session[username_or_email]": self.username,
"session[password]": old_password,
"remember_me": 0,
"wfa": 1,
"redirect_after_login": "/home"
})
url = urlparse(r.url)
if url.path == "/login/error":
raise Exception("Current password for Twitter is incorrect")
if url.path == "/account/locked":
raise Exception("Twitter has locked us out of further login attempts. Wait 60 minutes and try again.")
while url.path == "/account/login_verification":
data = get_form(r.text)
challenge_type = data.get("challenge_type")
if challenge_type == "Sms":
response = self.prompt("Enter your SMS authorization code", PromptType.sms)
else:
raise Exception("Unsupported two-factor method '{}'".format(challenge_type))
data.update({ "challenge_response": response })
r = self._session.post(
"https://mobile.twitter.com/account/login_verification",
data=data)
url = urlparse(r.url)
r = self._session.get("https://twitter.com")
r = self._session.get("https://twitter.com/settings/password")
self._form = get_form(r.text, id="password-form")
def execute(self, old_password, new_password):
self._form.update({
"current_password": old_password,
"user_password": new_password,
"user_password_confirmation": new_password,
})
r = self._session.post("https://twitter.com/settings/passwords/update",
data=self._form, headers={
"origin": "https://twitter.com",
"referer": "https://twitter.com/settings/password"
})
register_provider(Twitter)
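# Minimal usage sketch (not part of the provider API): this shows how the provider
# might be driven directly, the same way passrotate does during a rotation. The
# username and password strings below are placeholders, and running this requires
# network access plus valid Twitter credentials.
if __name__ == "__main__":
    provider = Twitter({"username": "example_user"})
    provider.prepare("old-password")  # logs in and loads the password form
    provider.execute("old-password", "new-password")  # submits the password change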
|
py
|
1a58f6763eb1ef71b85c8d4ef307f06cecb5b450
|
# coding: utf-8
import copy
import numpy as np
from flearn.common.distiller import DFDistiller, KDLoss
from .strategy import ParentStrategy
from .utils import convert_to_tensor
class DF(ParentStrategy):
"""
Ensemble distillation for robust model fusion in federated learning
[1] Lin T, Kong L, Stich S U, et al. Ensemble distillation for robust model fusion in federated learning[J]. arXiv preprint arXiv:2006.07242, 2020.
"""
def __init__(self, model_base, strategy):
super().__init__(strategy)
self.model_base = model_base
def server_post_processing(self, ensemble_params_lst, ensemble_params, **kwargs):
w_glob = convert_to_tensor(ensemble_params["w_glob"])
agg_weight_lst, w_local_lst = self.server_pre_processing(ensemble_params_lst)
teacher_lst = []
for w_local in w_local_lst:
self.model_base.load_state_dict(convert_to_tensor(w_local))
teacher_lst.append(copy.deepcopy(self.model_base))
self.model_base.load_state_dict(w_glob)
student = copy.deepcopy(self.model_base)
kd_loader, device = kwargs.pop("kd_loader"), kwargs.pop("device")
temperature = kwargs.pop("T")
distiller = DFDistiller(
kd_loader,
device,
kd_loss=KDLoss(temperature),
)
molecular = np.sum(agg_weight_lst)
weight_lst = [w / molecular for w in agg_weight_lst]
# agg_weight_lst: should be assigned according to each model's performance on a validation set
ensemble_params["w_glob"] = distiller.multi(
teacher_lst, student, kwargs.pop("method"), weight_lst=weight_lst, **kwargs
)
return ensemble_params
def server(self, ensemble_params_lst, round_, **kwargs):
"""
kwargs: dict
{
"lr": 学习率,
"T": 蒸馏超参,温度
"epoch": 蒸馏训练轮数
"method": 多个教师蒸馏一个学习的方法,avg_logits, avg_losses
"kd_loader": 蒸馏数据集,仅需输入,无需标签
}
"""
ensemble_params = super().server(ensemble_params_lst, round_)
return self.server_post_processing(
ensemble_params_lst, ensemble_params, **kwargs
)
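# Usage sketch (hedged): the names below (model_base, fed_strategy, client_params,
# kd_loader) are placeholders that are not defined in this module. A server round
# with data-free ensemble distillation might look like:
#
#   df = DF(model_base, fed_strategy)
#   ensemble_params = df.server(
#       client_params, round_=1,
#       kd_loader=kd_loader, device="cuda",
#       T=2.0, epoch=1, lr=1e-3, method="avg_logits",
#   )
#   # ensemble_params["w_glob"] then holds the distilled global weights.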
|
py
|
1a58f70d815552637c90e60049ebbd6f5140b69e
|
import arcpy
#from arcpy.sa import * #If anything is ever not defined see if it is part of arcpy.sa
import glob
import os
import sys
import csv
import traceback
import numpy
##from scipy import stats
try:
import mysql.connector
from mysql.connector import errorcode
except ImportError:
print('No MySQL support. Use SQLite database or install MySQL')
import sqlite3
import datetime
import json
import shutil
import subprocess
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
##Checkout needed Extentions
class SpatialLicenseError(Exception):
pass
class GeostatsLicenseError(Exception):
pass
try:
if arcpy.CheckExtension('Spatial') == "Available":
arcpy.CheckOutExtension('Spatial')
else:
raise SpatialLicenseError
except SpatialLicenseError:
arcpy.AddError("Spatial License is unavailable")
sys.exit(1) ## Terminate script
try:
if arcpy.CheckExtension('Geostats') == "Available":
arcpy.CheckOutExtension('Geostats')
else:
raise GeostatsLicenseError
except GeostatsLicenseError:
arcpy.AddError("Geostats License is unavailable")
sys.exit(1) ## Terminate script
data = {'sql_ph': '%s'}
##Set up workspaces
scratchWS = arcpy.env.scratchFolder
arcpy.env.workspace = scratchWS
arcpy.env.scratchWorkspace = scratchWS
arcpy.AddMessage('Scratch Workspace: ' + scratchWS)
scratchGDB = arcpy.env.scratchGDB
arcpy.env.overwriteOutput = True
##Add workspace to data dict
data['scratch_ws'] = scratchWS
data['scratch_gdb'] = scratchGDB
date_now = datetime.datetime.now()
s_now = date_now.strftime('%Y%d%b_%H%M%S')
os.makedirs(scratchWS + '/Output_' + s_now)
outFolder = '{0}/Output_{1}'.format(scratchWS, s_now)
data['out_folder'] = outFolder
##arcpy.AddMessage('Output Folder: ' + outFolder)
##Define Functions
def roundTime(dt, roundTo=60):
seconds = (dt - dt.min).seconds
rounding = (seconds+roundTo/2) // roundTo * roundTo
return dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)
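## Illustrative example (hypothetical values): with roundTo=3600 the helper rounds
## a timestamp to the nearest hour, e.g.
##   roundTime(datetime.datetime(2014, 7, 11, 9, 22, 15), roundTo=3600)
##   returns datetime.datetime(2014, 7, 11, 9, 0)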
def selectWatershed(watershed):
''' Initialize all Relevant Data from the Geodatabase based on chosen watershed '''
stations = '' # Feature class of station meta data/locations
elev_tiff = '' # Needed for wind speed. Cannot have NoData cells
dem = '' # Needed for almost all/elev_tiff can also be used for this
view_factor = '' # Needed for Thermal radiation
search_radius = ''
db = ''
if watershed == 'Johnston Draw':
arcpy.AddMessage('Johnston Draw Watershed')
base_path = r'C:\ReynoldsCreek\Input_Data'
stations = r'{0}\Input_Data.gdb\station_locations_jd'.format(base_path)
stations_soil = r'{0}\Input_Data.gdb\station_locations_jd'.format(base_path)
elev_tiff = r'{0}\jd_elevation_filled.tif'.format(base_path)
dem = elev_tiff
view_factor = r'{0}\jd_view_factor.tif'.format(base_path)
search_radius = '1000'
db = '{0}/jd_data.db'.format(base_path)
data['sql_ph'] = '?'
elif watershed == 'Reynolds Creek':
arcpy.AddMessage('Reynolds Creek Watershed')
base_path = r'C:\ReynoldsCreek\Input_Data'
stations = r'{0}\Input_Data.gdb\station_locations_rc'.format(base_path)
stations_soil = r'{0}\Input_Data.gdb\station_locations_rc_soil'.format(base_path)
elev_tiff = r'{0}\rc_elev_filled.tif'.format(base_path)
dem = elev_tiff
view_factor = r'{0}\rc_view_factor.tif'.format(base_path)
search_radius = '10000'
db = r'{0}\rc_data.db'.format(base_path)
data['sql_ph'] = '?'
elif watershed == 'Valles Caldera':
arcpy.AddMessage('Valles Caldera Watershed')
base_path = r'C:\ReynoldsCreek\Input_Data'
stations = r'{0}\Input_Data.gdb\station_locations_vc'.format(base_path)
stations_soil = r'{0}\Input_Data.gdb\station_locations_vc'.format(base_path)
elev_tiff = r'{0}\vc_elev_filled.tif'.format(base_path)
dem = elev_tiff
view_factor = ''
search_radius = '21500'
db = r'{0}\vc_data.db'.format(base_path)
data['sql_ph'] = '?'
##elif watershed == 'TESTING':
## arcpy.AddMessage('Testing watershed')
## file_path = os.path.dirname(os.path.abspath(__file__))
## base_path = r'{0}\demo_data'.format(file_path)
## stations = '{0}\demo_sites.shp'.format(base_path)
## elev_tiff = '{0}\demo_data.tif'.format(base_path)
## dem = '{0}\demo_data.tif'.format(base_path)
## view_factor = '{0}\demo_data_vf.tif'.format(base_path)
## db = '{0}\demo.db'.format(base_path)
## search_radius = '1000'
## data['sql_ph'] = '?'
return stations, stations_soil, elev_tiff, dem, view_factor, search_radius, db
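## Usage sketch: callers unpack the returned configuration tuple, e.g.
##   (stations, stations_soil, elev_tiff, dem,
##    view_factor, search_radius, db) = selectWatershed('Reynolds Creek')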
def ConnectDB(db, username = 'root', passwd = ''):
'''connect to MySQL database'''
if len(db.split('.')) == 1:
try:
cnx = mysql.connector.connect(user=username, password=passwd,
host='localhost',
database=db,
buffered=True)
return cnx
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
arcpy.AddMessage('Something is wrong with your user name or password')
elif err.errno == errorcode.ER_BAD_DB_ERROR:
arcpy.AddMessage('Database does not exist')
else:
arcpy.AddMessage(err)
else:
arcpy.AddMessage('Connection successful')
## Connect to sqlite3 database
elif db.split('.')[-1] == 'db':
cnx = sqlite3.connect(db)
return cnx
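## Usage sketch: a name ending in '.db' (e.g. 'rc_data.db') opens a local SQLite
## connection, while a bare database name (no dot) goes through MySQL with the
## default root/empty-password credentials, e.g.
##   cnx = ConnectDB(db)
##   cur = cnx.cursor()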
def ParameterList(param_dict, rows, table_type):
'''Append all data to the end of the parameter list'''
if table_type == 'climate':
for row in rows:
if data['watershed'] == 'Johnston Draw' or data['watershed'] == 'TESTING':
param_dict['site_key'].append(row[0])
param_dict['date_time'].append(row[1])
param_dict['air_temperature'].append(row[8])
param_dict['vapor_pressure'].append(row[10])
param_dict['dew_point'].append(row[11])
param_dict['solar_radiation'].append(row[12])
param_dict['wind_speed'].append(row[13])
param_dict['wind_direction'].append(row[14])
elif data['watershed'] == 'Reynolds Creek' or data['watershed'] == 'Valles Caldera':
param_dict['site_key'].append(row[0])
param_dict['date_time'].append(row[1])
param_dict['air_temperature'].append(row[9])
param_dict['vapor_pressure'].append(row[11])
param_dict['dew_point'].append(row[12])
param_dict['solar_radiation'].append(row[13])
param_dict['wind_speed'].append(row[14])
param_dict['wind_direction'].append(row[15])
elif table_type == 'precip':
for row in rows:
if data['watershed'] == 'Johnston Draw' or data['watershed'] == 'TESTING':
param_dict['site_key'].append(row[0])
param_dict['ppts'].append(row[2])
param_dict['pptu'].append(row[3])
param_dict['ppta'].append(row[4])
elif data['watershed'] == 'Reynolds Creek' or data['watershed'] == 'Valles Caldera':
param_dict['site_key'].append(row[0])
param_dict['ppts'].append(row[2])
param_dict['pptu'].append(row[3])
param_dict['ppta'].append(row[4])
elif table_type == 'soil_temperature':
for row in rows:
if data['watershed'] == 'Johnston Draw':
param_dict['site_key'].append(row[0])
param_dict['stm005'].append(row[3]) # column 3 is soil temp at 5 cm depth
if data['watershed'] == 'Reynolds Creek':
param_dict['site_key'].append(row[0])
param_dict['stm005'].append(row[4]) # column 4 is soil temp at 5 cm depth
elif table_type == 'snow_depth':
for row in rows:
if data['watershed'] == 'Johnston Draw' or data['watershed'] == 'Reynolds Creek' or data['watershed'] == 'TESTING':
param_dict['site_key'].append(row[0])
param_dict['zs'].append(row[-1])
##arcpy.AddMessage(param_dict)
return param_dict
def BuildClimateTable(params, num):
arcpy.management.CreateTable(data['scratch_gdb'], 'climate_table')
table = data['scratch_gdb'] + '/climate_table'
keys = [] # Holds data types collected (wind speed, air temperature, etc) to add to table
for key in params:
if key == 'site_key':
ftype = 'TEXT'
elif key == 'date_time':
ftype = 'DATE'
else:
ftype = 'FLOAT'
arcpy.management.AddField(in_table = table,
field_name = key,
field_type = ftype)
keys.append(key)
in_cursor = arcpy.InsertCursor(table)
#print keys
#print params
#Add data from rows into climate table
for j in range(0, num):
row = in_cursor.newRow()
for k in range(0, len(keys)):
# keys[x] = site_key, air_temperature, etc.
# params[keys[k][j] = value (ie -2.5)
row.setValue(keys[k], params[ keys[k] ][j])
in_cursor.insertRow(row)
del in_cursor
## del row
return table
def DataTable(parameter, data_table, multi_fields = []):
''' Create paramater scratch table to be used for interpolation '''
scratch_data = []
temp_table1 = parameter + '_table'
temp_table2 = 'in_memory/' + parameter + '_table2'
##===============================================================
##
## These checks really need some work.
##
##===============================================================
if len(multi_fields) == 2: #Simplify these checks somehow
#Thermal radation stats_fields
# format - [['air_temperature', 'MEAN'], ['vapor_pressure', 'MEAN']]
stats_fields = []
clause = '{0} > -500 AND {1} > -500'.format(multi_fields[0], multi_fields[1])
for l in multi_fields:
stats_fields.append([l, 'MEAN'])
elif len(multi_fields) == 3:
# Wind speed
stats_fields = []
clause = '{0} > -500 AND {1} > -500 AND {2} > -500'.format(multi_fields[0], multi_fields[1], multi_fields[2])
for l in multi_fields:
stats_fields.append([l, 'MEAN'])
else: # regular parameters
stats_fields = parameter + ' MEAN'
clause = parameter + ' > -500'
# Make new temporary table
out = arcpy.management.MakeTableView(in_table = data_table,
out_view = temp_table1,
where_clause = clause)
scratch_data.append(temp_table1)
out_mem = arcpy.analysis.Statistics(in_table = temp_table1,
out_table = temp_table2,
statistics_fields = stats_fields,
case_field = 'site_key')
# Copy stats to tempStations feature class
if parameter == 'stm005':
###====================================
###
### Soil temperature feature class already has elevation data for all feature classes
###
###====================================
arcpy.env.extent = data['station_locations_soil']
temp_stations = arcpy.management.CopyFeatures(in_features = data['station_locations_soil'],
out_feature_class = data['scratch_gdb'] + '/tempStations')
else:
temp_stations = arcpy.management.CopyFeatures(in_features = data['fc_stations_elev'],
out_feature_class = data['scratch_gdb'] + '/tempStations')
# Join stats to temp stations feature class
if len(multi_fields) > 0: #Thermal radiation and wind speed
tr_fields = []
for l in multi_fields:
tr_fields.append('MEAN_' + l)
arcpy.management.JoinField(in_data = temp_stations,
in_field = 'Site_key',
join_table = temp_table2,
join_field = 'site_key',
fields = tr_fields)
else: # Regular parameters
arcpy.management.JoinField(in_data = temp_stations,
in_field = 'Site_Key',
join_table = temp_table2,
join_field = 'site_key',
fields = 'MEAN_' + parameter)
# Delete rows from feature class that have negative or null elevations
cursor = arcpy.UpdateCursor(temp_stations)
if parameter == 'stm005':
arcpy.AddMessage('Soil temperature')
arcpy.env.extent = data['station_locations_soil']
else:
for row in cursor:
if (row.getValue('RASTERVALU') < 0 or
row.getValue('RASTERVALU') == 'None' or
row.getValue('RASTERVALU') is None ):
cursor.deleteRow(row)
else:
row.setValue('RASTERVALU', round(row.getValue('RASTERVALU'), 2))
cursor.updateRow(row)
del cursor
del row
# Delete rows from feature class that have null values for parameter
cursor = arcpy.UpdateCursor(temp_stations)
if len(multi_fields) == 2: #thermal Radiation check
for row in cursor:
val0 = 'MEAN_' + multi_fields[0]
val1 = 'MEAN_' + multi_fields[1]
if row.isNull(val0) or row.isNull(val1):
cursor.deleteRow(row)
if len(multi_fields) == 3: # Wind speed
for row in cursor:
val0 = 'MEAN_' + multi_fields[0]
val1 = 'MEAN_' + multi_fields[1]
val2 = 'MEAN_' + multi_fields[2]
if row.isNull(val0) or row.isNull(val1) or row.isNull(val2):
cursor.deleteRow(row)
else:
for row in cursor:
if row.isNull('MEAN_' + parameter):
cursor.deleteRow(row)
else:
row.setValue('MEAN_' + parameter, round(row.getValue('MEAN_' + parameter), 2))
cursor.updateRow(row)
del cursor
del row
DeleteScratchData(scratch_data)
return temp_stations
def DetrendedMethod(parameter, data_table, date_stamp, out_ras):
arcpy.AddMessage('Detrended Kriging')
resid_raster = data['scratch_gdb'] + '/' + parameter
out_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_ras, date_stamp, data['file_format'])
# Add unique ID field to temporary data table for use in OLS function
arcpy.management.AddField(in_table = data_table,
field_name = 'Unique_ID',
field_type = 'SHORT',
field_is_nullable = 'NULLABLE',
field_is_required = 'NON_REQUIRED')
arcpy.management.CalculateField(in_table = data_table,
field = 'Unique_ID',
expression = '!OBJECTID!',
expression_type = 'PYTHON_9.3')
#Run ordinary least squares of temporary data table
coef_table = arcpy.management.CreateTable(data['scratch_gdb'], 'coef_table_' + parameter)
ols = arcpy.stats.OrdinaryLeastSquares(Input_Feature_Class = data_table,
Unique_ID_Field = 'Unique_ID',
Output_Feature_Class = 'in_memory/fcResid',
Dependent_Variable = 'MEAN_' + parameter,
Explanatory_Variables = 'RASTERVALU',
Coefficient_Output_Table = coef_table)
intercept = list((row.getValue('Coef') for row in arcpy.SearchCursor(coef_table, fields='Coef')))[0]
slope = list((row.getValue('Coef') for row in arcpy.SearchCursor(coef_table, fields='Coef')))[1]
#Calculate residuals and add them to temporary data table
arcpy.management.AddField(in_table = data_table,
field_name = 'residual',
field_type = 'DOUBLE',
field_is_nullable = 'NULLABLE',
field_is_required = 'NON_REQUIRED')
cursor = arcpy.UpdateCursor(data_table)
for row in cursor:
row_math = row.getValue('MEAN_' + parameter) - ((slope * row.getValue('RASTERVALU')) + intercept)
row.setValue('residual', row_math)
cursor.updateRow(row)
del cursor
del row
#Run ordinary kriging on residuals
#Dewpoint/Vapor pressure kriging model
k_model = arcpy.sa.KrigingModelOrdinary('SPHERICAL', 460, 3686, .1214, .2192)
#Air temp kriging model
#k_model = KrigingModelOrdinary('LINEAR', 37.061494)
radius = arcpy.sa.RadiusFixed(10000, 1)
outKrig = arcpy.sa.Kriging(in_point_features = data_table,
z_field = 'residual',
kriging_model = k_model,
cell_size = data['output_cell_size'],
search_radius = radius)
outKrig.save(resid_raster)
return_raster = arcpy.Raster(resid_raster) + (arcpy.Raster(data['dem']) * slope + intercept)
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(return_raster, out_raster_name)
else:
return_raster.save(out_raster_name)
#Delete scratch/residual data.
del outKrig
del k_model
del radius
arcpy.management.Delete(resid_raster)
return out_raster_name
def IDWMethod(parameter, data_table, date_stamp, out_ras):
arcpy.AddMessage('Inverse Distance Weighted')
scratch_raster = '{0}/{1}'.format(data['scratch_gdb'], parameter)
out_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_ras, date_stamp, data['file_format'])
idw_out = arcpy.sa.Idw(in_point_features = data_table,
z_field = 'MEAN_' + parameter,
cell_size = data['elev_tiff'],
power = 2)
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(idw_out, out_raster_name)
else:
idw_out.save(out_raster_name)
arcpy.AddMessage('Out Raster {0}'.format(out_raster_name))
return out_raster_name
def EBKMethod(parameter, data_table, date_stamp, out_ras):
arcpy.AddMessage('Empirical Bayesian Kriging')
scratch_raster = '{0}/{1}'.format(data['scratch_gdb'], parameter)
out_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_ras, date_stamp, data['file_format'])
#arcpy.AddMessage(data_table)
arcpy.ga.EmpiricalBayesianKriging(in_features = data_table,
z_field = 'MEAN_' + parameter,
out_raster = scratch_raster,
cell_size = data['output_cell_size'],
transformation_type = 'EMPIRICAL',
max_local_points = '100',
overlap_factor = '1',
number_semivariograms = '100',
search_neighborhood = 'NBRTYPE=SmoothCircular RADIUS={0} SMOOTH_FACTOR=0.2'.format(data['search_radius']),
output_type = 'PREDICTION',
quantile_value = '0.5',
threshold_type = 'EXCEED',
semivariogram_model_type='WHITTLE_DETRENDED')
#Mask output to size of original DEM
## For some reason this is no longer a problem.
## Extract By Mask does not run well on newer versions of arcmap so it is not used.
#outExtract = ExtractByMask(scratch_raster, data['dem'])
outExtract = arcpy.Raster(scratch_raster)
if(data['file_format'] =='ASC'):
arcpy.conversion.RasterToASCII(outExtract, out_raster_name)
else:
outExtract.save(out_raster_name)
arcpy.management.Delete(scratch_raster)
return out_raster_name
def CombinedMethod(parameter, data_table, date_stamp, out_ras):
arcpy.AddMessage('Combined Method')
scratch_raster = '{0}/{1}'.format(data['scratch_gdb'], parameter)
resid_raster = '{0}/{1}_{2}'.format(data['scratch_gdb'], parameter, 'residual')
out_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_ras, date_stamp, data['file_format'])
# Add unique ID field to temporary data table for use in OLS function
arcpy.management.AddField(in_table = data_table,
field_name = 'Unique_ID',
field_type = 'SHORT',
field_is_nullable = 'NULLABLE',
field_is_required = 'NON_REQUIRED')
arcpy.management.CalculateField(in_table = data_table,
field = 'Unique_ID',
expression = '!OBJECTID!',
expression_type = 'PYTHON_9.3')
#Run ordinary least squares of temporary data table
coef_table = arcpy.management.CreateTable(data['scratch_gdb'], 'coef_table_' + parameter)
ols = arcpy.stats.OrdinaryLeastSquares(Input_Feature_Class = data_table,
Unique_ID_Field = 'Unique_ID',
Output_Feature_Class = 'in_memory/fcResid',
Dependent_Variable = 'MEAN_' + parameter,
Explanatory_Variables = 'RASTERVALU',
Coefficient_Output_Table = coef_table)
intercept = list((row.getValue('Coef') for row in arcpy.SearchCursor(coef_table, fields='Coef')))[0]
slope = list((row.getValue('Coef') for row in arcpy.SearchCursor(coef_table, fields='Coef')))[1]
#Calculate residuals and add them to temporary data table
arcpy.management.AddField(in_table = data_table,
field_name = 'residual',
field_type = 'DOUBLE',
field_is_nullable = 'NULLABLE',
field_is_required = 'NON_REQUIRED')
cursor = arcpy.UpdateCursor(data_table)
for row in cursor:
row_math = row.getValue('MEAN_' + parameter) - ((slope * row.getValue('RASTERVALU')) + intercept)
row.setValue('residual', row_math)
cursor.updateRow(row)
del cursor
del row
arcpy.ga.EmpiricalBayesianKriging(in_features = data_table,
z_field = 'MEAN_' + parameter,
out_raster = resid_raster,
cell_size = data['output_cell_size'],
transformation_type = 'EMPIRICAL',
max_local_points = '100',
overlap_factor = '1',
number_semivariograms = '100',
search_neighborhood = 'NBRTYPE=SmoothCircular RADIUS=10000.9518700025 SMOOTH_FACTOR=0.2',
output_type = 'PREDICTION',
quantile_value = '0.5',
threshold_type = 'EXCEED',
semivariogram_model_type='WHITTLE_DETRENDED')
out_extract = arcpy.sa.ExtractByMask(resid_raster, data['dem'])
out_extract.save(scratch_raster)
#Add back elevation trends and save final raster
output_raster = arcpy.Raster(scratch_raster) + (arcpy.Raster(data['dem']) * slope + intercept)
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(output_raster, out_raster_name)
else:
output_raster.save(out_raster_name)
arcpy.management.Delete(scratch_raster)
arcpy.management.Delete(resid_raster)
return out_raster_name
def Interpolate(parameter, scratch_table, date_stamp, out_name):
'''Interpolate using the chosen method'''
raster = ''
if data['kriging_method'] == 'Detrended':
raster = DetrendedMethod(parameter, scratch_table, date_stamp, out_name)
#raster.save(data['out_folder'] + '/' + param + '.tif')
elif data['kriging_method'] == 'Combined':
raster = CombinedMethod(parameter, scratch_table, date_stamp, out_name)
elif data['kriging_method'] == 'IDW':
raster = IDWMethod(parameter, scratch_table, date_stamp, out_name)
else:
try:
raster = EBKMethod(parameter, scratch_table, date_stamp, out_name)
except arcpy.ExecuteError:
arcpy.AddMessage(arcpy.GetMessages(2))
return raster
def OLS(parameter, scratch_table, date_stamp, out_name):
out_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_name, date_stamp, data['file_format'])
#Run ordinary least squares on scratch_table
coef_table = arcpy.management.CreateTable(data['scratch_gdb'], 'coef_table')
if parameter == 'stm005':
exp_var = 'Elevation'
else:
exp_var = 'RASTERVALU'
arcpy.management.AddField(in_table = scratch_table,
field_name = 'Unique_ID',
field_type = 'SHORT',
field_is_nullable = 'NULLABLE',
field_is_required = 'NON_REQUIRED')
arcpy.management.CalculateField(in_table = scratch_table,
field = 'Unique_ID',
expression = '!OBJECTID!',
expression_type = 'PYTHON_9.3')
ols = arcpy.stats.OrdinaryLeastSquares(Input_Feature_Class = scratch_table,
Unique_ID_Field = 'Unique_ID',
Output_Feature_Class = 'in_memory/fcResid',
Dependent_Variable = 'MEAN_' + parameter,
Explanatory_Variables = exp_var,
Coefficient_Output_Table = coef_table)
intercept = list((row.getValue('Coef') for row in arcpy.SearchCursor(coef_table, fields='Coef')))[0]
slope = list((row.getValue('Coef') for row in arcpy.SearchCursor(coef_table, fields='Coef')))[1]
arcpy.env.extent = data['ext_elev']
return_raster = arcpy.Raster(data['dem']) * slope + intercept
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(return_raster, out_raster_name)
else:
return_raster.save(out_raster_name)
return out_raster_name
def AirTemperature(clim_tab, date_stamp):
arcpy.AddMessage('Air Temperature')
param = 'air_temperature'
out_raster_title = 'T_a'
scratch_table = DataTable(param, clim_tab)
#arcpy.management.CopyRows(scratch_table, data['scratch_gdb'] + '/temp_ta')
#Kriging
raster = Interpolate(param, scratch_table, date_stamp, out_raster_title)
#Delete tempStations when done.
#arcpy.management.Delete(scratch_table)
return raster
def DewPoint(clim_tab, date_stamp):
arcpy.AddMessage('Dewpoint Temperature')
param = 'dew_point'
scratch_table = DataTable(param, clim_tab)
out_raster_title = 'T_pp'
#arcpy.management.CopyRows(scratch_table, data['scratch_gdb'] + '/temp_dp')
#Kriging
raster = Interpolate(param, scratch_table, date_stamp, out_raster_title)
#Delete tempStations when done
arcpy.management.Delete(scratch_table)
return raster
def PercentSnow(dew_point, date_stamp):
inRas = arcpy.Raster(dew_point)
outRas = '{0}/percent_snow_{1}.{2}'.format(data['out_folder'], date_stamp, data['file_format'])
out_snow_ras = arcpy.sa.Con(inRas < -5.0, 1.0,
arcpy.sa.Con((inRas >= -5.0) & (inRas < -3.0), 1.0,
arcpy.sa.Con((inRas >= -3.0) & (inRas < -1.5), 1.0,
arcpy.sa.Con((inRas >= -1.5) & (inRas < -0.5), 1.0,
arcpy.sa.Con((inRas >= -0.5) & (inRas < 0.0), 0.75,
arcpy.sa.Con((inRas >= 0.0) & (inRas < 0.5), 0.25,
arcpy.sa.Con(inRas >= 0.5,0.0)))))))
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(out_snow_ras, outRas)
else:
arcpy.management.CopyRaster(in_raster = out_snow_ras,
out_rasterdataset=outRas,
pixel_type = '32_BIT_FLOAT')
return outRas
def SnowDensity(dew_point, date_stamp):
inRas = arcpy.Raster(dew_point)
outRas = '{0}/rho_snow_{1}.{2}'.format(data['out_folder'], date_stamp, data['file_format'])
out_snow_density = arcpy.sa.Con(inRas < -5.0, 1.0,
arcpy.sa.Con((inRas >= -5.0) & (inRas < -3.0), 1.0,
arcpy.sa.Con((inRas >= -3.0) & (inRas < -1.5), 1.0,
arcpy.sa.Con((inRas >= -1.5) & (inRas < -0.5), 1.0,
arcpy.sa.Con((inRas >= -0.5) & (inRas < 0.0), 0.75,
arcpy.sa.Con((inRas >= 0.0) & (inRas < 0.5), 0.25,
arcpy.sa.Con(inRas >= 0.5,0.0)))))))
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(out_snow_density, outRas)
else:
arcpy.management.CopyRaster(in_raster = out_snow_density,
out_rasterdataset=outRas,
pixel_type = '32_BIT_FLOAT')
return outRas
def VaporPressure(clim_tab, date_stamp):
arcpy.AddMessage('Vapor Pressure')
param = 'vapor_pressure'
scratch_table = DataTable(param, clim_tab)
out_raster_title = 'e_a'
#arcpy.management.CopyRows(scratch_table, data['scratch_gdb'] + '/temp_ta')
#Kriging
raster = Interpolate(param, scratch_table, date_stamp, out_raster_title)
#Delete tempStations when done.
arcpy.management.Delete(scratch_table)
return raster
def SolarRadiation(clim_tab, date_stamp, date_time, time_step):
arcpy.AddMessage('Solar Radiation')
scratch_data = []
param = 'solar_radiation'
out_raster_title = 'S_n'
out_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_raster_title, date_stamp, data['file_format'])
#set up area solar radiation tool parameters and run the tool
#Set up time parameters
day_of_year = date_time.timetuple().tm_yday
i_sr_start = int(date_time.strftime('%H'))
i_sr_end = i_sr_start + data['time_step']
in_twd = arcpy.sa.TimeWithinDay(day_of_year, i_sr_start, i_sr_end)
sky_size = 200
try:
out_global_radiation = arcpy.sa.AreaSolarRadiation(data['dem'], '', sky_size, in_twd)
#out_global_radiation = out_global_radiation / data['time_step']
except arcpy.ExecuteError:
msgs = arcpy.GetMessages(2)
#arcpy.AddMessage(msgs)
if 'Failed to open raster dataset' in msgs or 'Error in creating sun map' in msgs:
arcpy.AddMessage("Skip night hours")
return
#Set up scratch data table
scratch_table = DataTable(param, clim_tab)
scratch_data.append(scratch_table)
glob_rad_raster = data['scratch_gdb'] + '/glob_rad_raster'
sim_points = data['scratch_gdb'] + '/simPoints'
scratch_data.append(glob_rad_raster)
scratch_data.append(sim_points)
#Correct global radiation raster for cloud conditions
#Extract simulated global radiation values to station location feature class
arcpy.management.AlterField(in_table = scratch_table,
field = 'RASTERVALU',
new_field_name = 'Elevation')
arcpy.sa.ExtractValuesToPoints(in_point_features = scratch_table,
in_raster = out_global_radiation,
out_point_features = sim_points,
interpolate_values = 'NONE',
add_attributes = 'VALUE_ONLY')
arcpy.management.AddField(in_table = sim_points,
field_name = 'ratio',
field_type = 'FLOAT',
field_is_nullable = 'NULLABLE',
field_is_required = 'NON_REQUIRED')
arcpy.management.CalculateField(in_table = sim_points,
field = 'ratio',
expression = '!MEAN_solar_radiation!/ !RASTERVALU!',
expression_type = 'PYTHON_9.3')
#convert 'ratio' field to numpy array
na = arcpy.da.TableToNumPyArray(sim_points, 'ratio')
#calculate average ratio
d_mean_ratio = numpy.mean(na['ratio'])
d_mean_ratio2 = numpy.asscalar(d_mean_ratio)
#multiply simulated raster by average ratio
out_global_radiation_corrected = out_global_radiation * d_mean_ratio2
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(out_global_radiation_corrected, out_raster_name)
else:
out_global_radiation_corrected.save(out_raster_name)
arcpy.management.Delete(scratch_table)
return out_raster_name
def ThermalRadiation(clim_tab, date_stamp, in_air, in_vap, in_surface_temp):
arcpy.AddMessage('Thermal Radiation')
param = 'thermal_radiation'
out_raster_title = 'I_lw'
out_file = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_raster_title, date_stamp, data['file_format'])
z = data['dem']
vf = data['view_factor']
T_a = in_air
vp = in_vap
fields = ['air_temperature', 'vapor_pressure']
scratch_table = DataTable(param, clim_tab, multi_fields=fields)
P_m = 0.0 # Reference Air Pressure (Vapor pressure)
T_m = 0.0 # Reference Air Temp
z_m = 0.0 # Reference elevation
T_s = in_surface_temp
cursor = arcpy.UpdateCursor(scratch_table)
for row in cursor:
z_m = row.getValue('RASTERVALU')
P_m = row.getValue('MEAN_vapor_pressure')
T_m = row.getValue('MEAN_air_temperature')
cursor.deleteRow(row)
break
del cursor
del row
arcpy.AddMessage("P_m: " + str(P_m))
arcpy.AddMessage("T_m: " + str(T_m))
arcpy.AddMessage("z_m: " + str(z_m))
arcpy.AddMessage("T_s: " + str(T_s))
# Constants
g = 9.8 # Gravity
m = 0.0289 # Molecular Weight of dry air
R = 8.3143 # Gas constant
sigma = 5.6697 * 10 ** -8 # Stefan-Boltzmann constant
epsilon_s = 0.95 # Surface emissivity
gamma = -0.006 # temperature lapse rate (K m^-1)
# convert temperature parameters to Kelvin
T_m = T_m + 273.15
T_s = T_s + 273.15
T_a = arcpy.sa.Float(arcpy.Raster(T_a) + 273.15)
# convert vapor pressure to mb
P_m = P_m * 0.01
vp = arcpy.sa.Float(arcpy.Raster(vp) * 0.01)
#Correct air temperature and vapor pressure rasters (Marks and Dozier (1979), pg. 164)
#(4) corrected air temperature
T_prime = T_a + (0.0065 * arcpy.Raster(z))
#saturated vapor pressure from original air temperature (T_a)
e_sa = arcpy.sa.Float(6.11 * 10**((7.5*arcpy.sa.Float(T_a))/(237.3 + arcpy.sa.Float(T_a))))
#saturated vapor pressure from corrected air temperature (T_prime)
    e_sprime = arcpy.sa.Float(6.11 * 10**((7.5*arcpy.sa.Float(T_prime))/(237.3 + arcpy.sa.Float(T_prime))))
rh = arcpy.sa.Float(vp / e_sa) #(5) relative humidity
e_prime = arcpy.sa.Float(rh * e_sprime) #(6) corrected vapor pressure
#Pressure at a given elevation (Marks and Dozier (1979), pg. 168-169)
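    # Sketch of the relation the next few lines implement (read off from the code itself):
    #   P_a = P_m * ((T_m + gamma*(z - z_m)) / T_m) ** (-g*m / (R*gamma))
    # i.e. the reference station pressure P_m rescaled to each grid cell's elevation z.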
term1 = ((-g*m)/(R*gamma))
delta_z = arcpy.Raster(z) - z_m
term2 = ((T_m + gamma * delta_z)) / T_m
lnTerm = arcpy.sa.Ln(term2)
expTerm = arcpy.sa.Exp(term1 * lnTerm)
P_a = P_m * expTerm #(10) air pressure
#effective emissivity (Marks and Dozier (1979), pg. 164)
epsilon_a = arcpy.sa.Float((1.24 * (e_prime / T_prime)**(1/7)) * (P_a / 1013.0)) #(7)
#Incoming longwave radiation (Marks and Dozier (1979), pg. 164)
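    # term3 is sky emission (epsilon_a*sigma*T_a^4) weighted by the sky-view factor vf; the
    # terrain contribution (epsilon_s*sigma*T_s^4) is weighted by (1 - vf) when combined below.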
term3 = arcpy.sa.Float((epsilon_a * sigma * (T_a ** 4)) * vf)
term4 = arcpy.sa.Float(epsilon_s * sigma * (T_s ** 4))
term5 = (1 - arcpy.Raster(vf))
output_thermal_radiation = arcpy.sa.Float(term3 + (term4 * term5)) #(9)
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(output_thermal_radiation, out_file)
else:
output_thermal_radiation.save(out_file)
return out_file
def PrecipitationMass(precip_tab, date_stamp):
arcpy.AddMessage('Precipitation mass')
param = 'ppta'
out_raster_title = 'm_pp'
out_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_raster_title, date_stamp, data['file_format'])
scratch_table = DataTable(param, precip_tab)
if data['watershed'] == 'Johnston Draw':
cursor = arcpy.SearchCursor(scratch_table)
x = []
y = []
for row in cursor:
x.append(row.getValue('RASTERVALU'))
y.append(row.getValue('MEAN_ppta'))
del cursor
del row
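        # Least-squares fit of mean precipitation against station elevation (y ~ slope*x + intercept);
        # the DEM is then mapped through this line to build the precipitation surface below.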
A = numpy.vstack([x,numpy.ones(len(x))]).T
slope, intercept = numpy.linalg.lstsq(A, y)[0]
arcpy.AddMessage('Slope {0}, Intercept {1}'.format(slope, intercept))
if slope != 0.0 and intercept != 0.0:
#Create final raster
arcpy.env.extent = data['ext_elev']
raster = (arcpy.Raster(data['dem']) * slope + intercept)
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(raster, out_raster_name)
else:
raster.save(out_raster_name)
return out_raster_name
else:
return
else:
raster = Interpolate(param, scratch_table, date_stamp, out_raster_title)
#Delete tempStations when done
arcpy.management.Delete(scratch_table)
return raster
def SoilTemperature(soil_tab, date_stamp):
arcpy.AddMessage('Soil Temperature')
param = 'stm005'
out_raster_title = 'T_g'
#Create Scratch Table --
    # this is different from the rest in that it does not delete records with no elevation
scratch_table = DataTable(param, soil_tab)
raster = OLS(param, scratch_table, date_stamp, out_raster_title)
arcpy.management.Delete(scratch_table)
return raster
def SnowDepth(snow_tab, date_stamp):
arcpy.AddMessage('Snow depth')
param = 'zs'
out_raster_title = 'zs'
scratch_table = DataTable(param, snow_tab)
cursor = arcpy.SearchCursor(scratch_table)
values = []
for row in cursor:
values.append(row.getValue('MEAN_zs'))
del cursor
del row
average = numpy.mean(values)
count = int(arcpy.management.GetCount(scratch_table).getOutput(0))
if count >= 10 and average > 0:
raster = Interpolate(param, scratch_table, date_stamp, out_raster_title)
    else:
        # No usable snow-depth data; return None so the caller can skip this output
        raster = None
        if count < 10:
            arcpy.AddMessage('Not enough data for snow depth. Try a different time step.')
        if average == 0:
            arcpy.AddMessage('No snow on the ground. Try a different time step if needed.')
arcpy.management.Delete(scratch_table)
return raster
def SnowCoverTemperature(date_stamp):
arcpy.AddMessage('Upper Layer')
ul_param = 'T_s_0'
avg_param = 'T_s'
ul_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], ul_param, date_stamp, data['file_format'])
avg_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], avg_param, date_stamp, data['file_format'])
if len(data['ul_interp_values']['features']) <= 1:
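        # Fewer than two calibration points were supplied, so fall back to the hard-coded default
        # elevation regression below instead of fitting one with scipy.stats.linregress.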
upper_layer_temperature = -0.0008 * arcpy.Raster(data['dem']) + 0.1053
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(upper_layer_temperature, ul_raster_name)
else:
upper_layer_temperature.save(ul_raster_name)
else:
ls_elevation = []
ls_temperature = []
for rec in data['ul_interp_values']['features']:
ls_elevation.append(rec['attributes']['Elevation'])
            ls_temperature.append(rec['attributes']['Temperature'])
        lr_results = stats.linregress(ls_elevation, ls_temperature)
slope_ul = lr_results[0]
intercept_ul = lr_results[1]
upper_layer_temperature = slope_ul * arcpy.Raster(data['dem']) + intercept_ul
if(data['file_format'] == 'ASC'):
arcpy.conversion.RasterToASCII(upper_layer_temperature, ul_raster_name)
else:
upper_layer_temperature.save(ul_raster_name)
if len(data['ll_interp_values']['features']) <=1:
lower_layer_temperature = -0.0008 * arcpy.Raster(data['dem']) + 1.3056
else:
ls_elevation = []
ls_temperature = []
for rec in data['ll_interp_values']['features']:
ls_elevation.append(rec['attributes']['Elevation'])
ls_temperature.append(rec['attributes']['Temperature'])
lr_results = stats.linregress(ls_elevation, ls_temperature)
slope_ll = lr_results[0]
intercept_ll = lr_results[1]
lower_layer_temperature = slope_ll * arcpy.Raster(data['dem']) + intercept_ll
#average snowcover temperature is the average of the upper and lower layer temperatures
avg_sc_temp = arcpy.sa.CellStatistics([upper_layer_temperature, lower_layer_temperature], 'MEAN', 'NODATA')
if data['file_format'] == 'ASC':
arcpy.conversion.RasterToASCII(avg_sc_temp, avg_raster_name)
else:
avg_sc_temp.save(avg_raster_name)
return ul_raster_name, avg_raster_name
def SnowDensityInterpolation(date_stamp):
arcpy.AddMessage('Snow Density Interpolation')
param = 'rho'
out_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'], param, date_stamp, data['file_format'])
if len(data['density_interp_values']['features']) <= 1:
snow_density_raster = -0.0395 * arcpy.Raster(data['dem']) + 405.26
if data['file_format'] == 'ASC':
arcpy.conversion.RasterToASCII(snow_density_raster, out_raster_name)
else:
snow_density_raster.save(out_raster_name)
    else: # This will not work until we get scipy loaded
ls_elevation = []
ls_density = []
for rec in data['density_interp_values']['features']:
ls_elevation.append(rec['attributes']['Elevation'])
ls_density.append(rec['attributes']['Density'])
lr_results = stats.linregress(ls_elevation, ls_density)
slope = lr_results[0]
intercept = lr_results[1]
snow_density_raster = slope * arcpy.Raster(data['dem']) + intercept
        if data['file_format'] == 'ASC':
            arcpy.conversion.RasterToASCII(snow_density_raster, out_raster_name)
        else:
            snow_density_raster.save(out_raster_name)
return out_raster_name
def Constants(rl, h2o, date_stamp):
arcpy.AddMessage('Constants')
rl_param = 'z_0'
h2o_param = 'h2o_sat'
rl_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'],rl_param,date_stamp, data['file_format'])
h2o_raster_name = '{0}/{1}_{2}.{3}'.format(data['out_folder'],h2o_param,date_stamp, data['file_format'])
desc = arcpy.Describe(data['dem'])
coord_system = desc.spatialReference
rl_constant = CreateConstantRaster(rl, 'FLOAT', data['output_cell_size'])
arcpy.management.DefineProjection(rl_constant, coord_system)
if data['file_format'] == 'ASC':
arcpy.conversion.RasterToASCII(rl_constant, rl_raster_name)
else:
rl_constant.save(rl_raster_name)
h2o_constant = CreateConstantRaster(h2o, 'FLOAT', data['output_cell_size'])
arcpy.management.DefineProjection(h2o_constant, coord_system)
if data['file_format'] == 'ASC':
arcpy.conversion.RasterToASCII(h2o_constant, h2o_raster_name)
else:
h2o_constant.save(h2o_raster_name)
return rl_raster_name, h2o_raster_name
def WindSpeed(clim_tab, date_stamp, in_date_time):
arcpy.AddMessage('Wind Speed')
scratch_data = []
param = 'wind_speed'
out_raster_title = 'u'
out_file = '{0}/{1}_{2}.{3}'.format(data['out_folder'], out_raster_title, date_stamp, data['file_format'])
fields = ['wind_speed', 'wind_direction', 'air_temperature']
scratch_table = DataTable(param, clim_tab, multi_fields=fields)
ninja_path = 'Upload text'
#ninja_path = 'C:/WindNinja/WindNinja-3.1.1/bin/WindNinja_cli.exe' # comment to upload
wind_date = in_date_time.split(" ")[0]
wind_time = in_date_time.split(" ")[1]
ls_wind_date = wind_date.split("-")
ls_wind_time = wind_time.split(":")
wind_year = ls_wind_date[0]
wind_month = ls_wind_date[1]
wind_day = ls_wind_date[2]
wind_hour = ls_wind_time[0]
wind_minute = ls_wind_time[1]
#Build station csv file from SQL data
# Add coordinates to station feature class
arcpy.management.AddGeometryAttributes(scratch_table, 'POINT_X_Y_Z_M')
#Loop through stations in station feature class and write parameter values to a csv file
csv_filename = data['scratch_ws'] + '/wn_stations.csv'
with open(csv_filename, 'wb') as csvFile:
a = csv.writer(csvFile)
a.writerow(['Station_Name', 'Coord_Sys(PROJCS,GEOGCS)', 'Datum(WGS84,NAD83,NAD27)',
'Lat/YCoord', 'Lon/XCoord', 'Height', 'Height_Units(meters,feet)', 'Speed',
'Speed_Units(mph,kph,mps)', 'Direction(degrees)', 'Temperature',
'Temperature_Units(F,C)', 'Cloud_Cover(%)', 'Radius_of_Influence',
'Radius_of_Influence_Units(miles,feet,meters,km)'])
cursor = arcpy.SearchCursor(scratch_table)
for row in cursor:
a.writerow([row.getValue("Site_Key"), 'PROJCS', 'NAD83', row.getValue("Point_Y"),
row.getValue("Point_X"), '3', 'meters', row.getValue("MEAN_wind_speed"),
'mps', row.getValue("MEAN_wind_direction"), row.getValue("MEAN_air_temperature"),
'C', '0', '-1', 'miles'])
csvFile.close()
#List arguments for WindNinja CLI
args = []
    # Comment from here to end of args for upload
# args = [ninja_path,
# "--initialization_method", "pointInitialization",
# "--elevation_file", data['elev_tiff'], #elevation raster (cannot contain any "no-data" values)
# "--match_points", "false", #match simulations to points (simulation fails if set to true)
# "--year", wind_year,
# "--month", wind_month,
# "--day", wind_day,
# "--hour", wind_hour,
# "--minute", wind_minute,
# "--mesh_resolution", data['output_cell_size'], #Resolution of model calculations
# "--vegetation", "brush", #Vegetation type (can be 'grass', 'brush', or 'trees')
# "--time_zone", "America/Boise", #time zone of target simulation
# "--diurnal_winds", "true", #consider diurnal cycles in calculations
# "--write_goog_output", "false", #write kml output (boolean: true/false)
# "--write_shapefile_output", "false", #write shapefile output (boolean: true/false)
# "--write_farsite_atm", "false", #write fire behavior file (boolean: true/false)
# "--write_ascii_output", "true", #write ascii file output (this should always be set to true)
# "--ascii_out_resolution", "-1", #resolution of output (-1 means same as mesh_resolution)
# "--units_ascii_out_resolution", "m",
# "--units_mesh_resolution", "m", #units of resolution of model calculations (should be "m" for meters)
# "--units_output_wind_height", "m", #units of output wind height
# "--output_speed_units", "mps",
# "--output_wind_height", "3",
# "--wx_station_filename", csv_filename, #weather station csv file used in point initialization method
# "--output_path", data['scratch_ws']] #path to output
# Last line uncomment for upload
    #run the WindNinja_cli.exe (output is written to the same location as the elevation raster)
arcpy.AddMessage('Calling WindNinja command line interface')
runfile = subprocess.Popen(args, stdout = subprocess.PIPE, bufsize = -1)
runfile.wait()
output = runfile.stdout.read()
if output is None:
arcpy.AddMessage('Results: None returned\n')
else:
arcpy.AddMessage('Results:\n' + output)
#convert ascii file to new grid
for file in os.listdir(data['scratch_ws']):
if file.endswith('_vel.asc'):
path_2_ascii = '{0}/{1}'.format(data['scratch_ws'], file)
scratch_data.append(path_2_ascii)
elif ( file.endswith("_vel.prj") or file.endswith('_ang.asc') or
file.endswith('_ang.prj') or file.endswith('cld.asc') or
file.endswith('_cld.prj') ):
scratch_data.append(data['scratch_ws'] + '/' + file)
# if desired file format is ASC only copy to output folder
if(data['file_format'] == 'ASC'):
shutil.copyfile(path_2_ascii, out_file)
else:
arcpy.conversion.ASCIIToRaster(in_ascii_file=path_2_ascii,
out_raster=out_file,
data_type='FLOAT')
#Get coordinate system information
desc = arcpy.Describe(data['dem'])
coord_system = desc.spatialReference
arcpy.management.DefineProjection(out_file, coord_system)
DeleteScratchData(scratch_data)
return out_file
def ClearBadZeros():
fix_zero = []
for f in glob.glob('m_pp_*.{0}'.format(data['file_format'])):
fix_zero.append(f)
for f in glob.glob('zs_*.{0}'.format(data['file_format'])):
fix_zero.append(f)
for f in fix_zero:
nm = f.split('.')
raster = arcpy.Raster(f)
zero = 0
out_con = arcpy.sa.Con(raster, zero, raster, "VALUE < 0")
if nm[1].lower() == 'asc':
arcpy.management.Delete(raster)
arcpy.conversion.RasterToASCII(out_con, '{0}\\{1}.asc'.format(data['scratch_ws'], nm[0]))
else:
out_con.save('{0}\\{1}.tif'.format(data['scratch_ws'], nm[0]))
def emailer(email, subject, message):
from_addr = '[email protected]'
to_addrs = email
msg = MIMEMultipart()
msg['From'] = from_addr
msg['To'] = to_addrs
msg['Subject'] = subject
message = message
msg.attach(MIMEText(message))
if len(to_addrs) > 2:
username = '[email protected]'
password = ''
dir_path = os.path.dirname(os.path.realpath(__file__))
text_file = dir_path + "/password.txt"
with open(text_file, 'r') as myfile:
password = myfile.read() ## MAKE SURE NOT TO COMMIT THE PASSWORD TO GIT
server = smtplib.SMTP_SSL("smtp.gmail.com:465")
server.login(username,password)
server.sendmail(from_addr, to_addrs, msg.as_string())
server.quit()
def DeleteScratchData(in_list):
#pass
#arcpy.AddMessage("Deleting scratch data")
for path in in_list:
print path
arcpy.management.Delete(path)
# Main Function --- Figure out a way to be run as script or as tool
#======================================================================
def main():
from_date_round = datetime.datetime.strptime(data['from_date'], '%Y-%m-%d %H:%M:%S')
to_date_round = datetime.datetime.strptime(data['to_date'], '%Y-%m-%d %H:%M:%S')
data['from_date'] = roundTime(from_date_round, 60*60)
data['to_date'] = roundTime(to_date_round)
return_ws = selectWatershed(data['watershed'])
data.update({'stations' : return_ws[0],
'stations_soil' : return_ws[1],
'elev_tiff' : return_ws[2],
'dem' : return_ws[3],
'view_factor' : return_ws[4],
'search_radius' : return_ws[5],
'db' : return_ws[6],
})
# Connect to database
db_cnx = ConnectDB(data['db'])
# Scratch and output lists
ls_scratch_data = []
ls_output = []
#Master stations feature class to be copied for each gridding function
data.update({'fc_stations_elev': data['scratch_gdb'] + '/stations_wElev'})
ls_scratch_data.append(data['fc_stations_elev'])
data.update({'station_locations' : data['scratch_gdb'] + '/station_locations'})
data.update({'station_locations_soil' : data['scratch_gdb'] + '/station_locations_soil'})
arcpy.management.CopyFeatures(data['stations'], data['station_locations'])
arcpy.management.CopyFeatures(data['stations_soil'], data['station_locations_soil'])
ls_scratch_data.append(data['station_locations'])
ls_scratch_data.append(data['station_locations_soil'])
data['ext_features'] = arcpy.Describe(data['station_locations']).extent
arcpy.env.cellSize = data['dem']
arcpy.AddMessage(arcpy.Describe(data['dem']).extent)
data.update({'output_cell_size' : arcpy.env.cellSize,
'ext_elev' : arcpy.Describe(data['dem']).extent
})
arcpy.env.extent = data['ext_elev']
arcpy.sa.ExtractValuesToPoints(in_point_features = data['station_locations'],
in_raster = data['dem'],
out_point_features = data['fc_stations_elev'],
interpolate_values = 'NONE',
add_attributes = 'VALUE_ONLY')
delta = datetime.timedelta(hours=data['time_step'])
date_increment = data['from_date']
# date_counter - counter to help setup data for ISNOBAL (saved in date_file.txt)
date_counter = 0
date_file = open('{0}/date_file.txt'.format(data['out_folder']), 'a')
while date_increment < data['to_date']:
arcpy.AddMessage(' ')
arcpy.AddMessage('Current time step: {0}'.format(date_increment))
if any([data['bool_all_tools'], data['bool_air_temperature'],
data['bool_dew_point'], data['bool_vapor_pressure'],
data['bool_wind_speed'], data['bool_solar_radiation'],
data['bool_thermal_radiation']]):
# Run climate data
ls_scratch_data_imd = []
            # Parameter lists
parameters = {'site_key' : [],
'date_time' : [],
'air_temperature' : [],
'vapor_pressure' : [],
'dew_point' : [],
'solar_radiation' : [],
'wind_speed' : [],
'wind_direction' : []
}
            # Query climate (weather) table
from_date = date_increment.strftime('%Y-%m-%d %H:%M:%S')
time_stamp = date_increment.strftime('%Y%m%d_%H')
to_date_temp = date_increment + delta
to_date = to_date_temp.strftime('%Y-%m-%d %H:%M:%S')
query = ('SELECT * FROM weather WHERE '\
'date_time >= ' + data['sql_ph'] + ' '\
'AND date_time < ' + data['sql_ph'] + ';')
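            # data['sql_ph'] holds the DB driver's parameter placeholder (for example '%s' or '?'),
            # so the date range is passed as bound parameters to cur.execute below rather than
            # being formatted into the SQL string.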
cur = db_cnx.cursor()
cur.execute(query, (from_date,to_date))
rows = cur.fetchall()
i_num_return = len(rows)
##arcpy.AddMessage('Query: ' + query)
#arcpy.AddMessage('Row Count: {0}'.format(i_num_return))
#Build parameter lists into dictionary
parameters = ParameterList(parameters, rows, table_type = 'climate')
cur.close()
# Build Climate table
climate_table = BuildClimateTable(parameters, i_num_return)
ls_scratch_data_imd.append(climate_table)
# Run interpolation tools
if data['bool_air_temperature']:
path_air_temp = AirTemperature(climate_table, time_stamp)
ls_output.append(path_air_temp)
if data['bool_dew_point']:
path_dew_point = DewPoint(climate_table, time_stamp)
path_percent_snow = PercentSnow(path_dew_point, time_stamp)
path_snow_density = SnowDensity(path_dew_point, time_stamp)
ls_output.extend([path_dew_point, path_percent_snow, path_snow_density])
if data['bool_vapor_pressure']:
path_vapor_pressure = VaporPressure(climate_table, time_stamp)
ls_output.append(path_vapor_pressure)
if data['bool_wind_speed']:
path_wind_speed = WindSpeed(climate_table, time_stamp, from_date)
ls_output.append(path_wind_speed)
if data['bool_solar_radiation']:
path_solar_radiation = SolarRadiation(climate_table,
time_stamp,
date_increment,
data['time_step'])
ls_output.append(path_solar_radiation)
if data['bool_thermal_radiation']:
#Query database for average air temperature for current day
sFromTR = date_increment.strftime("%Y-%m-%d")
sQuery2 = ("SELECT AVG(NULLIF(ta , -999)) FROM weather "
"WHERE date_time >= '" + sFromTR + " 00:00:00" + "' "
"AND date_time <= '" + sFromTR + " 23:00:00'")
cur2 = db_cnx.cursor()
cur2.execute(sQuery2)
d_ref_temp = cur2.fetchone()[0]
cur2.close()
path_thermal_radiation = ThermalRadiation(climate_table,
time_stamp,
path_air_temp,
path_vapor_pressure,
d_ref_temp)
ls_output.append(path_thermal_radiation)
DeleteScratchData(ls_scratch_data_imd)
arcpy.management.Delete('in_memory')
if any([data['bool_all_tools'], data['bool_precip_mass']]):
# Run climate data
ls_scratch_data_imd = []
# Initiate parameter lists
parameters = {'site_key' : [],
'ppts' : [],
'pptu' : [],
'ppta' : []}
# Query precip table
from_date = date_increment.strftime('%Y-%m-%d %H:%M:%S')
time_stamp = date_increment.strftime('%Y%m%d_%H')
to_date_temp = date_increment + delta
to_date = to_date_temp.strftime('%Y-%m-%d %H:%M:%S')
query = ('SELECT * FROM precipitation WHERE '\
'date_time >= ' + data['sql_ph'] + ' '\
'AND date_time < ' + data['sql_ph'] + ';')
cur = db_cnx.cursor()
cur.execute(query, (from_date, to_date))
rows = cur.fetchall()
i_num_return = len(rows)
##arcpy.AddMessage('Query: ' + query)
##arcpy.AddMessage('Row Count: {0}'.format(i_num_return))
parameters = ParameterList(parameters, rows, table_type = 'precip')
cur.close()
precip_table = BuildClimateTable(parameters, i_num_return)
ls_scratch_data_imd.append(precip_table)
if data['bool_precip_mass']:
path_precip_mass = PrecipitationMass(precip_table, time_stamp)
ls_output.append(path_precip_mass)
DeleteScratchData(ls_scratch_data_imd)
arcpy.management.Delete('in_memory')
if any([data['bool_all_tools'], data['bool_soil_temperature']]):
ls_scratch_data_imd = []
parameters = {'site_key': [],
'stm005': []}
            #Query soil temperature table
from_date = date_increment.strftime('%Y-%m-%d %H:%M:%S')
time_stamp = date_increment.strftime('%Y%m%d_%H')
to_date_temp = date_increment + delta
to_date = to_date_temp.strftime('%Y-%m-%d %H:%M:%S')
query = ('SELECT * FROM soil_temperature WHERE '\
'date_time >= ' + data['sql_ph'] + ' '\
'AND date_time < ' + data['sql_ph'] + ';')
cur = db_cnx.cursor()
cur.execute(query, (from_date, to_date))
rows = cur.fetchall()
i_num_return = len(rows)
##arcpy.AddMessage('Query: ' + query)
##arcpy.AddMessage('Row Count: {0}'.format(i_num_return))
parameters = ParameterList(parameters, rows, table_type = 'soil_temperature')
cur.close()
soil_table = BuildClimateTable(parameters, i_num_return)
ls_scratch_data_imd.append(soil_table)
if data['bool_soil_temperature']:
path_soil_temp = SoilTemperature(soil_table, time_stamp)
ls_output.append(path_soil_temp)
DeleteScratchData(ls_scratch_data_imd)
arcpy.management.Delete('in_memory')
time_stamp = date_increment.strftime('%Y%m%d_%H')
date_file.write('{0}\t{1}\n'.format(date_counter, time_stamp))
date_counter += 1
date_increment += delta
#Run initial condition functions once
from_date = date_increment.strftime('%Y-%m-%d %H:%M:%S')
time_stamp = date_increment.strftime('%Y%m%d_%H')
to_date_temp = date_increment + delta
to_date = to_date_temp.strftime('%Y-%m-%d %H:%M:%S')
if any([data['bool_all_tools'], data['bool_snow_depth']]):
ls_scratch_data_imd = []
#Initiate parameter dict
parameters = {'site_key': [],
'zs': []
}
query = ('SELECT * FROM snow_depth WHERE '\
'date_time >= ' + data['sql_ph'] + ' '\
'AND date_time < ' + data['sql_ph'] + ';')
cur = db_cnx.cursor()
cur.execute(query, (from_date, to_date))
rows = cur.fetchall()
i_num_return = len(rows)
##arcpy.AddMessage('Query: ' + query)
##arcpy.AddMessage('Row Count: {0}'.format(i_num_return))
#Build parameter lists into dictionary
parameters = ParameterList(parameters, rows, table_type = 'snow_depth')
cur.close()
#Build Climate table
snow_table = BuildClimateTable(parameters, i_num_return)
ls_scratch_data_imd.append(snow_table)
#Run gridding function
if data['bool_snow_depth']:
path_snow_depth = SnowDepth(snow_table, time_stamp)
ls_output.append(path_snow_depth)
DeleteScratchData(ls_scratch_data_imd)
arcpy.management.Delete('in_memory')
if data['bool_snow_properties']:
        arcpy.AddMessage('Snow Properties')
path_ul_snow_temperature, path_avg_snow_temperature = SnowCoverTemperature(time_stamp)
path_snow_density = SnowDensityInterpolation(time_stamp)
ls_output.extend([path_ul_snow_temperature, path_avg_snow_temperature, path_snow_density])
if data['bool_constants']:
path_rl_constant, path_h2o_constant = Constants(data['rl_constant'], data['h2o_constant'], time_stamp)
ls_output.extend([path_rl_constant, path_h2o_constant])
db_cnx.close()
date_file.close()
ls_scratch_data.append(scratchGDB)
DeleteScratchData(ls_scratch_data)
arcpy.management.Delete('in_memory')
ClearBadZeros() ## Snow depth and precipitation update any values below zero to zero
shutil.make_archive(data['out_folder'],'zip', data['out_folder'])
arcpy.SetParameterAsText(22, data['out_folder'] + '.zip')
if __name__ == '__main__':
#Dictionary to hold all user input data.
data.update({'watershed' : arcpy.GetParameterAsText(0),
'file_format' : arcpy.GetParameterAsText(1),
'from_date' : arcpy.GetParameterAsText(2),
'to_date' : arcpy.GetParameterAsText(3),
'time_step' : int(arcpy.GetParameterAsText(4)),
'kriging_method' : arcpy.GetParameterAsText(5),
'bool_all_tools' : arcpy.GetParameter(6),
'bool_air_temperature' : arcpy.GetParameter(7),
'bool_constants' : arcpy.GetParameter(8),
'rl_constant' : arcpy.GetParameter(9),
'h2o_constant' : arcpy.GetParameter(10),
'bool_dew_point' : arcpy.GetParameter(11),
'bool_precip_mass' : arcpy.GetParameter(12),
'bool_snow_depth' : arcpy.GetParameter(13),
'bool_snow_properties' : arcpy.GetParameter(14),
'll_interp_values' : json.loads(arcpy.GetParameter(15).JSON),
'ul_interp_values' : json.loads(arcpy.GetParameter(16).JSON),
'density_interp_values' : json.loads(arcpy.GetParameter(17).JSON),
'bool_soil_temperature' : arcpy.GetParameter(18),
'bool_solar_radiation' : arcpy.GetParameter(19),
'bool_thermal_radiation' : arcpy.GetParameter(20),
'bool_vapor_pressure' : arcpy.GetParameter(21),
'bool_wind_speed' : arcpy.GetParameter(22),
'email_address' : arcpy.GetParameterAsText(24),
})
# main()
try:
main()
except:
arcpy.AddMessage("Error")
subject = "[VWCSIT] There was an error"
message = arcpy.GetMessages(0)
arcpy.AddError(message)
emailer(data['email_address'], subject, message)
else:
subject = "[VWCSIT] Processing Complete"
message = "Download the output at <>\n\n"
message += arcpy.GetMessages(0)
emailer(data['email_address'], subject, message)
## import cProfile
## import pstats
## pr = cProfile.Profile()
## pr.enable()
## main()
## pr.disable()
## ps = pstats.Stats(pr).sort_stats('cumulative')
## ps.print_stats(25)
|
py
|
1a58f7a2d3c7fc6d25bdc6b0b567e54994818e9f
|
import webloader
from bs4 import BeautifulSoup as soup
def get_company_credentials(url):
html = webloader.load(url)
return html_to_list(html)
def html_to_list(html):
page_soup = soup(html, "html.parser")
table = page_soup.find("div", {"class": "govspeak"}).table.findAll("tr")
table_list = []
for row in table:
cells = row.findAll("td")
table_row = []
for cell in cells:
table_row.append(cell.contents[0])
table_list.append(table_row)
return table_list[1:-1]
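# A hypothetical usage sketch (the URL is illustrative; the target page must contain a table
# inside a <div class="govspeak">). Note that html_to_list drops the header row and the last row.
#   rows = get_company_credentials('https://www.gov.uk/government/publications/some-table')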
|
py
|
1a58f8290df7ab41888adf3a7cdf3744b5abe80d
|
import os
import platform
from retriever.lib.models import Engine, no_cleanup
class engine(Engine):
"""Engine instance for MySQL."""
name = "MySQL"
abbreviation = "mysql"
datatypes = {
"auto": "INT(5) NOT NULL AUTO_INCREMENT",
"int": "INT",
"bigint": "BIGINT",
"double": "DOUBLE",
"decimal": "DECIMAL",
"char": ("TEXT", "VARCHAR"),
"bool": "BOOL",
}
max_int = 4294967295
required_opts = [("user",
"Enter your MySQL username",
"root"),
("password",
"Enter your password",
""),
("host",
"Enter your MySQL host",
"localhost"),
("port",
"Enter your MySQL port",
3306),
("database_name",
"Format of database name",
"{db}"),
("table_name",
"Format of table name",
"{db}.{table}"),
]
def create_db_statement(self):
createstatement = "CREATE DATABASE IF NOT EXISTS " + self.database_name()
return createstatement
def insert_data_from_file(self, filename):
"""Calls MySQL "LOAD DATA LOCAL INFILE" statement to perform a bulk
insert."""
self.get_cursor()
ct = len([True for c in self.table.columns if c[1][0][:3] == "ct-"]) != 0
if (self.table.cleanup.function == no_cleanup
and not self.table.fixed_width
and not ct
and (not hasattr(self.table, "do_not_bulk_insert") or not self.table.do_not_bulk_insert)
):
print ("Inserting data from " + os.path.basename(filename) + "...")
columns = self.table.get_insert_columns()
statement = """
LOAD DATA LOCAL INFILE '""" + filename.replace("\\", "\\\\") + """'
INTO TABLE """ + self.table_name() + """
FIELDS TERMINATED BY '""" + self.table.delimiter + """'
OPTIONALLY ENCLOSED BY '"'
LINES TERMINATED BY '\\n'
IGNORE """ + str(self.table.header_rows) + """ LINES
(""" + columns + ")"
try:
self.cursor.execute(statement)
except Exception as e:
print "Failed bulk insert (%s), inserting manually" % e
self.disconnect() # If the execute fails the database connection can get hung up
return Engine.insert_data_from_file(self, filename)
else:
return Engine.insert_data_from_file(self, filename)
def table_exists(self, dbname, tablename):
"""Checks to see if the given table exists"""
if not hasattr(self, 'existing_table_names'):
self.cursor.execute("SELECT table_schema, table_name FROM information_schema.tables WHERE table_schema NOT IN ('mysql', 'information_schema', 'performance_schema');")
self.existing_table_names = set()
for schema, table in self.cursor:
self.existing_table_names.add((schema.lower(), table.lower()))
return (dbname.lower(), tablename.lower()) in self.existing_table_names
def get_connection(self):
"""Gets the db connection."""
args = {'host': self.opts['host'],
'port': int(self.opts['port']),
'user': self.opts['user'],
'passwd': self.opts['password']}
import pymysql as dbapi
import pymysql.constants.CLIENT as client
args['client_flag'] = client.LOCAL_FILES
self.get_input()
return dbapi.connect(**args)
|
py
|
1a58f878e871515ddeaaabb06714d6f911cadce3
|
''' High-level error handling and exception raising routines and classes. '''
__all__ = ['validate', 'validate_eval', 'raise_eval', 'format_msg',
'ParsingException', 'EvaluationException', 'DocumentException', 'WebException']
import log
from exceptions import Exception
def validate(id, sline, expression, message):
''' If expression is False, logs critical error and raises ParsingException. '''
if not expression:
msg = format_msg(message, sline.string, sline.number)
log.critical(id, msg)
raise ParsingException(msg)
def validate_eval(id, sline, expression, message):
''' If expression is False, logs critical error and raises EvaluationException. '''
if not expression:
msg = format_msg(message, sline.string, sline.number)
log.critical(id, msg)
raise EvaluationException(msg)
def raise_eval(id, sline, message):
''' Raises EvaluationException with message and line number. '''
msg = format_msg(message, sline.string, sline.number)
log.critical(id, msg)
raise EvaluationException(msg)
def format_msg(message, line, lnum):
    ''' Formats and returns error message with line number. '''
return '{0}, line {2}: {1}'.format(message, line.strip(), lnum+1)
class ExceptionWithArgs(Exception):
''' Generic exception with message. '''
def __init__(self, *args):
self.args = [a for a in args]
class ParsingException(ExceptionWithArgs):
    ''' Raised by syntax tree nodes, during parsing. '''
pass
class EvaluationException(ExceptionWithArgs):
    ''' Raised by syntax tree nodes, during evaluation. '''
pass
class DocumentException(ExceptionWithArgs):
''' Raised by document doc_loader. '''
pass
class WebException(ExceptionWithArgs):
''' Raised by web client. '''
pass
|
py
|
1a58f880bea64a0a94b97a1b7140f4442d402cb5
|
from flask_caching import Cache
cache = Cache()
def clear_config():
from CTFd.utils import _get_config, get_app_config
cache.delete_memoized(_get_config)
cache.delete_memoized(get_app_config)
def clear_standings():
from CTFd.utils.scores import get_standings
cache.delete_memoized(get_standings)
def clear_pages():
from CTFd.utils.config.pages import get_page, get_pages
cache.delete_memoized(get_pages)
cache.delete_memoized(get_page)
|
py
|
1a58f8e0ccbec98ade2d5d51d64c67e7750fdc9e
|
#!/usr/bin/env python
# Copyright (c) 2014-2016 The ACB coin bt developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
'''Return hash of raw file contents'''
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def content_hash(filename):
'''Return hash of RGBA contents of image'''
i = Image.open(filename)
i = i.convert('RGBA')
data = i.tobytes()
return hashlib.sha256(data).hexdigest()
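# file_hash changes whenever pngcrush rewrites the file on disk; content_hash (over the decoded
# RGBA bytes) is compared before and after crushing below to verify the image itself is unchanged.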
pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
for folder in folders:
absFolder=os.path.join(basePath, folder)
for file in os.listdir(absFolder):
extension = os.path.splitext(file)[1]
if extension.lower() == '.png':
print("optimizing "+file+"..."),
file_path = os.path.join(absFolder, file)
fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)}
fileMetaMap['contentHashPre'] = content_hash(file_path)
pngCrushOutput = ""
try:
pngCrushOutput = subprocess.check_output(
[pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
stderr=subprocess.STDOUT).rstrip('\n')
except:
print "pngcrush is not installed, aborting..."
sys.exit(0)
#verify
if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
sys.exit(1)
fileMetaMap['sha256New'] = file_hash(file_path)
fileMetaMap['contentHashPost'] = content_hash(file_path)
if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
print "Image contents of PNG file "+file+" before and after crushing don't match"
sys.exit(1)
fileMetaMap['psize'] = os.path.getsize(file_path)
outputArray.append(fileMetaMap)
print("done\n"),
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
oldHash = fileDict['sha256Old']
newHash = fileDict['sha256New']
totalSaveBytes += fileDict['osize'] - fileDict['psize']
noHashChange = noHashChange and (oldHash == newHash)
print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
|
py
|
1a58f8f0b9aadfbf123bcf207972c63f51b971bf
|
def min_operations(nums):
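    """Greedily count the +1 operations needed to make nums strictly increasing,
    raising each element just above its predecessor (one operation = one unit increment)."""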
operations_count = 0
if not nums or len(nums) == 1:
return 0
i = 0
while i < len(nums) - 1:
if nums[i + 1] <= nums[i]:
diff = nums[i] - nums[i + 1]
nums[i + 1] += diff + 1
operations_count += diff + 1
else:
i += 1
return operations_count
print(min_operations([1, 5, 2, 4, 1]))
print(min_operations([1, 1, 1]))
print(min_operations([8]))
print(min_operations([10, 2, 5]))
print(min_operations([3, 5]))
print(min_operations([7, 6, 1, 9, 9, 10]))
|
py
|
1a58f98d4072d89dbee5d1261164b5957a42d631
|
from setuptools import setup
setup(
name='quilt3_package_browse',
version='0.0.1',
py_modules=['index'],
)
|
py
|
1a58f99a0cfbbc00c060950481ed5d5ba9a1e57d
|
#
# Copyright (c) 2006-2019, RT-Thread Development Team
#
# SPDX-License-Identifier: Apache-2.0
#
# Change Logs:
# Date Author Notes
# 2019-03-21 Bernard the first version
# 2019-04-15 armink fix project update error
#
import os
import sys
import glob
from utils import *
from utils import _make_path_relative
from utils import xml_indent
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from building import *
MODULE_VER_NUM = 5
source_pattern = ['*.c', '*.cpp', '*.cxx', '*.s', '*.S', '*.asm']
def OSPath(path):
import platform
if type(path) == type('str'):
if platform.system() == 'Windows':
return path.replace('/', '\\')
else:
return path.replace('\\', '/')
else:
if platform.system() == 'Windows':
return [item.replace('/', '\\') for item in path]
else:
return [item.replace('\\', '/') for item in path]
# collect the build source code path and parent path
def CollectPaths(paths):
all_paths = []
def ParentPaths(path):
ret = os.path.dirname(path)
if ret == path or ret == '':
return []
return [ret] + ParentPaths(ret)
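    # For example, ParentPaths('a/b/c') returns ['a/b', 'a'], so CollectPaths(['a/b/c'])
    # yields the sorted, de-duplicated list ['a', 'a/b', 'a/b/c'].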
for path in paths:
# path = os.path.abspath(path)
path = path.replace('\\', '/')
all_paths = all_paths + [path] + ParentPaths(path)
all_paths = list(set(all_paths))
return sorted(all_paths)
'''
Collect all of files under paths
'''
def CollectFiles(paths, pattern):
files = []
for path in paths:
if type(pattern) == type(''):
files = files + glob.glob(path + '/' + pattern)
else:
for item in pattern:
# print('--> %s' % (path + '/' + item))
files = files + glob.glob(path + '/' + item)
return sorted(files)
def CollectAllFilesinPath(path, pattern):
files = []
for item in pattern:
files += glob.glob(path + '/' + item)
list = os.listdir(path)
if len(list):
for item in list:
if item.startswith('.'):
continue
if item == 'bsp':
continue
if os.path.isdir(os.path.join(path, item)):
files = files + CollectAllFilesinPath(os.path.join(path, item), pattern)
return files
'''
Exclude files from infiles
'''
def ExcludeFiles(infiles, files):
in_files = set([OSPath(file) for file in infiles])
exl_files = set([OSPath(file) for file in files])
exl_files = in_files - exl_files
return exl_files
# calculate the exclude paths for the project
def ExcludePaths(rootpath, paths):
ret = []
files = os.listdir(OSPath(rootpath))
for file in files:
if file.startswith('.'):
continue
fullname = os.path.join(OSPath(rootpath), file)
if os.path.isdir(fullname):
# print(fullname)
if not fullname in paths:
ret = ret + [fullname]
else:
ret = ret + ExcludePaths(fullname, paths)
return ret
rtt_path_prefix = '"${workspace_loc://${ProjName}//'
def ConverToRttEclipsePathFormat(path):
return rtt_path_prefix + path + '}"'
def IsRttEclipsePathFormat(path):
if path.startswith(rtt_path_prefix):
return True
else:
return False
# all libs added by scons should end with five whitespace characters as a flag
rtt_lib_flag = 5 * " "
def ConverToRttEclipseLibFormat(lib):
return str(lib) + str(rtt_lib_flag)
def IsRttEclipseLibFormat(path):
if path.endswith(rtt_lib_flag):
return True
else:
return False
def IsCppProject():
return GetDepend('RT_USING_CPLUSPLUS')
def HandleToolOption(tools, env, project, reset):
is_cpp_prj = IsCppProject()
BSP_ROOT = os.path.abspath(env['BSP_ROOT'])
CPPDEFINES = project['CPPDEFINES']
paths = [ConverToRttEclipsePathFormat(RelativeProjectPath(env, os.path.normpath(i)).replace('\\', '/')) for i in
project['CPPPATH']]
compile_include_paths_options = []
compile_include_files_options = []
compile_defs_options = []
linker_scriptfile_option = None
linker_script_option = None
linker_nostart_option = None
linker_libs_option = None
linker_paths_option = None
linker_newlib_nano_option = None
for tool in tools:
        if tool.get('id').find('compile') != -1:
options = tool.findall('option')
# find all compile options
for option in options:
if option.get('id').find('compiler.include.paths') != -1 or option.get('id').find(
'compiler.option.includepaths') != -1:
compile_include_paths_options += [option]
elif option.get('id').find('compiler.include.files') != -1 or option.get('id').find(
'compiler.option.includefiles') != -1:
compile_include_files_options += [option]
elif option.get('id').find('compiler.defs') != -1 or option.get('id').find(
'compiler.option.definedsymbols') != -1:
compile_defs_options += [option]
if tool.get('id').find('linker') != -1:
options = tool.findall('option')
# find all linker options
for option in options:
# the project type and option type must equal
if is_cpp_prj != (option.get('id').find('cpp.linker') != -1):
continue
if option.get('id').find('linker.scriptfile') != -1:
linker_scriptfile_option = option
elif option.get('id').find('linker.option.script') != -1:
linker_script_option = option
elif option.get('id').find('linker.nostart') != -1:
linker_nostart_option = option
elif option.get('id').find('linker.libs') != -1:
linker_libs_option = option
elif option.get('id').find('linker.paths') != -1 and env.has_key('LIBPATH'):
linker_paths_option = option
elif option.get('id').find('linker.usenewlibnano') != -1:
linker_newlib_nano_option = option
    # change the include paths
for option in compile_include_paths_options:
# find all of paths in this project
include_paths = option.findall('listOptionValue')
for item in include_paths:
if reset is True or IsRttEclipsePathFormat(item.get('value')):
# clean old configuration
option.remove(item)
# print('c.compiler.include.paths')
paths = sorted(paths)
for item in paths:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': item})
    # change the include files (default) or definitions
for option in compile_include_files_options:
        # add '_REENT_SMALL' to CPPDEFINES when --specs=nano.specs has been selected
if linker_newlib_nano_option is not None and linker_newlib_nano_option.get(
'value') == 'true' and '_REENT_SMALL' not in CPPDEFINES:
CPPDEFINES += ['_REENT_SMALL']
file_header = '''
#ifndef RTCONFIG_PREINC_H__
#define RTCONFIG_PREINC_H__
/* Automatically generated file; DO NOT EDIT. */
/* RT-Thread pre-include file */
'''
file_tail = '\n#endif /*RTCONFIG_PREINC_H__*/\n'
rtt_pre_inc_item = '"${workspace_loc:/${ProjName}/rtconfig_preinc.h}"'
# save the CPPDEFINES in to rtconfig_preinc.h
with open('rtconfig_preinc.h', mode='w+') as f:
f.write(file_header)
for cppdef in CPPDEFINES:
f.write("#define " + cppdef.replace('=', ' ') + '\n')
f.write(file_tail)
# change the c.compiler.include.files
files = option.findall('listOptionValue')
find_ok = False
for item in files:
if item.get('value') == rtt_pre_inc_item:
find_ok = True
break
if find_ok is False:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': rtt_pre_inc_item})
if len(compile_include_files_options) == 0:
for option in compile_defs_options:
defs = option.findall('listOptionValue')
project_defs = []
for item in defs:
if reset is True:
# clean all old configuration
option.remove(item)
else:
project_defs += [item.get('value')]
if len(project_defs) > 0:
cproject_defs = set(CPPDEFINES) - set(project_defs)
else:
cproject_defs = CPPDEFINES
# print('c.compiler.defs')
cproject_defs = sorted(cproject_defs)
for item in cproject_defs:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': item})
# update linker script config
if linker_scriptfile_option is not None:
option = linker_scriptfile_option
linker_script = 'link.lds'
items = env['LINKFLAGS'].split(' ')
if '-T' in items:
linker_script = items[items.index('-T') + 1]
linker_script = ConverToRttEclipsePathFormat(linker_script)
listOptionValue = option.find('listOptionValue')
if listOptionValue != None:
listOptionValue.set('value', linker_script)
else:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': linker_script})
# scriptfile in stm32cubeIDE
if linker_script_option is not None:
option = linker_script_option
items = env['LINKFLAGS'].split(' ')
if '-T' in items:
linker_script = ConverToRttEclipsePathFormat(items[items.index('-T') + 1]).strip('"')
option.set('value', linker_script)
# update nostartfiles config
if linker_nostart_option is not None:
option = linker_nostart_option
if env['LINKFLAGS'].find('-nostartfiles') != -1:
option.set('value', 'true')
else:
option.set('value', 'false')
# update libs
if linker_libs_option is not None:
option = linker_libs_option
# remove old libs
for item in option.findall('listOptionValue'):
if IsRttEclipseLibFormat(item.get("value")):
option.remove(item)
# add new libs
if env.has_key('LIBS'):
for lib in env['LIBS']:
formatedLib = ConverToRttEclipseLibFormat(lib)
SubElement(option, 'listOptionValue', {
'builtIn': 'false', 'value': formatedLib})
# update lib paths
if linker_paths_option is not None:
option = linker_paths_option
# remove old lib paths
for item in option.findall('listOptionValue'):
if IsRttEclipsePathFormat(item.get('value')):
# clean old configuration
option.remove(item)
        # add new lib paths
for path in env['LIBPATH']:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': ConverToRttEclipsePathFormat(
RelativeProjectPath(env, path).replace('\\', '/'))})
return
def UpdateProjectStructure(env, prj_name):
bsp_root = env['BSP_ROOT']
rtt_root = env['RTT_ROOT']
project = etree.parse('.project')
root = project.getroot()
if rtt_root.startswith(bsp_root):
linkedResources = root.find('linkedResources')
if linkedResources == None:
linkedResources = SubElement(root, 'linkedResources')
links = linkedResources.findall('link')
# delete all RT-Thread folder links
for link in links:
if link.find('name').text.startswith('rt-thread'):
linkedResources.remove(link)
if prj_name:
name = root.find('name')
if name == None:
name = SubElement(root, 'name')
name.text = prj_name
out = open('.project', 'w')
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
return
def GenExcluding(env, project):
rtt_root = os.path.abspath(env['RTT_ROOT'])
bsp_root = os.path.abspath(env['BSP_ROOT'])
coll_dirs = CollectPaths(project['DIRS'])
all_paths_temp = [OSPath(path) for path in coll_dirs]
all_paths = []
# add used path
for path in all_paths_temp:
if path.startswith(rtt_root) or path.startswith(bsp_root):
all_paths.append(path)
if bsp_root.startswith(rtt_root):
# bsp folder is in the RT-Thread root folder, such as the RT-Thread source code on GitHub
exclude_paths = ExcludePaths(rtt_root, all_paths)
elif rtt_root.startswith(bsp_root):
# RT-Thread root folder is in the bsp folder, such as project folder which generate by 'scons --dist' cmd
check_path = []
exclude_paths = []
# analyze the primary folder which relative to BSP_ROOT and in all_paths
for path in all_paths:
if path.startswith(bsp_root):
folders = RelativeProjectPath(env, path).split('\\')
if folders[0] != '.' and '\\' + folders[0] not in check_path:
check_path += ['\\' + folders[0]]
        # exclude the folders which are managed by scons
for path in check_path:
exclude_paths += ExcludePaths(bsp_root + path, all_paths)
else:
exclude_paths = ExcludePaths(rtt_root, all_paths)
exclude_paths += ExcludePaths(bsp_root, all_paths)
paths = exclude_paths
exclude_paths = []
# remove the folder which not has source code by source_pattern
for path in paths:
        # add bsp and libcpu folders without collecting source files (too many files)
if path.endswith('rt-thread\\bsp') or path.endswith('rt-thread\\libcpu'):
exclude_paths += [path]
continue
set = CollectAllFilesinPath(path, source_pattern)
if len(set):
exclude_paths += [path]
exclude_paths = [RelativeProjectPath(env, path).replace('\\', '/') for path in exclude_paths]
all_files = CollectFiles(all_paths, source_pattern)
src_files = project['FILES']
exclude_files = ExcludeFiles(all_files, src_files)
exclude_files = [RelativeProjectPath(env, file).replace('\\', '/') for file in exclude_files]
env['ExPaths'] = exclude_paths
env['ExFiles'] = exclude_files
return exclude_paths + exclude_files
def RelativeProjectPath(env, path):
project_root = os.path.abspath(env['BSP_ROOT'])
rtt_root = os.path.abspath(env['RTT_ROOT'])
if path.startswith(project_root):
return _make_path_relative(project_root, path)
if path.startswith(rtt_root):
return 'rt-thread/' + _make_path_relative(rtt_root, path)
# TODO add others folder
    print('ERROR: the ' + path + ' is not supported')
return path
def HandleExcludingOption(entry, sourceEntries, excluding):
old_excluding = []
if entry != None:
old_excluding = entry.get('excluding').split('|')
sourceEntries.remove(entry)
value = ''
for item in old_excluding:
if item.startswith('//'):
old_excluding.remove(item)
else:
if value == '':
value = item
else:
value += '|' + item
for item in excluding:
# add special excluding path prefix for RT-Thread
item = '//' + item
if value == '':
value = item
else:
value += '|' + item
SubElement(sourceEntries, 'entry',
{'excluding': value, 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED', 'kind': 'sourcePath', 'name': ""})
def UpdateCproject(env, project, excluding, reset, prj_name):
excluding = sorted(excluding)
cproject = etree.parse('.cproject')
root = cproject.getroot()
cconfigurations = root.findall('storageModule/cconfiguration')
for cconfiguration in cconfigurations:
tools = cconfiguration.findall('storageModule/configuration/folderInfo/toolChain/tool')
HandleToolOption(tools, env, project, reset)
sourceEntries = cconfiguration.find('storageModule/configuration/sourceEntries')
entry = sourceEntries.find('entry')
HandleExcludingOption(entry, sourceEntries, excluding)
# update refreshScope
if prj_name:
prj_name = '/' + prj_name
configurations = root.findall('storageModule/configuration')
for configuration in configurations:
resource = configuration.find('resource')
configuration.remove(resource)
SubElement(configuration, 'resource', {'resourceType': "PROJECT", 'workspacePath': prj_name})
# write back to .cproject
out = open('.cproject', 'w')
out.write('<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n')
out.write('<?fileVersion 4.0.0?>')
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
def TargetEclipse(env, reset=False, prj_name=None):
global source_pattern
print('Update eclipse setting...')
if not os.path.exists('.cproject'):
print('no eclipse CDT project found!')
return
project = ProjectInfo(env)
# update the project file structure info on '.project' file
UpdateProjectStructure(env, prj_name)
# generate the exclude paths and files
excluding = GenExcluding(env, project)
# update the project configuration on '.cproject' file
UpdateCproject(env, project, excluding, reset, prj_name)
print('done!')
return
|
py
|
1a58fadbc64626f235cabfdbd61c8126611db784
|
"""
API operations allowing clients to determine datatype supported by Galaxy.
"""
from galaxy.web import _future_expose_api_anonymous_and_sessionless as expose_api_anonymous_and_sessionless
from galaxy import exceptions
from galaxy.web.base.controller import BaseAPIController
from galaxy.util import asbool
from galaxy.datatypes.data import Data
import logging
log = logging.getLogger( __name__ )
class DatatypesController( BaseAPIController ):
@expose_api_anonymous_and_sessionless
def index( self, trans, **kwd ):
"""
GET /api/datatypes
Return an object containing upload datatypes.
"""
datatypes_registry = self._datatypes_registry
extension_only = asbool( kwd.get( 'extension_only', True ) )
upload_only = asbool( kwd.get( 'upload_only', True ) )
try:
if extension_only:
if upload_only:
return datatypes_registry.upload_file_formats
else:
return [ ext for ext in datatypes_registry.datatypes_by_extension ]
else:
rval = []
for elem in datatypes_registry.datatype_elems:
if not asbool(elem.get('display_in_upload')) and upload_only:
continue
keys = ['extension', 'description', 'description_url']
dictionary = {}
for key in keys:
dictionary[key] = elem.get(key)
extension = elem.get('extension')
if extension in datatypes_registry.datatypes_by_extension:
composite_files = datatypes_registry.datatypes_by_extension[ extension ].composite_files
if composite_files:
dictionary['composite_files'] = [_.dict() for _ in composite_files.itervalues()]
rval.append(dictionary)
return rval
except Exception as exception:
log.error( 'could not get datatypes: %s', str( exception ), exc_info=True )
if not isinstance( exception, exceptions.MessageException ):
raise exceptions.InternalServerError( str( exception ) )
else:
raise
@expose_api_anonymous_and_sessionless
def mapping( self, trans, **kwd ):
'''
GET /api/datatypes/mapping
Return a dictionary of class to class mappings.
'''
try:
ext_to_class_name = dict()
classes = []
for k, v in self._datatypes_registry.datatypes_by_extension.iteritems():
c = v.__class__
ext_to_class_name[k] = c.__module__ + "." + c.__name__
classes.append( c )
class_to_classes = dict()
def visit_bases( types, cls ):
for base in cls.__bases__:
if issubclass( base, Data ):
types.add( base.__module__ + "." + base.__name__ )
visit_bases( types, base )
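            # visit_bases recursively collects every ancestor class that is itself a Data subclass,
            # so class_to_classes maps each datatype class to all datatype classes it derives from.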
for c in classes:
n = c.__module__ + "." + c.__name__
types = set( [ n ] )
visit_bases( types, c )
class_to_classes[ n ] = dict( ( t, True ) for t in types )
return dict( ext_to_class_name=ext_to_class_name, class_to_classes=class_to_classes )
except Exception as exception:
log.error( 'could not get datatype mapping: %s', str( exception ), exc_info=True )
if not isinstance( exception, exceptions.MessageException ):
raise exceptions.InternalServerError( str( exception ) )
else:
raise
@expose_api_anonymous_and_sessionless
def sniffers( self, trans, **kwd ):
'''
GET /api/datatypes/sniffers
Return a list of sniffers.
'''
try:
rval = []
for sniffer_elem in self._datatypes_registry.sniffer_elems:
datatype = sniffer_elem.get( 'type' )
if datatype is not None:
rval.append( datatype )
return rval
except Exception as exception:
log.error( 'could not get datatypes: %s', str( exception ), exc_info=True )
if not isinstance( exception, exceptions.MessageException ):
raise exceptions.InternalServerError( str( exception ) )
else:
raise
@expose_api_anonymous_and_sessionless
def converters( self, trans, **kwd ):
converters = []
for (source_type, targets) in self._datatypes_registry.datatype_converters.iteritems():
for target_type in targets:
converters.append( {
'source': source_type,
'target': target_type,
'tool_id': targets[ target_type ].id,
} )
return converters
@expose_api_anonymous_and_sessionless
def edam_formats( self, trans, **kwds ):
return self._datatypes_registry.edam_formats
@property
def _datatypes_registry( self ):
return self.app.datatypes_registry
|
py
|
1a58fafbaca63d3030e3a648a5997de5339cd8eb
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'BindingArgs',
'ExprArgs',
'GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpecArgs',
'GoogleCloudDatacatalogV1beta1BigQueryTableSpecArgs',
'GoogleCloudDatacatalogV1beta1ColumnSchemaArgs',
'GoogleCloudDatacatalogV1beta1GcsFilesetSpecArgs',
'GoogleCloudDatacatalogV1beta1SchemaArgs',
'GoogleCloudDatacatalogV1beta1TableSpecArgs',
'GoogleCloudDatacatalogV1beta1ViewSpecArgs',
]
@pulumi.input_type
class BindingArgs:
def __init__(__self__, *,
condition: Optional[pulumi.Input['ExprArgs']] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
role: Optional[pulumi.Input[str]] = None):
"""
Associates `members`, or principals, with a `role`.
:param pulumi.Input['ExprArgs'] condition: The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
:param pulumi.Input[str] role: Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
if condition is not None:
pulumi.set(__self__, "condition", condition)
if members is not None:
pulumi.set(__self__, "members", members)
if role is not None:
pulumi.set(__self__, "role", role)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['ExprArgs']]:
"""
The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['ExprArgs']]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter
def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
"""
return pulumi.get(self, "members")
@members.setter
def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@pulumi.input_type
class ExprArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
expression: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None):
"""
Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
:param pulumi.Input[str] description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
:param pulumi.Input[str] expression: Textual representation of an expression in Common Expression Language syntax.
:param pulumi.Input[str] location: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
:param pulumi.Input[str] title: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if expression is not None:
pulumi.set(__self__, "expression", expression)
if location is not None:
pulumi.set(__self__, "location", location)
if title is not None:
pulumi.set(__self__, "title", title)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def expression(self) -> Optional[pulumi.Input[str]]:
"""
Textual representation of an expression in Common Expression Language syntax.
"""
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def title(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "title", value)
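# NOTE: Illustrative sketch added for documentation; not part of the generated SDK.
# It shows how an ExprArgs CEL condition might be built (field values are
# placeholders); such an object is typically passed as the `condition` of an IAM
# binding alongside `role` and `members` values like "user:{emailid}".
def _example_expr_args() -> 'ExprArgs':
    """Build a sample CEL condition that limits access to requests before 2025."""
    return ExprArgs(
        title="expires_before_2025",
        description="Grants access only for requests before 2025-01-01",
        expression="request.time < timestamp('2025-01-01T00:00:00Z')",
    )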
@pulumi.input_type
class GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpecArgs:
def __init__(__self__):
"""
Spec for a group of BigQuery tables with name pattern `[prefix]YYYYMMDD`. Context: https://cloud.google.com/bigquery/docs/partitioned-tables#partitioning_versus_sharding
"""
pass
@pulumi.input_type
class GoogleCloudDatacatalogV1beta1BigQueryTableSpecArgs:
def __init__(__self__, *,
table_spec: Optional[pulumi.Input['GoogleCloudDatacatalogV1beta1TableSpecArgs']] = None,
view_spec: Optional[pulumi.Input['GoogleCloudDatacatalogV1beta1ViewSpecArgs']] = None):
"""
Describes a BigQuery table.
:param pulumi.Input['GoogleCloudDatacatalogV1beta1TableSpecArgs'] table_spec: Spec of a BigQuery table. This field should only be populated if `table_source_type` is `BIGQUERY_TABLE`.
:param pulumi.Input['GoogleCloudDatacatalogV1beta1ViewSpecArgs'] view_spec: Table view specification. This field should only be populated if `table_source_type` is `BIGQUERY_VIEW`.
"""
if table_spec is not None:
pulumi.set(__self__, "table_spec", table_spec)
if view_spec is not None:
pulumi.set(__self__, "view_spec", view_spec)
@property
@pulumi.getter(name="tableSpec")
def table_spec(self) -> Optional[pulumi.Input['GoogleCloudDatacatalogV1beta1TableSpecArgs']]:
"""
Spec of a BigQuery table. This field should only be populated if `table_source_type` is `BIGQUERY_TABLE`.
"""
return pulumi.get(self, "table_spec")
@table_spec.setter
def table_spec(self, value: Optional[pulumi.Input['GoogleCloudDatacatalogV1beta1TableSpecArgs']]):
pulumi.set(self, "table_spec", value)
@property
@pulumi.getter(name="viewSpec")
def view_spec(self) -> Optional[pulumi.Input['GoogleCloudDatacatalogV1beta1ViewSpecArgs']]:
"""
Table view specification. This field should only be populated if `table_source_type` is `BIGQUERY_VIEW`.
"""
return pulumi.get(self, "view_spec")
@view_spec.setter
def view_spec(self, value: Optional[pulumi.Input['GoogleCloudDatacatalogV1beta1ViewSpecArgs']]):
pulumi.set(self, "view_spec", value)
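# NOTE: Illustrative sketch added for documentation; not part of the generated SDK.
# It pairs the `table_source_type` of `BIGQUERY_TABLE` case described above with the
# argument-less GoogleCloudDatacatalogV1beta1TableSpecArgs defined later in this module.
def _example_bigquery_table_spec() -> 'GoogleCloudDatacatalogV1beta1BigQueryTableSpecArgs':
    """Build a sample BigQuery table spec with only `table_spec` populated."""
    return GoogleCloudDatacatalogV1beta1BigQueryTableSpecArgs(
        table_spec=GoogleCloudDatacatalogV1beta1TableSpecArgs(),
    )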
@pulumi.input_type
class GoogleCloudDatacatalogV1beta1ColumnSchemaArgs:
def __init__(__self__, *,
column: pulumi.Input[str],
type: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
subcolumns: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudDatacatalogV1beta1ColumnSchemaArgs']]]] = None):
"""
Representation of a column within a schema. Columns could be nested inside other columns.
:param pulumi.Input[str] column: Name of the column.
:param pulumi.Input[str] type: Type of the column.
:param pulumi.Input[str] description: Optional. Description of the column. Default value is an empty string.
:param pulumi.Input[str] mode: Optional. A column's mode indicates whether the values in this column are required, nullable, etc. Only `NULLABLE`, `REQUIRED` and `REPEATED` are supported. Default mode is `NULLABLE`.
:param pulumi.Input[Sequence[pulumi.Input['GoogleCloudDatacatalogV1beta1ColumnSchemaArgs']]] subcolumns: Optional. Schema of sub-columns. A column can have zero or more sub-columns.
"""
pulumi.set(__self__, "column", column)
pulumi.set(__self__, "type", type)
if description is not None:
pulumi.set(__self__, "description", description)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if subcolumns is not None:
pulumi.set(__self__, "subcolumns", subcolumns)
@property
@pulumi.getter
def column(self) -> pulumi.Input[str]:
"""
Name of the column.
"""
return pulumi.get(self, "column")
@column.setter
def column(self, value: pulumi.Input[str]):
pulumi.set(self, "column", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of the column.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Description of the column. Default value is an empty string.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
Optional. A column's mode indicates whether the values in this column are required, nullable, etc. Only `NULLABLE`, `REQUIRED` and `REPEATED` are supported. Default mode is `NULLABLE`.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def subcolumns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudDatacatalogV1beta1ColumnSchemaArgs']]]]:
"""
Optional. Schema of sub-columns. A column can have zero or more sub-columns.
"""
return pulumi.get(self, "subcolumns")
@subcolumns.setter
def subcolumns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudDatacatalogV1beta1ColumnSchemaArgs']]]]):
pulumi.set(self, "subcolumns", value)
@pulumi.input_type
class GoogleCloudDatacatalogV1beta1GcsFilesetSpecArgs:
def __init__(__self__, *,
file_patterns: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
Describes a Cloud Storage fileset entry.
:param pulumi.Input[Sequence[pulumi.Input[str]]] file_patterns: Patterns to identify a set of files in Google Cloud Storage. See [Cloud Storage documentation](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames) for more information. Note that bucket wildcards are currently not supported. Examples of valid file_patterns: * `gs://bucket_name/dir/*`: matches all files within `bucket_name/dir` directory. * `gs://bucket_name/dir/**`: matches all files in `bucket_name/dir` spanning all subdirectories. * `gs://bucket_name/file*`: matches files prefixed by `file` in `bucket_name` * `gs://bucket_name/??.txt`: matches files with two characters followed by `.txt` in `bucket_name` * `gs://bucket_name/[aeiou].txt`: matches files that contain a single vowel character followed by `.txt` in `bucket_name` * `gs://bucket_name/[a-m].txt`: matches files that contain `a`, `b`, ... or `m` followed by `.txt` in `bucket_name` * `gs://bucket_name/a/*/b`: matches all files in `bucket_name` that match `a/*/b` pattern, such as `a/c/b`, `a/d/b` * `gs://another_bucket/a.txt`: matches `gs://another_bucket/a.txt` You can combine wildcards to provide more powerful matches, for example: * `gs://bucket_name/[a-m]??.j*g`
"""
pulumi.set(__self__, "file_patterns", file_patterns)
@property
@pulumi.getter(name="filePatterns")
def file_patterns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Patterns to identify a set of files in Google Cloud Storage. See [Cloud Storage documentation](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames) for more information. Note that bucket wildcards are currently not supported. Examples of valid file_patterns: * `gs://bucket_name/dir/*`: matches all files within `bucket_name/dir` directory. * `gs://bucket_name/dir/**`: matches all files in `bucket_name/dir` spanning all subdirectories. * `gs://bucket_name/file*`: matches files prefixed by `file` in `bucket_name` * `gs://bucket_name/??.txt`: matches files with two characters followed by `.txt` in `bucket_name` * `gs://bucket_name/[aeiou].txt`: matches files that contain a single vowel character followed by `.txt` in `bucket_name` * `gs://bucket_name/[a-m].txt`: matches files that contain `a`, `b`, ... or `m` followed by `.txt` in `bucket_name` * `gs://bucket_name/a/*/b`: matches all files in `bucket_name` that match `a/*/b` pattern, such as `a/c/b`, `a/d/b` * `gs://another_bucket/a.txt`: matches `gs://another_bucket/a.txt` You can combine wildcards to provide more powerful matches, for example: * `gs://bucket_name/[a-m]??.j*g`
"""
return pulumi.get(self, "file_patterns")
@file_patterns.setter
def file_patterns(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "file_patterns", value)
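# NOTE: Illustrative sketch added for documentation; not part of the generated SDK.
# Bucket and object names are placeholders; the patterns follow the wildcard rules
# documented on `file_patterns` above.
def _example_gcs_fileset_spec() -> 'GoogleCloudDatacatalogV1beta1GcsFilesetSpecArgs':
    """Build a sample fileset spec covering a directory tree and root-level CSVs."""
    return GoogleCloudDatacatalogV1beta1GcsFilesetSpecArgs(
        file_patterns=[
            "gs://bucket_name/dir/**",     # all files under dir/, including subdirectories
            "gs://bucket_name/file*.csv",  # root-level files prefixed by `file` ending in .csv
        ],
    )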
@pulumi.input_type
class GoogleCloudDatacatalogV1beta1SchemaArgs:
def __init__(__self__, *,
columns: pulumi.Input[Sequence[pulumi.Input['GoogleCloudDatacatalogV1beta1ColumnSchemaArgs']]]):
"""
Represents a schema (e.g. BigQuery, GoogleSQL, Avro schema).
:param pulumi.Input[Sequence[pulumi.Input['GoogleCloudDatacatalogV1beta1ColumnSchemaArgs']]] columns: Schema of columns. A maximum of 10,000 columns and sub-columns can be specified.
"""
pulumi.set(__self__, "columns", columns)
@property
@pulumi.getter
def columns(self) -> pulumi.Input[Sequence[pulumi.Input['GoogleCloudDatacatalogV1beta1ColumnSchemaArgs']]]:
"""
Schema of columns. A maximum of 10,000 columns and sub-columns can be specified.
"""
return pulumi.get(self, "columns")
@columns.setter
def columns(self, value: pulumi.Input[Sequence[pulumi.Input['GoogleCloudDatacatalogV1beta1ColumnSchemaArgs']]]):
pulumi.set(self, "columns", value)
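# NOTE: Illustrative sketch added for documentation; not part of the generated SDK.
# It builds a small schema using the column/mode semantics documented above,
# including a REPEATED record column with sub-columns.
def _example_schema() -> 'GoogleCloudDatacatalogV1beta1SchemaArgs':
    """Build a sample schema: a required id column plus a repeated address record."""
    return GoogleCloudDatacatalogV1beta1SchemaArgs(
        columns=[
            GoogleCloudDatacatalogV1beta1ColumnSchemaArgs(
                column="id",
                type="STRING",
                mode="REQUIRED",
                description="Primary identifier",
            ),
            GoogleCloudDatacatalogV1beta1ColumnSchemaArgs(
                column="addresses",
                type="RECORD",
                mode="REPEATED",
                subcolumns=[
                    GoogleCloudDatacatalogV1beta1ColumnSchemaArgs(column="city", type="STRING"),
                    GoogleCloudDatacatalogV1beta1ColumnSchemaArgs(column="zip", type="STRING"),
                ],
            ),
        ],
    )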
@pulumi.input_type
class GoogleCloudDatacatalogV1beta1TableSpecArgs:
def __init__(__self__):
"""
Normal BigQuery table spec.
"""
pass
@pulumi.input_type
class GoogleCloudDatacatalogV1beta1ViewSpecArgs:
def __init__(__self__):
"""
Table view specification.
"""
pass
|
py
|
1a58fb05355d02c56c2d17a6d5814d5ef7aa411a
|
"""Webroot plugin."""
import argparse
import collections
import json
import logging
from typing import DefaultDict
from typing import Dict
from typing import List
from typing import Set
from acme import challenges
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot._internal import cli
from certbot.achallenges import KeyAuthorizationAnnotatedChallenge as AnnotatedChallenge
from certbot.compat import filesystem
from certbot.compat import os
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
from certbot.plugins import util
from certbot.util import safe_open
logger = logging.getLogger(__name__)
_WEB_CONFIG_CONTENT = """\
<?xml version="1.0" encoding="UTF-8" ?>
<!--Generated by Certbot-->
<configuration>
<system.webServer>
<staticContent>
<mimeMap fileExtension="." mimeType="text/plain" />
</staticContent>
</system.webServer>
</configuration>
"""
# This list references the hashes of all versions of the web.config files that Certbot could
# have generated during an HTTP-01 challenge. If you modify _WEB_CONFIG_CONTENT, you MUST add
# the new hash in this list.
_WEB_CONFIG_SHA256SUMS = ["20c5ca1bd58fa8ad5f07a2f1be8b7cbb707c20fcb607a8fc8db9393952846a97"]
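# Illustrative note (not from the original module): if _WEB_CONFIG_CONTENT is ever
# changed, the new checksum to append above can be computed from the exact bytes
# written to disk, e.g.:
#
#     import hashlib
#     hashlib.sha256(_WEB_CONFIG_CONTENT.encode("utf-8")).hexdigest()
#
# (assuming the file on disk is byte-for-byte identical to _WEB_CONFIG_CONTENT).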
class Authenticator(common.Plugin, interfaces.Authenticator):
"""Webroot Authenticator."""
description = "Place files in webroot directory"
MORE_INFO = """\
Authenticator plugin that performs http-01 challenge by saving
necessary validation resources to appropriate paths on the file
system. It expects that there is some other HTTP server configured
to serve all files under specified web root ({0})."""
def more_info(self): # pylint: disable=missing-function-docstring
return self.MORE_INFO.format(self.conf("path"))
@classmethod
def add_parser_arguments(cls, add):
add("path", "-w", default=[], action=_WebrootPathAction,
help="public_html / webroot path. This can be specified multiple "
"times to handle different domains; each domain will have "
"the webroot path that preceded it. For instance: `-w "
"/var/www/example -d example.com -d www.example.com -w "
"/var/www/thing -d thing.net -d m.thing.net` (default: Ask)")
add("map", default={}, action=_WebrootMapAction,
help="JSON dictionary mapping domains to webroot paths; this "
"implies -d for each entry. You may need to escape this from "
"your shell. E.g.: --webroot-map "
'\'{"eg1.is,m.eg1.is":"/www/eg1/", "eg2.is":"/www/eg2"}\' '
"This option is merged with, but takes precedence over, -w / "
"-d entries. At present, if you put webroot-map in a config "
"file, it needs to be on a single line, like: webroot-map = "
'{"example.com":"/var/www"}.')
def auth_hint(self, failed_achalls): # pragma: no cover
return ("The Certificate Authority failed to download the temporary challenge files "
"created by Certbot. Ensure that the listed domains serve their content from "
"the provided --webroot-path/-w and that files created there can be downloaded "
"from the internet.")
def get_chall_pref(self, domain): # pragma: no cover
# pylint: disable=unused-argument,missing-function-docstring
return [challenges.HTTP01]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.full_roots: Dict[str, str] = {}
self.performed: DefaultDict[str, Set[AnnotatedChallenge]] = collections.defaultdict(set)
# stack of dirs successfully created by this authenticator
self._created_dirs: List[str] = []
def prepare(self): # pylint: disable=missing-function-docstring
pass
def perform(self, achalls): # pylint: disable=missing-function-docstring
self._set_webroots(achalls)
self._create_challenge_dirs()
return [self._perform_single(achall) for achall in achalls]
def _set_webroots(self, achalls):
if self.conf("path"):
webroot_path = self.conf("path")[-1]
logger.info("Using the webroot path %s for all unmatched domains.",
webroot_path)
for achall in achalls:
self.conf("map").setdefault(achall.domain, webroot_path)
else:
known_webroots = list(set(self.conf("map").values()))
for achall in achalls:
if achall.domain not in self.conf("map"):
new_webroot = self._prompt_for_webroot(achall.domain,
known_webroots)
# Put the most recently input
# webroot first for easy selection
try:
known_webroots.remove(new_webroot)
except ValueError:
pass
known_webroots.insert(0, new_webroot)
self.conf("map")[achall.domain] = new_webroot
def _prompt_for_webroot(self, domain, known_webroots):
webroot = None
while webroot is None:
if known_webroots:
# Only show the menu if we have options for it
webroot = self._prompt_with_webroot_list(domain, known_webroots)
if webroot is None:
webroot = self._prompt_for_new_webroot(domain)
else:
# Allow prompt to raise PluginError instead of looping forever
webroot = self._prompt_for_new_webroot(domain, True)
return webroot
def _prompt_with_webroot_list(self, domain, known_webroots):
path_flag = "--" + self.option_name("path")
while True:
code, index = display_util.menu(
"Select the webroot for {0}:".format(domain),
["Enter a new webroot"] + known_webroots,
cli_flag=path_flag, force_interactive=True)
if code == display_util.CANCEL:
raise errors.PluginError(
"Every requested domain must have a "
"webroot when using the webroot plugin.")
return None if index == 0 else known_webroots[index - 1] # code == display_util.OK
def _prompt_for_new_webroot(self, domain, allowraise=False):
code, webroot = ops.validated_directory(
_validate_webroot,
"Input the webroot for {0}:".format(domain),
force_interactive=True)
if code == display_util.CANCEL:
if not allowraise:
return None
raise errors.PluginError(
"Every requested domain must have a "
"webroot when using the webroot plugin.")
return _validate_webroot(webroot) # code == display_util.OK
def _create_challenge_dirs(self):
path_map = self.conf("map")
if not path_map:
raise errors.PluginError(
"Missing parts of webroot configuration; please set either "
"--webroot-path and --domains, or --webroot-map. Run with "
" --help webroot for examples.")
for name, path in path_map.items():
self.full_roots[name] = os.path.join(path, os.path.normcase(
challenges.HTTP01.URI_ROOT_PATH))
logger.debug("Creating root challenges validation dir at %s",
self.full_roots[name])
# Change the permissions to be writable (GH #1389)
# Umask is used instead of chmod to ensure the client can also
# run as non-root (GH #1795)
old_umask = filesystem.umask(0o022)
try:
# We ignore the last prefix in the next iteration,
# as it does not correspond to a folder path ('/' or 'C:')
for prefix in sorted(util.get_prefixes(self.full_roots[name])[:-1], key=len):
if os.path.isdir(prefix):
# Don't try to create directory if it already exists, as some filesystems
# won't reliably raise EEXIST or EISDIR if directory exists.
continue
try:
# Set owner as parent directory if possible, apply mode for Linux/Windows.
# For Linux, this is coupled with the "umask" call above because
# os.mkdir's "mode" parameter may not always work:
# https://docs.python.org/3/library/os.html#os.mkdir
filesystem.mkdir(prefix, 0o755)
self._created_dirs.append(prefix)
try:
filesystem.copy_ownership_and_apply_mode(
path, prefix, 0o755, copy_user=True, copy_group=True)
except (OSError, AttributeError) as exception:
logger.warning("Unable to change owner and uid of webroot directory")
logger.debug("Error was: %s", exception)
except OSError as exception:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}".format(name, exception))
finally:
filesystem.umask(old_umask)
            # On Windows, generate a local web.config file that allows IIS to serve
            # the challenge files even though they do not have a file extension.
if not filesystem.POSIX_MODE:
web_config_path = os.path.join(self.full_roots[name], "web.config")
if os.path.exists(web_config_path):
logger.info("A web.config file has not been created in "
"%s because another one already exists.", self.full_roots[name])
continue
logger.info("Creating a web.config file in %s to allow IIS "
"to serve challenge files.", self.full_roots[name])
with safe_open(web_config_path, mode="w", chmod=0o644) as web_config:
web_config.write(_WEB_CONFIG_CONTENT)
def _get_validation_path(self, root_path, achall):
return os.path.join(root_path, achall.chall.encode("token"))
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
root_path = self.full_roots[achall.domain]
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Attempting to save validation to %s", validation_path)
# Change permissions to be world-readable, owner-writable (GH #1795)
old_umask = filesystem.umask(0o022)
try:
with safe_open(validation_path, mode="wb", chmod=0o644) as validation_file:
validation_file.write(validation.encode())
finally:
filesystem.umask(old_umask)
self.performed[root_path].add(achall)
return response
def cleanup(self, achalls): # pylint: disable=missing-function-docstring
for achall in achalls:
root_path = self.full_roots.get(achall.domain, None)
if root_path is not None:
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Removing %s", validation_path)
os.remove(validation_path)
self.performed[root_path].remove(achall)
if not filesystem.POSIX_MODE:
web_config_path = os.path.join(root_path, "web.config")
if os.path.exists(web_config_path):
sha256sum = crypto_util.sha256sum(web_config_path)
if sha256sum in _WEB_CONFIG_SHA256SUMS:
logger.info("Cleaning web.config file generated by Certbot in %s.",
root_path)
os.remove(web_config_path)
else:
logger.info("Not cleaning up the web.config file in %s "
"because it is not generated by Certbot.", root_path)
not_removed: List[str] = []
while self._created_dirs:
path = self._created_dirs.pop()
try:
os.rmdir(path)
except OSError as exc:
not_removed.insert(0, path)
logger.info("Challenge directory %s was not empty, didn't remove", path)
logger.debug("Error was: %s", exc)
self._created_dirs = not_removed
logger.debug("All challenges cleaned up")
class _WebrootMapAction(argparse.Action):
"""Action class for parsing webroot_map."""
def __call__(self, parser, namespace, webroot_map, option_string=None):
for domains, webroot_path in json.loads(webroot_map).items():
webroot_path = _validate_webroot(webroot_path)
namespace.webroot_map.update(
(d, webroot_path) for d in cli.add_domains(namespace, domains))
class _WebrootPathAction(argparse.Action):
"""Action class for parsing webroot_path."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._domain_before_webroot = False
def __call__(self, parser, namespace, webroot_path, option_string=None):
if self._domain_before_webroot:
raise errors.PluginError(
"If you specify multiple webroot paths, "
"one of them must precede all domain flags")
if namespace.webroot_path:
# Apply previous webroot to all matched
# domains before setting the new webroot path
prev_webroot = namespace.webroot_path[-1]
for domain in namespace.domains:
namespace.webroot_map.setdefault(domain, prev_webroot)
elif namespace.domains:
self._domain_before_webroot = True
namespace.webroot_path.append(_validate_webroot(webroot_path))
def _validate_webroot(webroot_path):
"""Validates and returns the absolute path of webroot_path.
:param str webroot_path: path to the webroot directory
:returns: absolute path of webroot_path
:rtype: str
"""
if not os.path.isdir(webroot_path):
raise errors.PluginError(webroot_path + " does not exist or is not a directory")
return os.path.abspath(webroot_path)
|
py
|
1a58fb1dda24fcd5cb4bb49896853e34617cc8cf
|
import random
ANSWERS = [
"of course not you idiot",
"sure, why not",
"do i look like an oracle to you?",
"yes, obviously",
"no",
"yes",
"literally kys",
"absolutely haram",
"idk, probably",
"is grass green? is the sky blue? is taiwan numbah wan?"
]
def is_question(msg):
m = msg.lower()
if (m.startswith("can ") or
m.startswith("could ") or
m.startswith("do ") or
m.startswith("does ") or
m.startswith("is ") or
m.startswith("may ") or
m.startswith("shall ") or
m.startswith("should ") or
m.startswith("would ") or
m.startswith("will ")):
return True
return False
def answer():
    # Use random.choice so every entry can be returned; the original
    # randint(1, len(ANSWERS) - 1) never selected ANSWERS[0].
    return random.choice(ANSWERS)
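# Illustrative usage sketch (not part of the original module); the message text is
# a placeholder:
if __name__ == "__main__":
    incoming = "is this bot ever going to answer me"
    if is_question(incoming):
        print(answer())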
|
py
|
1a58fd269af4917316704eee227ac54d197d9076
|
if __name__ == '__main__':
import sys
import os
import distutils.util
build_lib = 'build/lib'
    build_lib_ext = os.path.join(
        # sys.version[0:3] truncates to '3.1' on Python 3.10+, so derive the
        # major.minor suffix from sys.version_info instead.
        'build', 'lib.%s-%s.%s' % (distutils.util.get_platform(),
                                   sys.version_info[0], sys.version_info[1])
    )
sys.path.insert(0, build_lib)
sys.path.insert(0, build_lib_ext)
import test_yaml_ext
import test_appliance
test_appliance.run(test_yaml_ext)
|