ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M)
---|---|---|
py | b40fed97f9a559839831dfe6855a5a784bffd821 | #
# Copyright (c) 2015-2016 Intel Corporation. All rights reserved
#
from __future__ import with_statement
import logging
from alembic import context
from sqlalchemy import create_engine, pool
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import models
logging.basicConfig(level=logging.INFO)
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config_url = config.get_main_option('sqlalchemy.url')
# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
# support for passing database URL on the command line
# usage: alembic -x db_url=postgresql://localhost/orcmdb upgrade head
cmd_line_url = context.get_x_argument(as_dictionary=True).get('db_url')
# support passing database URL as tag in upgrade() function call.
# usage: command.upgrade(Config(alembic_ini), "head", tag=db_url)
tag_url = context.get_tag_argument()
missing_db_url_msg = ("Please set the database connection string in "
"either 'PG_DB_URL' environment variable or specify "
"it in the schema_migration config file under "
"'sqlalchemy.url'.\nConnection string pattern:\n"
"postgresql[+<driver>://[<username>[:<password>]]"
"@<server>[:<port>]/<database>\n\n"
"http://docs.sqlalchemy.org/en/latest/core/"
"engines.html#database-urls")
DB_URL_ENVIRONMENT_VAR = 'PG_DB_URL'
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
if cmd_line_url:
url = cmd_line_url
elif tag_url:
url = tag_url
elif config_url:
url = config_url
else:
url = os.getenv(DB_URL_ENVIRONMENT_VAR)
if url:
context.configure(url=url)
else:
raise RuntimeError(missing_db_url_msg)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
if cmd_line_url:
url = cmd_line_url
elif tag_url:
url = tag_url
elif config_url:
url = config_url
else:
url = os.getenv(DB_URL_ENVIRONMENT_VAR)
if url:
engine = create_engine(url, poolclass=pool.NullPool)
else:
raise RuntimeError(missing_db_url_msg)
connection = engine.connect()
context.configure(connection=connection, target_metadata=target_metadata,
compare_type=True)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
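# A minimal usage sketch, assuming an 'alembic.ini' next to this env.py (the
# exact ini path is an assumption). The URL sources checked above can be
# exercised as follows, mirroring the comments earlier in this file:
#
#   alembic -x db_url=postgresql://localhost/orcmdb upgrade head   # -x argument
#
#   from alembic import command
#   from alembic.config import Config
#   command.upgrade(Config('alembic.ini'), 'head', tag='postgresql://localhost/orcmdb')
#
#   export PG_DB_URL=postgresql://localhost/orcmdb; alembic upgrade head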
|
py | b40feebed1fee8b7e5b628327f9ed7cbb529bbea | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class AmtGlobals(Base):
"""Global settings placeholder for AMTPlugin.
The AmtGlobals class encapsulates a list of amtGlobals resources that are managed by the user.
A list of resources can be retrieved from the server using the AmtGlobals.find() method.
The list can be managed by using the AmtGlobals.add() and AmtGlobals.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'amtGlobals'
_SDM_ATT_MAP = {
'DiscoveryTimeout': 'discoveryTimeout',
'MaxOutstandingSessions': 'maxOutstandingSessions',
'MaxRelayDiscoveryRetransmissionCount': 'maxRelayDiscoveryRetransmissionCount',
'MaxRequestRetransmissionCount': 'maxRequestRetransmissionCount',
'ObjectId': 'objectId',
'RequestTimeout': 'requestTimeout',
'RetransmissionHolddown': 'retransmissionHolddown',
'SetupRate': 'setupRate',
'TeardownRate': 'teardownRate',
}
def __init__(self, parent):
super(AmtGlobals, self).__init__(parent)
@property
def DiscoveryTimeout(self):
"""
Returns
-------
- number: Initial time to wait for a response to a Relay Discovery message.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveryTimeout'])
@DiscoveryTimeout.setter
def DiscoveryTimeout(self, value):
self._set_attribute(self._SDM_ATT_MAP['DiscoveryTimeout'], value)
@property
def MaxOutstandingSessions(self):
"""
Returns
-------
- number: This is the point at which AMT Discovery packets will be restricted. AMT Discovery messages are sent at the configured rate until this number of them are in progress without receiving AMT Advertisement messages; at that point new Discovery messages are sent only as others complete.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxOutstandingSessions'])
@MaxOutstandingSessions.setter
def MaxOutstandingSessions(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxOutstandingSessions'], value)
@property
def MaxRelayDiscoveryRetransmissionCount(self):
"""
Returns
-------
- number: Maximum number of Relay Discovery retransmissions to allow before terminating relay discovery and reporting an error.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxRelayDiscoveryRetransmissionCount'])
@MaxRelayDiscoveryRetransmissionCount.setter
def MaxRelayDiscoveryRetransmissionCount(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxRelayDiscoveryRetransmissionCount'], value)
@property
def MaxRequestRetransmissionCount(self):
"""
Returns
-------
- number: Maximum number of Request retransmissions to allow before abandoning a relay and restarting relay discovery or reporting an error.
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxRequestRetransmissionCount'])
@MaxRequestRetransmissionCount.setter
def MaxRequestRetransmissionCount(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxRequestRetransmissionCount'], value)
@property
def ObjectId(self):
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def RequestTimeout(self):
"""
Returns
-------
- number: Initial time to wait for a response to a Request message.
"""
return self._get_attribute(self._SDM_ATT_MAP['RequestTimeout'])
@RequestTimeout.setter
def RequestTimeout(self, value):
self._set_attribute(self._SDM_ATT_MAP['RequestTimeout'], value)
@property
def RetransmissionHolddown(self):
"""
Returns
-------
- number: Number of seconds to wait in hold-down when the maximum number of retries was reached before trying to retransmit message. Applicable for both Relay Discovery and Request messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['RetransmissionHolddown'])
@RetransmissionHolddown.setter
def RetransmissionHolddown(self, value):
self._set_attribute(self._SDM_ATT_MAP['RetransmissionHolddown'], value)
@property
def SetupRate(self):
"""
Returns
-------
- number: Request Rate is the number of AMT Discovery packets to send in each second.
"""
return self._get_attribute(self._SDM_ATT_MAP['SetupRate'])
@SetupRate.setter
def SetupRate(self, value):
self._set_attribute(self._SDM_ATT_MAP['SetupRate'], value)
@property
def TeardownRate(self):
"""
Returns
-------
- number: Teardown rate is the number of clients to stop in each second. This value represents the initial value for teardown rate.
"""
return self._get_attribute(self._SDM_ATT_MAP['TeardownRate'])
@TeardownRate.setter
def TeardownRate(self, value):
self._set_attribute(self._SDM_ATT_MAP['TeardownRate'], value)
def update(self, DiscoveryTimeout=None, MaxOutstandingSessions=None, MaxRelayDiscoveryRetransmissionCount=None, MaxRequestRetransmissionCount=None, RequestTimeout=None, RetransmissionHolddown=None, SetupRate=None, TeardownRate=None):
"""Updates amtGlobals resource on the server.
Args
----
- DiscoveryTimeout (number): Initial time to wait for a response to a Relay Discovery message.
- MaxOutstandingSessions (number): This is the point at which AMT Discovery packets will be restricted. AMT Discovery messages are sent at the configured rate until this number of them are in progress without receiving AMT Advertisement messages; at that point new Discovery messages are sent only as others complete.
- MaxRelayDiscoveryRetransmissionCount (number): Maximum number of Relay Discovery retransmissions to allow before terminating relay discovery and reporting an error.
- MaxRequestRetransmissionCount (number): Maximum number of Request retransmissions to allow before abandoning a relay and restarting relay discovery or reporting an error.
- RequestTimeout (number): Initial time to wait for a response to a Request message.
- RetransmissionHolddown (number): Number of seconds to wait in hold-down when the maximum number of retries was reached before trying to retransmit message. Applicable for both Relay Discovery and Request messages.
- SetupRate (number): Request Rate is the number of AMT Discovery packets to send in each second.
- TeardownRate (number): Teardown rate is the number of clients to stop in each second. This value represents the initial value for teardown rate.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, DiscoveryTimeout=None, MaxOutstandingSessions=None, MaxRelayDiscoveryRetransmissionCount=None, MaxRequestRetransmissionCount=None, RequestTimeout=None, RetransmissionHolddown=None, SetupRate=None, TeardownRate=None):
"""Adds a new amtGlobals resource on the server and adds it to the container.
Args
----
- DiscoveryTimeout (number): Initial time to wait for a response to a Relay Discovery message.
- MaxOutstandingSessions (number): This is the point at which AMT Discovery packets will be restricted. AMT Discovery messages are sent at the configured rate until this number of them are in progress without receiving AMT Advertisement messages; at that point new Discovery messages are sent only as others complete.
- MaxRelayDiscoveryRetransmissionCount (number): Maximum number of Relay Discovery retransmissions to allow before terminating relay discovery and reporting an error.
- MaxRequestRetransmissionCount (number): Maximum number of Request retransmissions to allow before abandoning a relay and restarting relay discovery or reporting an error.
- RequestTimeout (number): Initial time to wait for a response to a Request message.
- RetransmissionHolddown (number): Number of seconds to wait in hold-down when the maximum number of retries was reached before trying to retransmit message. Applicable for both Relay Discovery and Request messages.
- SetupRate (number): Request Rate is the number of AMT Discovery packets to send in each second.
- TeardownRate (number): Teardown rate is the number of clients to stop in each second. This value represents the initial value for teardown rate.
Returns
-------
- self: This instance with all currently retrieved amtGlobals resources using find and the newly added amtGlobals resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained amtGlobals resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, DiscoveryTimeout=None, MaxOutstandingSessions=None, MaxRelayDiscoveryRetransmissionCount=None, MaxRequestRetransmissionCount=None, ObjectId=None, RequestTimeout=None, RetransmissionHolddown=None, SetupRate=None, TeardownRate=None):
"""Finds and retrieves amtGlobals resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve amtGlobals resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all amtGlobals resources from the server.
Args
----
- DiscoveryTimeout (number): Initial time to wait for a response to a Relay Discovery message.
- MaxOutstandingSessions (number): This is the point at which AMT Discovery packets will be restricted. AMT Discovery messages are sent at the configured rate until this number of them are in progress without receiving AMT Advertisement messages; at that point new Discovery messages are sent only as others complete.
- MaxRelayDiscoveryRetransmissionCount (number): Maximum number of Relay Discovery retransmissions to allow before terminating relay discovery and reporting an error.
- MaxRequestRetransmissionCount (number): Maximum number of Request retransmissions to allow before abandoning a relay and restarting relay discovery or reporting an error.
- ObjectId (str): Unique identifier for this object
- RequestTimeout (number): Initial time to wait for a response to a Request message.
- RetransmissionHolddown (number): Number of seconds to wait in hold-down when the maximum number of retries was reached before trying to retransmit message. Applicable for both Relay Discovery and Request messages.
- SetupRate (number): Request Rate is the number of AMT Discovery packets to send in each second.
- TeardownRate (number): Teardown rate is the number of clients to stop in each second. This value represents the initial value for teardown rate.
Returns
-------
- self: This instance with matching amtGlobals resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of amtGlobals data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the amtGlobals resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
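# Usage sketch based on the docstrings above ('parent_node' is a hypothetical
# handle to whichever container in the API tree exposes amtGlobals; the
# attribute values are illustrative only):
#   amt_globals = AmtGlobals(parent_node).find()
#   amt_globals.update(DiscoveryTimeout=5, SetupRate=100)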
|
py | b40feec0789e9a0eed1bd64fc71495dea332a9f2 | #!/usr/bin/env python2
'''
Fetch and combine multiple inventory account settings into a single
json hash.
'''
# vim: expandtab:tabstop=4:shiftwidth=4
from time import time
import argparse
import yaml
import os
import subprocess
import json
import errno
import fcntl
import tempfile
import copy
from string import Template
import shutil
CONFIG_FILE_NAME = 'multi_inventory.yaml'
DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
class MultiInventoryException(Exception):
'''Exceptions for MultiInventory class'''
pass
class MultiInventory(object):
'''
MultiInventory class:
Opens a yaml config file and reads aws credentials.
Stores a json hash of resources in result.
'''
def __init__(self, args=None):
# Allow args to be passed when called as a library
if not args:
self.args = {}
else:
self.args = args
self.cache_path = DEFAULT_CACHE_PATH
self.config = None
self.all_inventory_results = {}
self.result = {}
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
# Prefer a file in the same directory, fall back to a file in etc
if os.path.isfile(same_dir_config_file):
self.config_file = same_dir_config_file
elif os.path.isfile(etc_dir_config_file):
self.config_file = etc_dir_config_file
else:
self.config_file = None # expect env vars
def run(self):
'''This method checks to see if the local
cache is valid for the inventory.
If the cache is valid, return the cache;
otherwise the credentials are loaded from multi_inventory.yaml or from the env
and we attempt to get the inventory from the specified provider.
'''
# load yaml
if self.config_file and os.path.isfile(self.config_file):
self.config = self.load_yaml_config()
elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
# Build a default config
self.config = {}
self.config['accounts'] = [
{
'name': 'default',
'cache_location': DEFAULT_CACHE_PATH,
'provider': 'aws/hosts/ec2.py',
'env_vars': {
'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
}
},
]
self.config['cache_max_age'] = 300
else:
raise RuntimeError("Could not find valid ec2 credentials in the environment.")
if self.config.has_key('cache_location'):
self.cache_path = self.config['cache_location']
if self.args.get('refresh_cache', None):
self.get_inventory()
self.write_to_cache()
# if it's a host query, fetch and do not cache
elif self.args.get('host', None):
self.get_inventory()
elif not self.is_cache_valid():
# go fetch the inventories and cache them if cache is expired
self.get_inventory()
self.write_to_cache()
else:
# get data from disk
self.get_inventory_from_cache()
def load_yaml_config(self, conf_file=None):
"""Load a yaml config file with credentials to query the
respective cloud for inventory.
"""
config = None
if not conf_file:
conf_file = self.config_file
with open(conf_file) as conf:
config = yaml.safe_load(conf)
# Provide a check for unique account names
if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
raise MultiInventoryException('Duplicate account names in config file')
return config
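# Hedged sketch of the expected multi_inventory.yaml shape; the keys mirror the
# accessors used in this file and the default config built in run(), while the
# values are placeholders:
#   cache_max_age: 300
#   cache_location: ~/.ansible/tmp/multi_inventory.cache
#   accounts:
#   - name: default
#     provider: aws/hosts/ec2.py
#     env_vars:
#       AWS_ACCESS_KEY_ID: XXXX
#       AWS_SECRET_ACCESS_KEY: XXXX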
def get_provider_tags(self, provider, env=None):
"""Call <provider> and query all of the tags that are usuable
by ansible. If environment is empty use the default env.
"""
if not env:
env = os.environ
# Allow relatively path'd providers in config file
if os.path.isfile(os.path.join(self.file_path, provider)):
provider = os.path.join(self.file_path, provider)
# check to see if provider exists
if not os.path.isfile(provider) or not os.access(provider, os.X_OK):
raise RuntimeError("Problem with the provider. Please check path " \
"and that it is executable. (%s)" % provider)
cmds = [provider]
if self.args.get('host', None):
cmds.append("--host")
cmds.append(self.args.get('host', None))
else:
cmds.append('--list')
if 'aws' in provider.lower():
cmds.append('--refresh-cache')
return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
stdout=subprocess.PIPE, env=env)
@staticmethod
def generate_config(provider_files):
"""Generate the provider_files in a temporary directory.
"""
prefix = 'multi_inventory.'
tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
for provider_file in provider_files:
filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
filedes.write(content)
filedes.close()
return tmp_dir_path
def run_provider(self):
'''Setup the provider call with proper variables
and call self.get_provider_tags.
'''
try:
all_results = []
tmp_dir_paths = []
processes = {}
for account in self.config['accounts']:
tmp_dir = None
if account.has_key('provider_files'):
tmp_dir = MultiInventory.generate_config(account['provider_files'])
tmp_dir_paths.append(tmp_dir)
# Update env vars after creating provider_config_files
# so that we can grab the tmp_dir if it exists
env = account.get('env_vars', {})
if env and tmp_dir:
for key, value in env.items():
env[key] = Template(value).substitute(tmpdir=tmp_dir)
name = account['name']
provider = account['provider']
processes[name] = self.get_provider_tags(provider, env)
# for each process collect stdout when its available
for name, process in processes.items():
out, err = process.communicate()
all_results.append({
"name": name,
"out": out.strip(),
"err": err.strip(),
"code": process.returncode
})
finally:
# Clean up the mkdtemp dirs
for tmp_dir in tmp_dir_paths:
shutil.rmtree(tmp_dir)
return all_results
def get_inventory(self):
"""Create the subprocess to fetch tags from a provider.
Host query:
Query to return a specific host. If > 1 queries have
results then fail.
List query:
Query all of the different accounts for their tags. Once completed
store all of their results into one merged updated hash.
"""
provider_results = self.run_provider()
# process --host results
# For any 0 result, return it
if self.args.get('host', None):
count = 0
for results in provider_results:
if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
self.result = json.loads(results['out'])
count += 1
if count > 1:
raise RuntimeError("Found > 1 results for --host %s. \
This is an invalid state." % self.args.get('host', None))
# process --list results
else:
# For any non-zero, raise an error on it
for result in provider_results:
if result['code'] != 0:
err_msg = ['\nProblem fetching account: {name}',
'Error Code: {code}',
'StdErr: {err}',
'Stdout: {out}',
]
raise RuntimeError('\n'.join(err_msg).format(**result))
else:
self.all_inventory_results[result['name']] = json.loads(result['out'])
# Check if user wants extra vars in yaml by
# having hostvars and all_group defined
for acc_config in self.config['accounts']:
self.apply_account_config(acc_config)
# Build results by merging all dictionaries
values = self.all_inventory_results.values()
values.insert(0, self.result)
for result in values:
MultiInventory.merge_destructively(self.result, result)
def add_entry(self, data, keys, item):
''' Add an item to a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
keys = a.b
item = c
'''
if "." in keys:
key, rest = keys.split(".", 1)
if key not in data:
data[key] = {}
self.add_entry(data[key], rest, item)
else:
data[keys] = item
def get_entry(self, data, keys):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
keys = a.b
return c
'''
if keys and "." in keys:
key, rest = keys.split(".", 1)
return self.get_entry(data[key], rest)
else:
return data.get(keys, None)
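# Dotted-key sketch (hypothetical data, mirroring the docstrings above, where
# 'inv' is a MultiInventory instance):
#   d = {}
#   inv.add_entry(d, 'a.b', 'c')   # d == {'a': {'b': 'c'}}
#   inv.get_entry(d, 'a.b')        # returns 'c'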
def apply_account_config(self, acc_config):
''' Apply account config settings
'''
results = self.all_inventory_results[acc_config['name']]
results['all_hosts'] = results['_meta']['hostvars'].keys()
# Extra vars go here
for new_var, value in acc_config.get('extra_vars', {}).items():
for data in results['_meta']['hostvars'].values():
self.add_entry(data, new_var, value)
# Clone vars go here
for to_name, from_name in acc_config.get('clone_vars', {}).items():
for data in results['_meta']['hostvars'].values():
self.add_entry(data, to_name, self.get_entry(data, from_name))
# Extra groups go here
for new_var, value in acc_config.get('extra_groups', {}).items():
for data in results['_meta']['hostvars'].values():
results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
# Clone groups go here
# Build a group based on the desired key name
for to_name, from_name in acc_config.get('clone_groups', {}).items():
for name, data in results['_meta']['hostvars'].items():
key = '%s_%s' % (to_name, self.get_entry(data, from_name))
if not results.has_key(key):
results[key] = []
results[key].append(name)
# store the results back into all_inventory_results
self.all_inventory_results[acc_config['name']] = results
@staticmethod
def merge_destructively(input_a, input_b):
"merges b into input_a"
for key in input_b:
if key in input_a:
if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
MultiInventory.merge_destructively(input_a[key], input_b[key])
elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists, so add each element of b to a if it does not already exist
elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
for result in input_b[key]:
if result not in input_a[key]:
input_a[key].append(result)
# a is a list and not b
elif isinstance(input_a[key], list):
if input_b[key] not in input_a[key]:
input_a[key].append(input_b[key])
elif isinstance(input_b[key], list):
input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
else:
input_a[key] = [input_a[key], input_b[key]]
else:
input_a[key] = input_b[key]
return input_a
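# Illustrative merge sketch (hypothetical values, not taken from real inventory
# data):
#   a = {'group': {'hosts': ['h1']}, 'region': 'us-east'}
#   b = {'group': {'hosts': ['h2']}, 'region': 'us-west'}
#   MultiInventory.merge_destructively(a, b)
#   # a == {'group': {'hosts': ['h1', 'h2']}, 'region': ['us-east', 'us-west']}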
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path):
mod_time = os.path.getmtime(self.cache_path)
current_time = time()
if (mod_time + self.config['cache_max_age']) > current_time:
return True
return False
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on a provider')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force a refresh of the cached inventory (default: False)')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store', default=False,
help='Get all the variables about a specific instance')
self.args = parser.parse_args().__dict__
def write_to_cache(self):
''' Writes data in JSON format to a file '''
# if it does not exist, try and create it.
if not os.path.isfile(self.cache_path):
path = os.path.dirname(self.cache_path)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
json_data = MultiInventory.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
try:
fcntl.flock(cache, fcntl.LOCK_EX)
cache.write(json_data)
finally:
fcntl.flock(cache, fcntl.LOCK_UN)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
if not os.path.isfile(self.cache_path):
return None
with open(self.cache_path, 'r') as cache:
self.result = json.loads(cache.read())
return True
@classmethod
def json_format_dict(cls, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def result_str(self):
'''Return cache string stored in self.result'''
return self.json_format_dict(self.result, True)
if __name__ == "__main__":
MI2 = MultiInventory()
MI2.parse_cli_args()
MI2.run()
print MI2.result_str()
|
py | b40feef4f35e89733916c66515c915886066b009 | # Copyright 2017-2022 RStudio, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import logging
from guild import cli
from guild import resolver as resolverlib
from guild import resourcedef
from guild import util
log = logging.getLogger("guild")
RES_TERM = r"[a-zA-Z0-9_\-\.]+"
MODEL_RES_P = re.compile(r"(%s)$" % RES_TERM)
GUILDFILE_RES_P = re.compile(r"(%s):(%s)$" % (RES_TERM, RES_TERM))
PACKAGE_RES_P = re.compile(r"(%s)/(%s)$" % (RES_TERM, RES_TERM))
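# Spec forms matched by the three patterns above (names are illustrative):
#   "prepared-data"            -> resource defined on the current model
#   "mnist-cnn:prepared-data"  -> resource defined by another model in the guildfile
#   "gpkg.mnist/prepared-data" -> resource provided by an installed package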
DEFAULT_TARGET_TYPE = "copy"
###################################################################
# Exception classes
###################################################################
class OpDependencyError(Exception):
pass
###################################################################
# State
###################################################################
class OpDependency:
def __init__(self, resdef, res_location, config):
assert res_location
self.resdef = resdef
self.res_location = res_location
self.config = config
###################################################################
# Deps for opdef
###################################################################
def deps_for_opdef(opdef, flag_vals):
return [dep_for_depdef(depdef, flag_vals) for depdef in opdef.dependencies]
def dep_for_depdef(depdef, flag_vals):
resdef, res_location = resource_def(depdef, flag_vals)
config = _resdef_config(resdef, flag_vals)
return OpDependency(resdef, res_location, config)
def _resdef_config(resdef, flag_vals):
for name in [resdef.fullname, (resdef.flag_name or resdef.name)]:
try:
return flag_vals[name]
except KeyError:
pass
return None
def resource_def(depdef, flag_vals):
resdef, res_location = _resdef_for_dep(depdef, flag_vals)
_resolve_source_refs(resdef, flag_vals)
return resdef, res_location
def _resdef_for_dep(depdef, flag_vals):
if depdef.inline_resource:
return depdef.inline_resource, depdef.opdef.guildfile.dir
res_spec = util.resolve_refs(depdef.spec, flag_vals)
return util.find_apply(
[
_model_resource,
_guildfile_resource,
_package_resource,
_invalid_dependency_error,
],
res_spec,
depdef,
)
def _model_resource(spec, depdef):
m = MODEL_RES_P.match(spec)
if m is None:
return None
res_name = m.group(1)
return _modeldef_resource(depdef.modeldef, res_name, depdef.opdef)
def _modeldef_resource(modeldef, res_name, opdef):
resdef = modeldef.get_resource(res_name)
if resdef is None:
raise OpDependencyError(
"resource '%s' required by operation '%s' is not defined"
% (res_name, opdef.fullname)
)
return resdef, modeldef.guildfile.dir
def _guildfile_resource(spec, depdef):
m = GUILDFILE_RES_P.match(spec)
if m is None:
return None
model_name = m.group(1)
modeldef = depdef.opdef.guildfile.models.get(model_name)
if modeldef is None:
raise OpDependencyError(
"model '%s' in resource '%s' required by operation "
"'%s' is not defined" % (model_name, spec, depdef.opdef.fullname)
)
res_name = m.group(2)
return _modeldef_resource(modeldef, res_name, depdef.opdef)
def _package_resource(spec, depdef):
m = PACKAGE_RES_P.match(spec)
if m is None:
return None
pkg_name = m.group(1)
res_name = m.group(2)
res = _find_package_resource(pkg_name, res_name)
if not res:
raise OpDependencyError(
"resource '%s' required by operation '%s' is not installed"
% (spec, depdef.opdef.fullname)
)
return res.resdef, _package_res_location(res)
def _find_package_resource(pkg_name, res_name):
from guild import namespace # expensive
from guild import resource as reslib # expensive
try:
resources = list(reslib.for_name(res_name))
except LookupError:
return None
else:
for res in resources:
if namespace.apply_namespace(res.dist.project_name) == pkg_name:
return res
return None
def _package_res_location(res):
return os.path.join(res.dist.location, res.dist.key.replace(".", os.path.sep))
def _invalid_dependency_error(spec, depdef):
raise OpDependencyError(
"invalid dependency '%s' in operation '%s'" % (spec, depdef.opdef.fullname)
)
def _resolve_source_refs(resdef, flag_vals):
for source in resdef.sources:
source.uri = _resolve_dep_attr_refs(source.uri, flag_vals, resdef)
source.rename = _resolve_rename_spec_refs(source.rename, flag_vals, resdef)
def _resolve_dep_attr_refs(attr_val, flag_vals, resdef):
try:
return util.resolve_refs(attr_val, flag_vals)
except util.UndefinedReferenceError as e:
raise OpDependencyError(
"invalid flag reference '%s' in dependency '%s'"
% (resdef.name, e.reference)
)
def _resolve_rename_spec_refs(specs, flag_vals, resdef):
if not specs:
return specs
return [
resourcedef.RenameSpec(
_resolve_dep_attr_refs(spec.pattern, flag_vals, resdef),
_resolve_dep_attr_refs(spec.repl, flag_vals, resdef),
)
for spec in specs
]
###################################################################
# Dep constructors
###################################################################
def dep_for_path(path, resource_name=None):
res_data = {
"sources": [{"file": path}],
"target-type": "link",
}
resource_name = resource_name or "file:%s" % path
resdef = resourcedef.ResourceDef(resource_name, res_data)
return OpDependency(resdef, res_location=".", config=None)
###################################################################
# Resolve support
###################################################################
def ResolveContext(run):
"""Interface between op resolution and resolve context.
We maintain this interface to keep op_dep and its implementation
separate.
"""
return resolverlib.ResolveContext(run=run, unpack_dir=None)
def resolve_source(source, dep, resolve_context, resolve_cb=None):
last_resolution_error = None
for location in _dep_resource_locations(dep):
try:
source_paths = _resolve_source_for_location(
source, dep, location, resolve_context
)
except resolverlib.ResolutionError as e:
last_resolution_error = e
except Exception as e:
_unknown_source_resolution_error(source, dep, e)
else:
for path in source_paths:
resolved = _resolve_source_for_path(
path,
location,
source,
resolve_context.run.dir,
)
_handle_resolved_source(resolved, resolve_cb)
return source_paths
assert last_resolution_error
_source_resolution_error(source, dep, last_resolution_error)
def _handle_resolved_source(resolved, resolve_cb):
if resolved and resolve_cb:
resolve_cb(resolved)
def _dep_resource_locations(dep):
yield dep.res_location
if hasattr(dep.resdef, "modeldef") and dep.resdef.modeldef:
for parent in dep.resdef.modeldef.parents:
yield parent.dir
def _resolve_source_for_location(source, dep, location, resolve_context):
res_proxy = ResourceProxy(location, dep.config)
resolver = resolverlib.for_resdef_source(source, res_proxy)
if not resolver:
raise OpDependencyError(
"unsupported source '%s' in %s resource" % (source, dep.resdef.name)
)
return resolver.resolve(resolve_context)
def resolver_for_source(source, dep):
res_proxy = ResourceProxy(dep.res_location, dep.config)
return resolverlib.for_resdef_source(source, res_proxy)
class ResourceProxy:
"""Proxy for guild.deps.Resource, used by resolver API.
The APIs in guild.deps and guild.resolver are to be replaced by
this module and a new resolver interface. Until the new resolver
interface is created, we use a proxy resource to work with the
current interface.
"""
def __init__(self, location, config):
assert location
self.location = location
self.config = config
def _source_resolution_error(source, dep, e):
msg = "could not resolve '%s' in %s resource: %s" % (source, dep.resdef.name, e)
if source.help:
msg += "\n%s" % cli.style(source.help, fg="yellow")
raise OpDependencyError(msg)
def _unknown_source_resolution_error(source, dep, e):
log.exception(
"resolving required source '%s' in %s resource", source, dep.resdef.name
)
raise OpDependencyError(
"unexpected error resolving '%s' in %s resource: %r"
% (source, dep.resdef.name, e)
)
class ResolvedSource:
def __init__(
self,
source,
target_path,
target_root,
source_path,
source_origin,
):
self.source = source
self.target_path = target_path
self.target_root = target_root
self.source_path = source_path
self.source_origin = source_origin
def _resolve_source_for_path(source_path, source_origin, source, target_dir):
target_type = _target_type_for_source(source)
target_path = _target_path_for_source(
source_path, source_origin, source, target_dir
)
if util.compare_paths(source_path, target_path):
# Source was resolved directly to run dir - nothing to do.
return None
if target_type == "link":
_link_to_source(source_path, target_path, source.replace_existing)
elif target_type == "copy":
_copy_source(source_path, target_path, source.replace_existing)
else:
assert False, (target_type, source, source.resdef)
return ResolvedSource(
source,
target_path,
target_dir,
source_path,
source_origin,
)
def _target_type_for_source(source):
if source.target_type:
return _validate_target_type(source.target_type, "source %s" % source.name)
if source.resdef.target_type:
return _validate_target_type(
source.resdef.target_type, "resource %s" % source.resdef.name
)
return DEFAULT_TARGET_TYPE
def _validate_target_type(val, desc):
if val in ("link", "copy"):
return val
raise OpDependencyError(
"unsupported target-type '%s' in %s (expected 'link' or 'copy')" % (val, desc)
)
def _target_path_for_source(source_path, source_origin, source, target_dir):
"""Returns target path for source.
If target path is defined for the source, it redefines any value
defined for the resource parent.
"""
target_path = _source_target_path(source, source_path, source_origin)
if os.path.isabs(target_path):
raise OpDependencyError(
"invalid path '%s' in %s resource (path must be relative)"
% (target_path, source.resdef.name)
)
basename = os.path.basename(source_path)
if source.rename:
basename = _rename_source(basename, source.rename)
return os.path.join(target_dir, target_path, basename)
def _source_target_path(source, source_path, source_origin):
target_path_attr = source.target_path or source.resdef.target_path
if source.preserve_path:
if target_path_attr:
log.warning(
"target-path '%s' specified with preserve-path - ignoring",
target_path_attr,
)
return os.path.relpath(os.path.dirname(source_path), source_origin)
else:
return target_path_attr or source.resdef.target_path or ""
def _link_to_source(source_path, link, replace_existing=False):
assert os.path.isabs(link), link
source_path = util.strip_trailing_sep(source_path)
if os.path.lexists(link) or os.path.exists(link):
if not replace_existing:
log.warning("%s already exists, skipping link", link)
return
log.debug("deleting existing source link %s", link)
util.safe_rmtree(link)
util.ensure_dir(os.path.dirname(link))
log.debug("resolving source %s as link %s", source_path, link)
source_rel_path = _source_rel_path(source_path, link)
try:
util.symlink(source_rel_path, link)
except OSError as e:
_handle_source_link_error(e)
def _source_rel_path(source, link):
source_dir, source_name = os.path.split(source)
real_link = util.realpath(link)
link_dir = os.path.dirname(real_link)
source_rel_dir = os.path.relpath(source_dir, link_dir)
return os.path.join(source_rel_dir, source_name)
def _handle_source_link_error(e):
raise OpDependencyError("unable to link to dependency source: %s" % e)
def _rename_source(name, rename):
for spec in rename:
try:
renamed = re.sub(spec.pattern, spec.repl, name, count=1)
except Exception as e:
raise OpDependencyError(
"error renaming source %s (%r %r): %s"
% (name, spec.pattern, spec.repl, e)
)
else:
if renamed != name:
return renamed
return name
def _copy_source(source_path, dest_path, replace_existing=False):
assert os.path.isabs(dest_path), dest_path
if os.path.lexists(dest_path) or os.path.exists(dest_path):
if not replace_existing:
log.warning("%s already exists, skipping copy", dest_path)
return
log.debug("deleting existing source dest %s", dest_path)
util.safe_rmtree(dest_path)
util.ensure_dir(os.path.dirname(dest_path))
log.debug("resolving source %s as copy %s", source_path, dest_path)
if os.path.isdir(source_path):
util.copytree(source_path, dest_path)
else:
util.copyfile(source_path, dest_path)
###################################################################
# Op run resolve support
###################################################################
def resolved_op_runs_for_opdef(opdef, flag_vals, resolver_factory=None):
"""Returns a list of run, dep tuples for resolved run deps for opdef."""
try:
deps = deps_for_opdef(opdef, flag_vals)
except OpDependencyError as e:
log.debug("error resolving runs for opdef: %s", e)
return []
else:
return list(_iter_resolved_op_runs(deps, flag_vals, resolver_factory))
def _iter_resolved_op_runs(deps, flag_vals, resolver_factory=None):
resolver_factory = resolver_factory or resolver_for_source
for dep in deps:
for source in dep.resdef.sources:
if not is_operation_source(source):
continue
resolver = resolver_factory(source, dep)
assert isinstance(resolver, resolverlib.OperationResolver), resolver
for run_id_prefix in _iter_flag_val_items(flag_vals.get(dep.resdef.name)):
try:
run = resolver.resolve_op_run(run_id_prefix, include_staged=True)
except resolverlib.ResolutionError:
log.warning(
"cannot find a suitable run for required resource '%s'",
dep.resdef.name,
)
else:
yield run, dep
def is_operation_source(source):
cls = resolverlib.resolver_class_for_source(source)
return cls is not None and issubclass(cls, resolverlib.OperationResolver)
def _iter_flag_val_items(val):
if isinstance(val, list):
for item in val:
yield item
else:
yield val
|
py | b40ff033be0c8fae158e6524e8741520a3d5864a | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
from contextlib import contextmanager
from typing import ContextManager, Callable
import sqlalchemy as sa
from sqlalchemy.engine import Engine, create_engine
from sqlalchemy.ext.declarative.api import DeclarativeMeta, declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from flask_sqlalchemy import SQLAlchemy
from envs import Envs
BASE_DIR = Envs.BASE_DIR
# Explicitly set autocommit and autoflush
# Disables autocommit to make developers commit manually
# Enables autoflush to make changes visible in the same session
# Disable expire_on_commit to make it possible that object can detach
SESSION_OPTIONS = {
'autocommit': False,
'autoflush': True,
'expire_on_commit': False
}
ENGINE_OPTIONS = {}
def default_table_args(comment: str) -> dict:
return {
'comment': comment,
'mysql_engine': 'innodb',
'mysql_charset': 'utf8mb4',
}
def _turn_db_timezone_to_utc(original_uri: str) -> str:
""" string operator that make any db into utc timezone
Args:
original_uri (str): original uri without set timezone
Returns:
str: uri with explicitly set utc timezone
"""
# Do not set `init_command` for sqlite, since it doesn't support it yet
if original_uri.startswith('sqlite'):
return original_uri
_set_timezone_args = 'init_command=SET SESSION time_zone=\'%2B00:00\''
parsed_uri = original_uri.split('?')
if len(parsed_uri) == 1:
return f'{parsed_uri[0]}?{_set_timezone_args}'
assert len(parsed_uri) == 2, \
f'failed to parse uri [{original_uri}], since it has more than one ?'
base_uri, args = parsed_uri
args = args.split('&&')
# remove if there's init_command already
args_list = [_set_timezone_args]
for a in args:
if a.startswith('init_command'):
command = a.split('=')[1]
# ignore other set time_zone args
if command.startswith('SET SESSION time_zone'):
continue
args_list[0] = f'{args_list[0]};{command}'
else:
args_list.append(a)
args = '&&'.join(args_list)
return f'{base_uri}?{args}'
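# Illustrative sketch of the rewrite above (URIs are made-up examples):
#   _turn_db_timezone_to_utc('mysql+pymysql://user:pass@db/fedlearner')
#   -> "mysql+pymysql://user:pass@db/fedlearner?init_command=SET SESSION time_zone='%2B00:00'"
#   _turn_db_timezone_to_utc('sqlite:///app.db')
#   -> 'sqlite:///app.db'   # sqlite URIs are returned unchanged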
def get_database_uri() -> str:
"""Get database uri for whole system.
- Get database uri:
- environment variables
- default uri that uses sqlite at local file system
- Process with database uri
- add a fixed time_zone utc
- ...
Returns:
str: database uri with utc timezone
"""
uri = ''
if 'SQLALCHEMY_DATABASE_URI' in os.environ:
uri = os.getenv('SQLALCHEMY_DATABASE_URI')
else:
uri = 'sqlite:///{}?check_same_thread=False'.format(
os.path.join(BASE_DIR, 'app.db'))
return _turn_db_timezone_to_utc(uri)
def get_engine(database_uri: str) -> Engine:
"""get engine according to database uri
Args:
database_uri (str): database uri used for create engine
Returns:
Engine: engine used for managing connections
"""
return create_engine(database_uri, **ENGINE_OPTIONS)
@contextmanager
def get_session(db_engine: Engine) -> ContextManager[Session]:
"""Get session from database engine.
Example:
with get_session(db_engine) as session:
# write your query, do not need to handle database connection
session.query(MODEL).filter_by(field=value).first()
"""
try:
session: Session = sessionmaker(bind=db_engine, **SESSION_OPTIONS)()
except Exception:
raise Exception('unknown db engine')
try:
yield session
except Exception:
session.rollback()
raise
finally:
session.close()
def make_session_context() -> Callable[[], ContextManager[Session]]:
"""A functional closure that will store engine
Call it n times if you want n connection pools
Returns:
Callable[[], ContextManager[Session]]
a function that returns a contextmanager
Examples:
# First initialize a connection pool;
# do this once for each connection pool you want
session_context = make_session_context()
...
# You use it multiple times as follows.
with session_context() as session:
session.query(SomeMapperClass).filter_by(id=1).one()
"""
engine = None
def wrapper_get_session():
nonlocal engine
if engine is None:
engine = get_engine(get_database_uri())
return get_session(engine)
return wrapper_get_session
class DBHandler(object):
def __init__(self) -> None:
super().__init__()
self.engine: Engine = get_engine(get_database_uri())
self.Model: DeclarativeMeta = declarative_base(bind=self.engine)
for module in sa, sa.orm:
for key in module.__all__:
if not hasattr(self, key):
setattr(self, key, getattr(module, key))
def session_scope(self) -> ContextManager[Session]:
return get_session(self.engine)
@property
def metadata(self) -> DeclarativeMeta:
return self.Model.metadata
def rebind(self, database_uri: str):
self.engine = get_engine(database_uri)
self.Model = declarative_base(bind=self.engine, metadata=self.metadata)
def create_all(self):
return self.metadata.create_all()
def drop_all(self):
return self.metadata.drop_all()
# now db_handler and db are alive at the same time
# db will be replaced by db_handler in the near future
db_handler = DBHandler()
db = SQLAlchemy(session_options=SESSION_OPTIONS,
engine_options=ENGINE_OPTIONS,
metadata=db_handler.metadata)
|
py | b40ff266d5850da208030b2724938d731cff42df | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Minimal Reference implementation for the Frechet Video Distance (FVD).
FVD is a metric for the quality of video generation models. It is inspired by
the FID (Frechet Inception Distance) used for images, but uses a different
embedding to be better suitable for videos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
import tensorflow_hub as hub
def preprocess(videos, target_resolution):
"""Runs some preprocessing on the videos for I3D model.
Args:
videos: <T>[batch_size, num_frames, height, width, depth] The videos to be
preprocessed. We don't care about the specific dtype of the videos, it can
be anything that tf.image.resize_bilinear accepts. Values are expected to
be in the range 0-255.
target_resolution: (width, height): target video resolution
Returns:
videos: <float32>[batch_size, num_frames, height, width, depth]
"""
videos_shape = videos.shape.as_list()
all_frames = tf.reshape(videos, [-1] + videos_shape[-3:])
resized_videos = tf.image.resize_bilinear(all_frames, size=target_resolution)
target_shape = [videos_shape[0], -1] + list(target_resolution) + [3]
output_videos = tf.reshape(resized_videos, target_shape)
scaled_videos = 2. * tf.cast(output_videos, tf.float32) / 255. - 1
return scaled_videos
def _is_in_graph(tensor_name):
"""Checks whether a given tensor does exists in the graph."""
try:
tf.get_default_graph().get_tensor_by_name(tensor_name)
except KeyError:
return False
return True
def create_id3_embedding(videos):
"""Embeds the given videos using the Inflated 3D Convolution network.
Downloads the graph of the I3D from tf.hub and adds it to the graph on the
first call.
Args:
videos: <float32>[batch_size, num_frames, height=224, width=224, depth=3].
Expected range is [-1, 1].
Returns:
embedding: <float32>[batch_size, embedding_size]. embedding_size depends
on the model used.
Raises:
ValueError: when a provided embedding_layer is not supported.
"""
batch_size = 16
module_spec = "https://tfhub.dev/deepmind/i3d-kinetics-400/1"
# Making sure that we import the graph separately for
# each different input video tensor.
module_name = "fvd_kinetics-400_id3_module_" + six.ensure_str(
videos.name).replace(":", "_")
assert_ops = [
tf.Assert(
tf.reduce_max(videos) <= 1.001,
["max value in frame is > 1", videos]),
tf.Assert(
tf.reduce_min(videos) >= -1.001,
["min value in frame is < -1", videos]),
tf.assert_equal(
tf.shape(videos)[0],
batch_size, ["invalid frame batch size: ",
tf.shape(videos)],
summarize=6),
]
with tf.control_dependencies(assert_ops):
videos = tf.identity(videos)
module_scope = "%s_apply_default/" % module_name
# To check whether the module has already been loaded into the graph, we look
# for a given tensor name. If this tensor name exists, we assume the function
# has been called before and the graph was imported. Otherwise we import it.
# Note: in theory, the tensor could exist, but have wrong shapes.
# This will happen if create_id3_embedding is called with a frames_placeholder
# of wrong size/batch size, because even though that will throw a tf.Assert
# on graph-execution time, it will insert the tensor (with wrong shape) into
# the graph. This is why we need the following assert.
video_batch_size = int(videos.shape[0])
assert video_batch_size in [batch_size, -1, None], "Invalid batch size"
tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
if not _is_in_graph(tensor_name):
i3d_model = hub.Module(module_spec, name=module_name)
i3d_model(videos)
# gets the kinetics-i3d-400-logits layer
tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
tensor = tf.get_default_graph().get_tensor_by_name(tensor_name)
return tensor
def calculate_fvd(real_activations,
generated_activations):
"""Returns a list of ops that compute metrics as funcs of activations.
Args:
real_activations: <float32>[num_samples, embedding_size]
generated_activations: <float32>[num_samples, embedding_size]
Returns:
A scalar that contains the requested FVD.
"""
return tfgan.eval.frechet_classifier_distance_from_activations(
real_activations, generated_activations)
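# A minimal end-to-end sketch using the three helpers above; the video shapes,
# zero-filled tensors and session setup are assumptions for illustration (note
# create_id3_embedding asserts a batch size of 16):
#
#   real = tf.zeros([16, 15, 64, 64, 3])   # [batch, frames, height, width, rgb]
#   fake = tf.zeros([16, 15, 64, 64, 3])
#   fvd = calculate_fvd(
#       create_id3_embedding(preprocess(real, (224, 224))),
#       create_id3_embedding(preprocess(fake, (224, 224))))
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(tf.tables_initializer())
#       print("FVD:", sess.run(fvd))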
|
py | b40ff28c30e97704c805199f8d5ecf7e330fdb17 | #Bert-GPT2 train all
from __future__ import absolute_import, division, print_function
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
import random
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from transformers import (AdamW, WEIGHTS_NAME, CONFIG_NAME)
from utils import get_dataset_key, make_logdir
from kogpt2.pytorch_kogpt2 import get_pytorch_kogpt2_model
from gluonnlp.data import SentencepieceTokenizer
from kogpt2.utils import get_tokenizer
from pytorch_pretrained_bert.modeling import BertModel
from pytorch_pretrained_bert.tokenization2 import BertTokenizer
from kogpt2.model.Seq2Seq import Seq2Seq
TOKENS = ["<bos>", "<eos>", "<cls>", "<sep>", "<pad>"]
MODEL_INPUTS = ["source_ids", "target_ids", "lm_labels", "key_scores"]
PADDED_INPUTS = ["source_ids", "target_ids", "lm_labels", "key_scores"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, args):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if args.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=args.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
max_s = max(len(x) for x in dataset["source_ids"])
max_t = max(len(x) for x in dataset["target_ids"])
max_l = max(max_s, max_t)
for name in PADDED_INPUTS:
if name == "source_ids":
dataset[name] = [x + [0] * (max_l - len(x)) for x in dataset[name]]
elif name == "key_scores":
dataset[name] = [x + [padding] * (max_l - len(x)) for x in dataset[name]]
else:
dataset[name] = [x + [padding if name != "lm_labels" else -100] * (max_t - len(x)) for x in dataset[name]]
return dataset
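# Padding sketch with toy ids: for source_ids=[[1, 2], [3]] and
# target_ids=[[4, 5, 6]], max_l is 3, so source_ids become [[1, 2, 0], [3, 0, 0]];
# target_ids/lm_labels are padded to the longest target (lm_labels with -100,
# target_ids with `padding`), and key_scores are padded to max_l with `padding`.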
def build_input_from_segments(source, target, score, bert_tokenizer, gpt_vocab, lm_labels=False, with_eos=True):
bos, eos, = gpt_vocab[gpt_vocab.bos_token], gpt_vocab[gpt_vocab.eos_token]
instance = {}
instance["source_ids"] = bert_tokenizer.convert_tokens_to_ids(["[CLS]"] + source + ["[SEP]"])
instance["target_ids"] = [bos] + target + ([eos] if with_eos else [])
instance["lm_labels"] = [-100] * len(instance["target_ids"])
if lm_labels:
instance["lm_labels"] = [bos] + target + [eos]
instance["key_scores"] = score
return instance
def get_data_loaders(args, bert_tokenizer, gpt_tokenizer, gpt_vocab):
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
sourceList_train, targetList_train, attentionList_train, sourceList_valid, targetList_valid, attentionList_valid = get_dataset_key(bert_tokenizer, gpt_tokenizer, gpt_vocab, args.dataset_path)
for line in zip(sourceList_train, targetList_train, attentionList_train):
instance = build_input_from_segments(line[0], line[1], line[2], bert_tokenizer, gpt_vocab, True)
for input_name, input_array in instance.items():
datasets["train"][input_name].append(input_array)
for line in zip(sourceList_valid, targetList_valid, attentionList_valid):
instance = build_input_from_segments(line[0], line[1], line[2], bert_tokenizer, gpt_vocab, True)
for input_name, input_array in instance.items():
datasets["valid"][input_name].append(input_array)
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=gpt_vocab[gpt_vocab.padding_token])
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed))
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
parser = ArgumentParser()
parser.add_argument("--dataset_path", type=str, default="", help="Path or url of the dataset.")
parser.add_argument("--use_adapter", default=False, action='store_true', help="Use adapter or not")
parser.add_argument("--keyword_module", type=str, default="", help="add, attention, ")
parser.add_argument("--model_checkpoint", type=str, default="bertGpt", help="Path, url or short name of the model")
parser.add_argument("--train_batch_size", type=int, default=4, help="Batch size for training")
parser.add_argument("--valid_batch_size", type=int, default=4, help="Batch size for validation")
parser.add_argument("--gradient_accumulation_steps", type=int, default=8, help="Accumulate gradients on several steps")
parser.add_argument("--lr", type=float, default=6.25e-5, help="Learning rate")
parser.add_argument("--max_norm", type=float, default=1.0, help="Clipping gradient norm")
parser.add_argument("--n_epochs", type=int, default=3, help="Number of training epochs")
parser.add_argument("--eval_before_start", action='store_true', help="If true start with a first evaluation before training")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
parser.add_argument("--fp16", type=str, default="", help="Set to O0, O1, O2 or O3 for fp16 training (see apex documentation)")
parser.add_argument("--local_rank", type=int, default=-1, help="Local rank for distributed training (-1: not distributed)")
parser.add_argument("--bert_model_path", default="./", type=str, help="Bert pre-trained model path")
parser.add_argument("--vocab_file", default="./vocab.korean.rawtext.list", type=str, help="The vocabulary file that the BERT model was trained on.")
parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
args = parser.parse_args()
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d",
args.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(args))
# Initialize distributed training if needed
args.distributed = (args.local_rank != -1)
if args.distributed:
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer.")
#tokenizer_class = GPT2Tokenizer if "gpt2" in args.model_checkpoint else OpenAIGPTTokenizer # cant use Autotokenizer because checkpoint could be a Path
#tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# Load KoBERT model and tokenizer
bert_tokenizer = BertTokenizer.from_pretrained(args.vocab_file, do_lower_case=args.do_lower_case)
bert_model = BertModel.from_pretrained(args.bert_model_path)
bert_model.to(args.device)
# Load KoGPT2 model and tokenizer
tok_path = get_tokenizer()
gpt_model, gpt_vocab = get_pytorch_kogpt2_model(keyword_module=args.keyword_module, use_adapter=args.use_adapter)
gpt_tokenizer = SentencepieceTokenizer(tok_path)
gpt_model.to(args.device)
model = Seq2Seq(bert_model, gpt_model, gpt_vocab, args)
optimizer = AdamW(model.parameters(), lr=args.lr, correct_bias=True)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
#if args.fp16:
#from apex import amp # Apex is only required if we use fp16 training
#model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16)
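    # Note: update() below calls amp.scale_loss() whenever --fp16 is set, so the apex
    # import and amp.initialize() lines above must be re-enabled (or --fp16 left empty)
    # for mixed-precision training to work.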
if args.distributed:
model = DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(args, bert_tokenizer, gpt_tokenizer, gpt_vocab)
# Training function and trainer
def update(engine, batch):
model.train()
batch = tuple(input_tensor.to(args.device) for input_tensor in batch)
source_ids, target_ids, lm_labels, keyword_scores = batch
#(lm_loss), *_ = model(input_ids, token_type_ids=token_type_ids, labels=lm_labels)
(lm_loss), *_ = model(source_ids, target_ids, key_score=keyword_scores, lm_labels=lm_labels)
loss = lm_loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
if engine.state.iteration % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
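    # update() above scales the loss by gradient_accumulation_steps and only calls
    # optimizer.step() once per accumulation cycle, so the effective batch size is
    # train_batch_size * gradient_accumulation_steps.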
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(args.device) for input_tensor in batch)
source_ids, target_ids, lm_labels, keyword_scores = batch
#lm_logits, *_ = model(input_ids, token_type_ids=token_type_ids,)
lm_logits, *_ = model(source_ids, target_ids, key_score=keyword_scores)
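            # Shift by one position so the logits at step t are scored against the
            # token at step t+1 (standard causal LM evaluation).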
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted), (lm_labels_flat_shifted)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if args.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if args.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if args.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, args.lr), (args.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-100), output_transform=lambda x: (x[0], x[1]))}
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], args)})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if args.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED,
lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
log_dir = make_logdir(args.model_checkpoint, args.dataset_path, args.use_adapter, args.keyword_module)
tb_logger = TensorboardLogger(log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(log_dir, 'checkpoint', save_interval=1, n_saved=2)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': model}) # "getattr" takes care of distributed encapsulation
torch.save(args, log_dir + '/model_training_args.bin')
#getattr(model, 'module', model).config.to_json_file(os.path.join(log_dir, CONFIG_NAME))
#tokenizer.save_pretrained(log_dir)
# Run the training
trainer.run(train_loader, max_epochs=args.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if args.local_rank in [-1, 0] and args.n_epochs > 0:
os.rename(os.path.join(log_dir, checkpoint_handler._saved[-1][1]), os.path.join(log_dir,
WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train() |
py | b40ff2abb01c4d97750a05d082f6f1791b35dc36 | #!/usr/bin/python
"""
print
print "checking for nltk"
try:
import nltk
except ImportError:
print("you should install nltk before continuing")
print "checking for numpy"
try:
import numpy
except ImportError:
print "you should install numpy before continuing"
print "checking for scipy"
try:
import scipy
except:
print "you should install scipy before continuing"
print "checking for sklearn"
try:
import sklearn
except:
print "you should install sklearn before continuing"
print"""
print("downloading the Enron dataset (this may take a while)")
print("to check on progress, you can cd up one level, then execute <ls -lthr>")
print("Enron dataset should be last item on the list, along with its current size")
print("download will complete at about 423 MB")
import urllib.request
url = "https://www.cs.cmu.edu/~./enron/enron_mail_20150507.tar.gz"
urllib.request.urlretrieve(url, filename="../enron_mail_20150507.tar.gz")
print("download complete!")
print()
print("unzipping Enron dataset (this may take a while)")
import tarfile
import os
os.chdir("..")
tfile = tarfile.open("enron_mail_20150507.tar.gz", "r:gz")
tfile.extractall(".")
print("you're ready to go!")
|
py | b40ff450b7fc2d51a761cc5e9cc0fadffadf989e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('benchmark', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='benchmarkexecutionentry',
name='status',
field=models.IntegerField(default=0, choices=[(0, b'Ready'), (1, b'In_Progress'), (2, b'Finished')]),
preserve_default=True,
),
]
|
py | b40ff542a00ed15391c8d45aeba7c41ea7a9ab68 | # Copyright 2009-2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT
from requestbuilder import Arg
class DeactivateMFADevice(IAMRequest):
DESCRIPTION = 'Deactivate an MFA device'
ARGS = [Arg('-u', '--user-name', dest='UserName', metavar='USER',
required=True,
help='user whose MFA device to deactivate (required)'),
Arg('-s', '--serial-number', dest='SerialNumber', metavar='SERIAL',
required=True, help='''serial number of the MFA device to
deactivate (required)'''),
AS_ACCOUNT]
|
py | b40ff636592f75351333a69cfdac196d2c767b9b | import numpy
import pytest
from helpers import *
from reikna.algorithms import PureParallel
from reikna.core import Parameter, Annotation, Type, Computation
import reikna.cluda.dtypes as dtypes
from reikna.transformations import mul_param, copy
class NestedPureParallel(Computation):
def __init__(self, size, dtype):
Computation.__init__(self, [
Parameter('output', Annotation(Type(dtype, shape=size), 'o')),
Parameter('input', Annotation(Type(dtype, shape=size), 'i'))])
self._p = PureParallel([
Parameter('output', Annotation(Type(dtype, shape=size), 'o')),
Parameter('i1', Annotation(Type(dtype, shape=size), 'i')),
Parameter('i2', Annotation(Type(dtype, shape=size), 'i'))],
"""
${i1.ctype} t1 = ${i1.load_idx}(${idxs[0]});
${i2.ctype} t2 = ${i2.load_idx}(${idxs[0]});
${output.store_idx}(${idxs[0]}, t1 + t2);
""")
def _build_plan(self, plan_factory, device_params, output, input_):
plan = plan_factory()
plan.computation_call(self._p, output, input_, input_)
return plan
def test_nested(thr):
N = 1000
dtype = numpy.float32
p = NestedPureParallel(N, dtype)
a = get_test_array_like(p.parameter.input)
a_dev = thr.to_device(a)
res_dev = thr.empty_like(p.parameter.output)
pc = p.compile(thr)
pc(res_dev, a_dev)
res_ref = a + a
assert diff_is_negligible(res_dev.get(), res_ref)
def test_guiding_input(thr):
N = 1000
dtype = numpy.float32
p = PureParallel(
[
Parameter('output', Annotation(Type(dtype, shape=(2, N)), 'o')),
Parameter('input', Annotation(Type(dtype, shape=N), 'i'))],
"""
float t = ${input.load_idx}(${idxs[0]});
${output.store_idx}(0, ${idxs[0]}, t);
${output.store_idx}(1, ${idxs[0]}, t * 2);
""",
guiding_array='input')
a = get_test_array_like(p.parameter.input)
a_dev = thr.to_device(a)
res_dev = thr.empty_like(p.parameter.output)
pc = p.compile(thr)
pc(res_dev, a_dev)
res_ref = numpy.vstack([a, a * 2])
assert diff_is_negligible(res_dev.get(), res_ref)
def test_guiding_output(thr):
N = 1000
dtype = numpy.float32
p = PureParallel(
[
Parameter('output', Annotation(Type(dtype, shape=N), 'o')),
Parameter('input', Annotation(Type(dtype, shape=(2, N)), 'i'))],
"""
float t1 = ${input.load_idx}(0, ${idxs[0]});
float t2 = ${input.load_idx}(1, ${idxs[0]});
${output.store_idx}(${idxs[0]}, t1 + t2);
""",
guiding_array='output')
a = get_test_array_like(p.parameter.input)
a_dev = thr.to_device(a)
res_dev = thr.empty_like(p.parameter.output)
pc = p.compile(thr)
pc(res_dev, a_dev)
res_ref = a[0] + a[1]
assert diff_is_negligible(res_dev.get(), res_ref)
def test_guiding_shape(thr):
N = 1000
dtype = numpy.float32
p = PureParallel(
[
Parameter('output', Annotation(Type(dtype, shape=(2, N)), 'o')),
Parameter('input', Annotation(Type(dtype, shape=(2, N)), 'i'))],
"""
float t1 = ${input.load_idx}(0, ${idxs[0]});
float t2 = ${input.load_idx}(1, ${idxs[0]});
${output.store_idx}(0, ${idxs[0]}, t1 + t2);
${output.store_idx}(1, ${idxs[0]}, t1 - t2);
""",
guiding_array=(N,))
a = get_test_array_like(p.parameter.input)
a_dev = thr.to_device(a)
res_dev = thr.empty_like(p.parameter.output)
pc = p.compile(thr)
pc(res_dev, a_dev)
res_ref = numpy.vstack([a[0] + a[1], a[0] - a[1]])
assert diff_is_negligible(res_dev.get(), res_ref)
def test_zero_length_shape(thr):
dtype = numpy.float32
p = PureParallel(
[
Parameter('output', Annotation(Type(dtype, shape=tuple()), 'o')),
Parameter('input', Annotation(Type(dtype, shape=tuple()), 'i'))],
"""
float t = ${input.load_idx}();
${output.store_idx}(t * 2);
""",
guiding_array=tuple())
a = get_test_array_like(p.parameter.input)
a_dev = thr.to_device(a)
res_dev = thr.empty_like(p.parameter.output)
pc = p.compile(thr)
pc(res_dev, a_dev)
res_ref = (a * 2).astype(dtype)
assert diff_is_negligible(res_dev.get(), res_ref)
def test_trf_with_guiding_input(thr):
"""
Test the creation of ``PureParallel`` out of a transformation,
with an input parameter as a guiding array.
"""
N = 1000
coeff = 3
dtype = numpy.float32
arr_t = Type(dtype, shape=N)
trf = mul_param(arr_t, dtype)
p = PureParallel.from_trf(trf, trf.input)
# The new PureParallel has to preserve the parameter list of the original transformation.
assert list(p.signature.parameters.values()) == list(trf.signature.parameters.values())
a = get_test_array_like(p.parameter.input)
a_dev = thr.to_device(a)
res_dev = thr.empty_like(p.parameter.output)
pc = p.compile(thr)
pc(res_dev, a_dev, coeff)
assert diff_is_negligible(res_dev.get(), a * 3)
def test_trf_with_guiding_output(thr):
"""
Test the creation of ``PureParallel`` out of a transformation,
with an output parameter as a guiding array.
"""
N = 1000
coeff = 3
dtype = numpy.float32
arr_t = Type(dtype, shape=N)
trf = mul_param(arr_t, dtype)
p = PureParallel.from_trf(trf, trf.output)
# The new PureParallel has to preserve the parameter list of the original transformation.
assert list(p.signature.parameters.values()) == list(trf.signature.parameters.values())
a = get_test_array_like(p.parameter.input)
a_dev = thr.to_device(a)
res_dev = thr.empty_like(p.parameter.output)
pc = p.compile(thr)
pc(res_dev, a_dev, coeff)
assert diff_is_negligible(res_dev.get(), a * 3)
class TestSameArgument(Computation):
def __init__(self, arr):
copy_trf = copy(arr, out_arr_t=arr)
self._copy_comp = PureParallel.from_trf(copy_trf, copy_trf.input)
Computation.__init__(self, [
Parameter('outer_output', Annotation(arr, 'o')),
Parameter('outer_input', Annotation(arr, 'i'))])
def _build_plan(self, plan_factory, device_params, output, input_):
plan = plan_factory()
temp = plan.temp_array_like(output)
plan.computation_call(self._copy_comp, temp, input_)
plan.computation_call(self._copy_comp, temp, temp)
plan.computation_call(self._copy_comp, output, temp)
return plan
def test_same_argument(some_thr):
"""
A regression test for an unexpected interaction of the way PureParallel.from_trf() worked
and a logic flaw in processing 'io'-type nodes in a transformation tree.
from_trf() created a trivial computation with a single 'io' parameter and
attached the given transformation to it.
    This preserved the order of parameters in the resulting computation
(because of how the transformation tree was traversed),
and quite nicely relied only on the public API.
So, for a simple transformation with one input and one output the root PP computation
looked like:
input (io)
and after attaching the transformation:
input (i)
input (o) ---(tr:input -> tr:output)---> output
When this computation was used inside another computation and was passed the same argument
(e.g. 'temp') both for input and output, during the translation stage
this would be transformed to (since 'temp' is passed both to 'input' and 'output')
temp (i)
temp (o) ---(tr:input -> tr:output)---> temp
because the translation was purely name-based.
This resulted in some cryptic errors due to the name clash.
Now the masked 'input' should have been mangled instead of translated,
producing something like
temp (i)
_nested_input (o) ---(tr:input -> tr:output)---> temp
but this functionality was not implemented.
"""
arr = get_test_array((1000, 8, 1), numpy.complex64)
arr_dev = some_thr.to_device(arr)
test = TestSameArgument(arr)
testc = test.compile(some_thr)
testc(arr_dev, arr_dev)
assert diff_is_negligible(arr_dev.get(), arr)
|
py | b40ff726b3ef01149313d8393822f27f6fbd8e0e | import unittest
from code_transformer.modeling.constants import BIN_PADDING
from code_transformer.preprocessing.graph.distances import DistanceBinning, UNREACHABLE
import torch
class TestBinning(unittest.TestCase):
def test_with_unreachable(self):
values = torch.arange(-5, 6)
values = torch.cat([values, torch.tensor([1000])])
n_bins = 8
DB = DistanceBinning(n_bins, n_fixed=3)
ixs, bins = DB(values.to(torch.long))
assert bins.allclose(torch.tensor([UNREACHABLE, -5, -3.5, -1, 0, 1, 3.5, 5]))
assert bins[ixs].allclose(torch.tensor([-5, -5, -3.5, -3.5, -1, 0, 1, 3.5, 3.5, 5, 5, UNREACHABLE]))
def test_without_unreachable(self):
values = torch.arange(-5, 6)
n_bins = 8
DB = DistanceBinning(n_bins, n_fixed=3)
ixs, bins = DB(values.to(torch.long))
assert bins.allclose(torch.tensor([UNREACHABLE, -5, -3.5, -1, 0, 1, 3.5, 5]))
assert bins[ixs].allclose(torch.tensor([-5, -5, -3.5, -3.5, -1, 0, 1, 3.5, 3.5, 5, 5]))
def test_all_fixed(self):
values = torch.arange(-5, 6)
n_bins = len(values) + 1 # account for the UNREACHABLE bin
DB = DistanceBinning(n_bins, n_fixed=len(values))
ixs, bins = DB(values.to(torch.long))
# bins should be:
# [UNREACHABLE, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]
assert bins.to(torch.long).allclose(torch.cat([torch.tensor([UNREACHABLE], dtype=torch.long), values]))
# binned values should be:
# [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]
assert bins[ixs].allclose(values.to(torch.float32))
def test_all_fixed_even_number(self):
values = torch.arange(-5, 5)
n_bins = len(values) + 1 # account for the UNREACHABLE bin
DB = DistanceBinning(n_bins, n_fixed=len(values))
ixs, bins = DB(values.to(torch.long))
# bins should be:
        # [UNREACHABLE, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert bins.to(torch.long).allclose(torch.cat([torch.tensor([UNREACHABLE], dtype=torch.long), values]))
# binned values should be:
# [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert bins[ixs].allclose(values.to(torch.float32))
def test_all_fixed_with_unreachable(self):
values_orig = torch.arange(-5, 6)
values = torch.cat([values_orig, torch.tensor([1000]), torch.tensor([-1000])])
n_bins = len(values) - 1
DB = DistanceBinning(n_bins, n_fixed=len(values) - 2)
ixs, bins = DB(values.to(torch.long))
# bins should be:
# [UNREACHABLE, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]
assert bins.to(torch.long).allclose(torch.cat([torch.tensor([UNREACHABLE], dtype=torch.long), values_orig]))
# binned values should be:
# [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, UNREACHABLE, UNREACHABLE]
assert bins[ixs].allclose(torch.cat([values_orig, torch.tensor([UNREACHABLE]),
torch.tensor([UNREACHABLE])]).to(torch.float32))
def test_all_fixed_with_unreachable_alternative(self):
values_orig = torch.arange(-50, 51)
values = torch.cat([values_orig, torch.tensor([1000])])
n_bins = len(values)
DB = DistanceBinning(n_bins, n_fixed=len(values) - 1)
ixs, bins = DB(values.to(torch.long))
assert bins.to(torch.long).allclose(torch.cat([torch.tensor([UNREACHABLE], dtype=torch.long), values_orig]))
assert bins[ixs].allclose(torch.cat([values_orig, torch.tensor([UNREACHABLE])]).to(torch.float32))
def test_mixed_positive_negative(self):
values = torch.tensor([5, -4, 3, -2, 1, 0, 8, 8, -8, -8])
n_bins = 6
DB = DistanceBinning(n_bins, n_fixed=3)
ixs, bins = DB(values.to(torch.long))
self.assertTrue(bins.allclose(torch.tensor([UNREACHABLE, -8, -1, 0, 1, 8], dtype=torch.float)))
self.assertTrue(bins[ixs].allclose(torch.tensor([8, -8, 8, -8, 1, 0, 8, 8, -8, -8], dtype=torch.float)))
def test_2d_matrix(self):
values = torch.arange(-6, 7, step=1).unsqueeze(0).repeat((7, 1))
n_bins = 10
DB = DistanceBinning(n_bins, n_fixed=5)
ixs, bins = DB(values.to(torch.long))
self.assertTrue(bins[ixs][0].allclose(
torch.tensor([-6, -6, -4.5, -4.5, -2, -1, 0, 1, 2, 4.5, 4.5, 6, 6], dtype=torch.float)))
def test_fewer_unique_values_than_bins(self):
values = torch.arange(-10, 11, step=5).unsqueeze(0).repeat((3, 1))
n_bins = 32
DB = DistanceBinning(n_bins, n_fixed=5)
ixs, bins = DB(values.to(torch.long))
self.assertTrue(bins.allclose(torch.cat([torch.tensor([UNREACHABLE, -10, -5, 0, 5, 10], dtype=torch.float),
torch.tensor([BIN_PADDING], dtype=torch.float).expand(26)])))
def test_uneven_bins(self):
values = torch.arange(-10, 11, step=1)
n_bins = 7
DB = DistanceBinning(n_bins, n_fixed=5)
ixs, bins = DB(values.to(torch.long))
self.assertTrue(bins.allclose(torch.tensor([UNREACHABLE, -2, -1, 0, 1, 2, 10], dtype=torch.float)))
def test_only_positive(self):
values = torch.arange(0, 9)
n_bins = 8
DB = DistanceBinning(n_bins, n_fixed=5)
ixs, bins = DB(values.to(torch.long))
self.assertTrue(bins[ixs].allclose(torch.tensor([0, 1, 2, 3, 4, 6, 6, 8, 8], dtype=torch.float)))
def test_continuous_distances(self):
values = torch.tensor([0.1, 1.2, 2.3, 4.5, 4.5, 5.6, 6.7, 7.8, 8.9])
n_bins = 8
DB = DistanceBinning(n_bins, n_fixed=5)
ixs, bins = DB(values)
self.assertEqual(bins[0], 0.1)
self.assertEqual(bins[-1], 8.9)
|
py | b40ff7b1e13ea019ade68de4011d34c71e1db8fc | """
A collection of some useful ocean functions. These are taken from a range of
MATLAB toolboxes as well as from ocean_funcs.ncl, which in turn has taken them
from the CSIRO SEAWATER (now GSW) MATLAB toolbox.
The NCL code can be found at:
http://www.ncl.ucar.edu/Support/talk_archives/2013/att-1501/ocean_funcs.ncl__size_15540__creation-date_
The MATLAB toolboxes used includes:
http://www.cmar.csiro.au/datacentre/ext_docs/seawater.htm
http://mooring.ucsd.edu/software/matlab/doc/toolbox/ocean/
http://www.mbari.org/staff/etp3/ocean1.htm
See also:
Feistel, R., A new extended Gibbs thermodynamic potential of seawater,
Prog. Oceanogr., 58, 43-115,
http://authors.elsevier.com/sd/article/S0079661103000880 corrigendum 61
(2004) 99, 2003.
Fofonoff, P. & Millard, R.C. Unesco 1983. Algorithms for computation of
fundamental properties of seawater, 1983. Unesco Tech. Pap. in Mar. Sci.,
No. 44.
Jackett, D. R., T. J. McDougall, R. Feistel, D. G. Wright, and S. M.
Griffies, Updated algorithms for density, potential temperature,
conservative temperature and freezing temperature of seawater, Journal of
Atmospheric and Oceanic Technology, submitted, 2005.
The Simpson-Hunter parameter is described in:
Simpson, JH, and JR Hunter. "Fronts in the Irish Sea." Nature 250 (1974):
404-6.
The relative humidity from dew point temperature and ambient temperature is
taken from:
http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
Provides functions:
- pressure2depth : convert pressure (decibars) to depth in metres
- depth2pressure : convert depth in metres to pressure in decibars
- dT_adiab_sw : calculate adiabatic temperature gradient
- theta_sw : calculate potential temperature for sea water
- cp_sw : calculate constant pressure specific heat for seawater
- sw_smow : calculate density of Standard Mean Ocean Water
- sw_dens0 : calculate seawater density at atmospheric surface pressure
- sw_seck : calculate Secant Bulk Modulus (K) of seawater
- sw_dens : calculate density from temperature, salinity and pressure
- sw_svan : calculate specific volume anomaly (only use if you don't
already have density)
- sw_sal78 : calculate salinity from conductivity, temperature and pressure
based on the Fofonoff and Millard (1983) SAL78 FORTRAN function
- sw_sal80 : calculate salinity from conductivity, temperature and pressure
based on the UCSD sal80.m function (identical approach in sw_salinity)
- sw_salinity : calculate salinity from conductivity, temperature and
pressure (identical approach in sw_sal80)
- dens_jackett : alternative formulation for calculating density from
temperature and salinity (after Jackett et al. (2005)
- pea: calculate the potential energy anomaly (stratification index).
- simpsonhunter : calculate the Simpson-Hunter parameter to predict frontal
locations.
- mixedlayerdepth : calculate the mixed layer depth using the ERSEM
definition.
- stokes : calculate the Stokes parameter.
- dissipation : calculate the tidal dissipation from a current speed.
- calculate_rhum : calculate relative humidity from dew point temperature
and ambient temperature.
Pierre Cazenave (Plymouth Marine Laboratory)
"""
import numpy as np
import matplotlib.pyplot as plt
# Define some commonly used constants.
c68 = 1.00024 # conversion constant to T68 temperature scale.
c90 = 0.99976 # conversion constant to T90 temperature scale.
def _tests():
"""
Put some (sort of) unit tests in here to make sure the functions work as
expected.
"""
test_lat = 30
test_z = np.logspace(0.1, 4, 50) # log depth distribution
test_p = np.logspace(0.1, 4, 50) # log pressure distribution
res_p1 = depth2pressure(test_z, test_lat)
res_z1 = pressure2depth(res_p1, test_lat)
res_z2 = pressure2depth(test_p, test_lat)
res_p2 = depth2pressure(res_z2, test_lat)
# Graph the differences
if False:
fig0 = plt.figure(figsize=(12, 10))
ax0 = fig0.add_subplot(1, 2, 1)
ax0.loglog(test_z, res_z1 - test_z, '.')
ax0.set_xlabel('Depth (m)')
ax0.set_ylabel('Difference (m)')
ax0.set_title('depth2pressure <-> pressure2depth')
ax1 = fig0.add_subplot(1, 2, 2)
ax1.loglog(test_p, res_p2 - test_p, '.')
ax1.set_xlabel('Pressure (dbar)')
ax1.set_ylabel('Difference (dbar)')
ax1.set_title('pressure2depth <-> depth2pressure ')
fig0.show()
# Input parameters
test_t = np.array(40)
test_s = np.array(40)
test_p = np.array(10000)
test_pr = np.array(0)
test_c = np.array(1.888091)
test_td = np.array(20) # for dens_jackett
test_sd = np.array(20) # for dens_jackett
test_pd = np.array(1000) # for dens_jackett
test_cond = np.array([100, 65000]) # for cond2salt
test_h = np.array((10, 20, 30, 100)) # depths for stokes
test_U = 0.25 # U for stokes and dissipation
test_omega = 1 / 44714.1647021416 # omega for stokes
test_z0 = np.array((0.0025)) # z0 for stokes
test_rho = 1025
test_temp = np.arange(-20, 50, 10)
test_dew = np.linspace(0, 20, len(test_temp))
# Use some of the Fofonoff and Millard (1983) checks.
res_svan = sw_svan(test_t, test_s, test_p)
print('Steric anomaly\nFofonoff and Millard (1983):\t9.8130210e-6\nsw_svan:\t\t\t{}\n'.format(res_svan))
res_z = pressure2depth(test_p, test_lat)
print('Pressure to depth\nFofonoff and Millard (1983):\t9712.653\npressure2depth:\t\t\t{}\n'.format(res_z))
# The return to depth is a bit inaccurate, not sure why.
res_pres = depth2pressure(res_z, test_lat)
    print('Depth to pressure\nFofonoff and Millard (1983):\t9712.653\ndepth2pressure:\t\t\t{}\n'.format(res_pres))
res_cp = cp_sw(test_t, test_s, test_p)
print('Specific heat of seawater\nFofonoff and Millard (1983):\t3849.500\ncp_sw:\t\t\t\t{}\n'.format(res_cp))
res_atg = dT_adiab_sw(test_t, test_s, test_p)
print('Adiabatic temperature gradient\nFofonoff and Millard (1983):\t0.0003255976\ndT_adiab_sw:\t\t\t{}\n'.format(res_atg))
res_theta = theta_sw(test_t, test_s, test_p, test_pr)
print('Potential temperature\nFofonoff and Millard (1983):\t36.89073\ntheta_sw:\t\t\t{}\n'.format(res_theta))
# Haven't got the right input values for sal78 and sw_salinity, but the
# outputs match the MATLAB functions, so I'm assuming they're OK...
# res_salinity = sw_salinity(test_c, test_t, test_p)
# print('Salinity\nFofonoff and Millard (1983):\t40\nsw_salinity:\t\t\t{}\n'.format(res_salinity))
res_sal78 = sw_sal78(test_c, test_t, test_p)
print('Salinity\nFofonoff and Millard (1983):\t40\nsw_sal78:\t\t\t{}\n'.format(res_sal78))
# Haven't got the right input values for sal78 and sw_salinity, but the
# outputs match the MATLAB functions, so I'm assuming they're OK...
# test_c, test_t, test_p = np.array(1.888091), np.array(40), np.array(10000)
# res_sal80 = sw_sal80(test_c, test_t, test_p)
# print('Salinity\nFofonoff and Millard (1983):\t40\nsw_sal80:\t\t\t{}\n'.format(res_sal80))
res_dens = dens_jackett(test_td, test_sd, test_pd)
print('Jackett density\nJackett et al. (2005):\t1017.728868019642\ndens_jackett:\t\t{}\n'.format(res_dens))
res_salt = cond2salt(test_cond)
print('Conductivity to salinity\nUSGS:\t\t0.046,\t\t\t44.016\ncond2salt:\t{},\t{}'.format(res_salt[0], res_salt[1]))
res_stokes, res_u_star, res_delta = stokes(test_h, test_U, test_omega, test_z0, U_star=True, delta=True)
print('Stokes number\nSouza (2013):\tS:\tTEST\tstokes:\tS:{}\n\t\t\tSouza (2013):\tU*:\tTEST\t{}\n\t\t\tSouza (2013):\tdelta:\tTEST\t{}\n'.format(res_stokes, res_u_star, res_delta))
res_dissipation = dissipation(test_rho, test_U)
print('Tidal dissipation\nKnown good:\t0.0400390625\ndissipation():\t{}'.format(res_dissipation))
valid_rhum = np.array((487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, 44.70251971, 31.67003471))
rhum = calculate_rhum(test_dew, test_temp)
for hum in zip(rhum, valid_rhum):
print('Relative humidity:\tvalid: {:.3f}\tcalculate_rhum: {:.3f}\t(difference = {:.3f})'.format(hum[1], hum[0], np.diff(hum)[0]))
def pressure2depth(p, lat):
"""
Convert from pressure in decibars to depth in metres.
Parameters
----------
p : ndarray
Pressure (1D array) in decibars.
lat : ndarray
Latitudes for samples in p.
Returns
-------
z : ndarray
Water depth in metres.
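    Examples
    --------
    Check value from this module's _tests() (Fofonoff and Millard, 1983):
    10000 decibars at latitude 30 is a depth of roughly 9712.653 m.
    >>> pressure2depth(10000, 30)  # doctest: +SKIP
    9712.653...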
"""
c1 = 9.72659
c2 = -2.1512e-5
c3 = 2.279e-10
c4 = -1.82e-15
gam = 2.184e-6
y = np.abs(lat)
rad = np.sin(np.deg2rad(y))**2
gy = 9.780318 * (1.0 + (rad * 5.2788e-3) + (rad**2 * 2.36e-5))
bline = gy + (gam * 0.5 * p)
tline = (c1 * p) + (c2 * p**2.0) + (c3 * p**3.0) + (c4 * p**4.0)
z = tline / bline
return z
def depth2pressure(z, lat):
"""
Convert from depth in metres to pressure in decibars.
Parameters
----------
z : ndarray
Depth (1D array) in metres. Must be positive down (negative values are
set to zero before conversion to pressure).
lat : ndarray
Latitudes for samples in z.
Returns
-------
p : ndarray
Pressure in decibars.
"""
# Set negative depths to 0. We assume positive depth values (as in the
# docstring).
pz = z.copy()
if isinstance(pz, np.ndarray):
pz[z < 0] = 0
c2 = 2.21e-6
Y = np.sin(np.deg2rad(np.abs(lat)))
c1 = (5.92 + (5.25 * Y**2.0)) * 1.e-3
p = ((1.0 - c1) - np.sqrt((1.0 - c1)**2.0 - (4.0 * c2 * pz))) / (2.0 * c2)
return p
def dT_adiab_sw(t, s, p):
"""
Calculate adiabatic temperature gradient (degrees Celsius dbar^{-1})
Parameters
----------
t : ndarray
Temperature (Celsius)
s : ndarray
Salinity (PSU)
p : ndarray
Pressure (decibars)
All three arrays must have the same shape.
Returns
-------
atg : ndarray
Adiabatic temperature gradient
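    Examples
    --------
    Check value from this module's _tests() (Fofonoff and Millard, 1983):
    t=40, s=40, p=10000 gives roughly 3.255976e-4.
    >>> dT_adiab_sw(40, 40, 10000)  # doctest: +SKIP
    0.0003255976...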
"""
# Constants
a0 = 3.5803e-5
a1 = 8.5258e-6
a2 = -6.836e-8
a3 = 6.6228e-10
b0 = 1.8932e-6
b1 = -4.2393e-8
c0 = 1.8741e-8
c1 = -6.7795e-10
c2 = 8.733e-12
c3 = -5.4481e-14
d0 = -1.1351e-10
d1 = 2.7759e-12
e0 = -4.6206e-13
e1 = 1.8676e-14
e2 = -2.1687e-16
T68 = t * c68 # convert to 1968 temperature scale
atg = a0 + (a1 + (a2 + a3 * T68) * T68) * T68 + (b0 + b1 * T68) * (s - 35) + \
((c0 + (c1 + (c2 + c3 * T68) * T68) * T68) + (d0 + d1 * T68) *
(s - 35)) * p + (e0 + (e1 + e2 * T68) * T68) * p * p
return atg
def theta_sw(t, s, p, pr):
"""
Calculate potential temperature for seawater from temperature, salinity and
pressure.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
pr : ndarray
Reference pressure (decibars) either a scalar or the same shape as t.
Returns
-------
th : ndarray
Potential temperature (Celsius)
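    Examples
    --------
    Check value from this module's _tests() (Fofonoff and Millard, 1983):
    t=40, s=40, p=10000, pr=0 gives roughly 36.89073.
    >>> theta_sw(40, 40, 10000, 0)  # doctest: +SKIP
    36.89073...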
"""
dP = pr - p # pressure difference.
# 1st iteration
dth = dP * dT_adiab_sw(t, s, p)
th = (t * c68) + (0.5 * dth)
q = dth
    # 2nd iteration
dth = dP * dT_adiab_sw(th / c68, s, (p + (0.5 * dP)))
th = th + ((1 - (1 / np.sqrt(2))) * (dth - q))
q = ((2 - np.sqrt(2)) * dth) + (((3 / np.sqrt(2)) - 2) * q)
# 3rd iteration
dth = dP * dT_adiab_sw(th / c68, s, (p + (0.5 * dP)))
th = th + ((1 + (1 / np.sqrt(2))) * (dth - q))
q = ((2 + np.sqrt(2)) * dth) + (((-3 / np.sqrt(2)) - 2) * q)
    # 4th iteration
dth = dP * dT_adiab_sw(th / c68, s, (p + dP))
th = (th + (dth - (2 * q)) / 6) / c68
return th
def cp_sw(t, s, p):
"""
Calculate constant pressure specific heat (cp) for seawater, from
temperature, salinity and pressure.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
Returns
-------
cp : ndarray
        Constant pressure specific heat (J kg^{-1} C^{-1}).
Notes
-----
Valid temperature range is -2 to 40C and salinity is 0-42 PSU. Warnings are
issued if the data fall outside these ranges.
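    Examples
    --------
    Check value from this module's _tests() (Fofonoff and Millard, 1983):
    t=40, s=40, p=10000 gives roughly 3849.500. Arrays are used because the
    range checks in the function body call .min()/.max() on the inputs.
    >>> cp_sw(np.array(40), np.array(40), np.array(10000))  # doctest: +SKIP
    3849.5...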
"""
# Check for values outside the valid ranges.
if t.min() < -2:
n = np.sum(t < -2)
print('WARNING: {} values below minimum value temperature (-2C)'.format(n))
if t.max() > 40:
n = np.sum(t > 40)
print('WARNING: {} values above maximum value temperature (40C)'.format(n))
if s.min() < 0:
n = np.sum(s < 0)
print('WARNING: {} values below minimum salinity value (0 PSU)'.format(n))
if s.max() > 42:
n = np.sum(s > 42)
print('WARNING: {} values above maximum salinity value (42C)'.format(n))
# Convert from decibar to bar and temperature to the 1968 temperature scale.
pbar = p / 10.0
T1 = t * c68
# Specific heat at p = 0
# Temperature powers
T2 = T1**2
T3 = T1**3
T4 = T1**4
# Empirical constants
c0 = 4217.4
c1 = -3.720283
c2 = 0.1412855
c3 = -2.654387e-3
c4 = 2.093236e-5
a0 = -7.643575
a1 = 0.1072763
a2 = -1.38385e-3
b0 = 0.1770383
b1 = -4.07718e-3
b2 = 5.148e-5
cp_0t0 = c0 + (c1 * T1) + (c2 * T2) + (c3 * T3) + (c4 * T4)
A = a0 + (a1 * T1) + (a2 * T2)
B = b0 + (b1 * T1) + (b2 * T2)
cp_st0 = cp_0t0 + (A * s) + (B * s**1.5)
# Pressure dependance
a0 = -4.9592e-1
a1 = 1.45747e-2
a2 = -3.13885e-4
a3 = 2.0357e-6
a4 = 1.7168e-8
b0 = 2.4931e-4
b1 = -1.08645e-5
b2 = 2.87533e-7
b3 = -4.0027e-9
b4 = 2.2956e-11
c0 = -5.422e-8
c1 = 2.6380e-9
c2 = -6.5637e-11
c3 = 6.136e-13
d1_cp = (pbar * (a0 + (a1 * T1) + (a2 * T2) + (a3 * T3) + (a4 * T4))) + \
(pbar**2 * (b0 + (b1 * T1) + (b2 * T2) + (b3 * T3) + (b4 * T4))) + \
(pbar**3 * (c0 + (c1 * T1) + (c2 * T2) + (c3 * T3)))
d0 = 4.9247e-3
d1 = -1.28315e-4
d2 = 9.802e-7
d3 = 2.5941e-8
d4 = -2.9179e-10
e0 = -1.2331e-4
e1 = -1.517e-6
e2 = 3.122e-8
f0 = -2.9558e-6
f1 = 1.17054e-7
f2 = -2.3905e-9
f3 = 1.8448e-11
g0 = 9.971e-8
h0 = 5.540e-10
h1 = -1.7682e-11
h2 = 3.513e-13
j1 = -1.4300e-12
d2_cp = pbar * \
((s * (d0 + (d1 * T1) + (d2 * T2) + (d3 * T3) + (d4 * T4))) +
(s**1.5 * (e0 + (e1 * T1) + (e2 * T2)))) + \
(pbar**2 * ((s * (f0 + (f1 * T1) + (f2 * T2) + (f3 * T3))) +
(g0 * s**1.5))) + (pbar**3 * ((s * (h0 + (h1 * T1) + (h2 * T2))) +
(j1 * T1 * s**1.5)))
cp = cp_st0 + d1_cp + d2_cp
    return cp
def sw_smow(t):
"""
Calculate the density of Standard Mean Ocean Water (pure water).
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
Returns
-------
rho : ndarray
Density in kg m^{-3}.
"""
# Coefficients
a0 = 999.842594
a1 = 6.793952e-2
a2 = -9.095290e-3
a3 = 1.001685e-4
a4 = -1.120083e-6
a5 = 6.536332e-9
T68 = t * c68
dens = a0 + (a1 * T68) + (a2 * T68**2) + (a3 * T68**3) \
+ (a4 * T68**4) + (a5 * T68**5)
return dens
def sw_dens0(t, s):
"""
Calculate sea water density at atmospheric surface pressure.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s: ndarray
Salinity (PSU). Must be the same size as t.
Returns
-------
dens : ndarray
        Seawater density at atmospheric surface pressure (kg m^{-3}).
"""
b0 = 8.24493e-1
b1 = -4.0899e-3
b2 = 7.6438e-5
b3 = -8.2467e-7
b4 = 5.3875e-9
c0 = -5.72466e-3
c1 = 1.0227e-4
c2 = -1.6546e-6
d0 = 4.8314e-4
t68 = t * c68
dens = s * (b0 + (b1 * t68) + (b2 * t68**2) + (b3 * t68**3) + (b4 * t68**4)) + \
s**1.5 * (c0 + (c1 * t68) + (c2 * t68**2)) + (d0 * s**2)
dens = dens + sw_smow(t68)
return dens
def sw_seck(t, s, p):
"""
Calculate Secant Bulk Modulus (K) of seawater.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
Returns
-------
k : ndarray
Secant Bulk Modulus of seawater.
"""
# Compression terms
T68 = t * c68
Patm = p / 10.0 # convert to bar
h3 = -5.77905e-7
h2 = 1.16092e-4
h1 = 1.43713e-3
h0 = 3.239908
AW = h0 + (h1 * T68) + (h2 * T68**2) + (h3 * T68**3)
k2 = 5.2787e-8
k1 = -6.12293e-6
k0 = 8.50935e-5
BW = k0 + (k1 + k2 * T68) * T68
e4 = -5.155288e-5
e3 = 1.360477e-2
e2 = -2.327105
e1 = 148.4206
e0 = 19652.21
KW = e0 + (e1 + (e2 + (e3 + e4 * T68) * T68) * T68) * T68
# K at atmospheric pressure
j0 = 1.91075e-4
i2 = -1.6078e-6
i1 = -1.0981e-5
i0 = 2.2838e-3
A = AW + s * (i0 + (i1 * T68) + (i2 * T68**2)) + (j0 * s**1.5)
m2 = 9.1697e-10
m1 = 2.0816e-8
m0 = -9.9348e-7
# Equation 18
B = BW + (m0 + (m1 * T68) + (m2 * T68**2)) * s
f3 = -6.1670e-5
f2 = 1.09987e-2
f1 = -0.603459
f0 = 54.6746
g2 = -5.3009e-4
g1 = 1.6483e-2
g0 = 7.944e-2
# Equation 16
K0 = KW + s * (f0 + (f1 * T68) + (f2 * T68**2) + (f3 * T68**3)) + \
s**1.5 * (g0 + (g1 * T68) + (g2 * T68**2))
# K at t, s, p
K = K0 + (A * Patm) + (B * Patm**2) # Equation 15
return K
def sw_dens(t, s, p):
"""
Convert temperature, salinity and pressure to density.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
Returns
-------
rho : ndarray
Density in kg m^{-3}.
Notes
-----
Valid temperature range is -2 to 40C, salinity is 0-42 and pressure is
0-10000 decibars. Warnings are issued if the data fall outside these
ranges.
"""
# Check for values outside the valid ranges.
if t.min() < -2:
n = np.sum(t < -2)
print('WARNING: {} values below minimum value temperature (-2C)'.format(n))
if t.max() > 40:
n = np.sum(t > 40)
print('WARNING: {} values above maximum value temperature (40C)'.format(n))
if s.min() < 0:
n = np.sum(s < 0)
print('WARNING: {} values below minimum salinity value (0 PSU)'.format(n))
if s.max() > 42:
n = np.sum(s > 42)
print('WARNING: {} values above maximum salinity value (42C)'.format(n))
if p.min() < 0:
n = np.sum(p < 0)
print('WARNING: {} values below minimum pressure value (0 decibar)'.format(n))
if p.max() > 10000:
n = np.sum(p > 10000)
print('WARNING: {} values above maximum pressure value (10000 decibar)'.format(n))
dens0 = sw_dens0(t, s)
k = sw_seck(t, s, p)
Patm = p / 10.0 # pressure in bars
rho = dens0 / (1 - Patm / k)
return rho
def sw_svan(t, s, p):
"""
Calculate the specific volume (steric) anomaly.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
Returns
-------
svan : ndarray
        Specific volume anomaly (m^{3} kg^{-1}).
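    Examples
    --------
    Check value from this module's _tests() (Fofonoff and Millard, 1983):
    t=40, s=40, p=10000 gives roughly 9.8130210e-6.
    >>> sw_svan(np.array(40), np.array(40), np.array(10000))  # doctest: +SKIP
    9.813021...e-06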
"""
rho = sw_dens(t, s, p)
rho0 = sw_dens(np.zeros(p.shape), np.ones(p.shape) * 35.0, p)
svan = (1 / rho) - (1 / rho0)
return svan
def sw_sal78(c, t, p):
"""
Simplified version of the original SAL78 function from Fofonoff and Millard
(1983). This does only the conversion from conductivity, temperature and
pressure to salinity. Returns zero for conductivity values below 0.0005.
Parameters
----------
c : ndarray
Conductivity (S m{-1})
t : ndarray
Temperature (degrees Celsius IPTS-68)
p : ndarray
Pressure (decibars)
Returns
-------
s : salinity (PSU-78)
Notes
-----
The Conversion from IPTS-68 to ITS90 is:
T90 = 0.99976 * T68
T68 = 1.00024 * T90
These constants are defined here as c90 (0.99976) and c68 (1.00024).
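    Examples
    --------
    Check value from this module's _tests(): c=1.888091, t=40, p=10000 gives a
    salinity of roughly 40.
    >>> sw_sal78(np.array(1.888091), np.array(40), np.array(10000))  # doctest: +SKIP
    40.000...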
"""
p = p / 10
C1535 = 1.0
DT = t - 15.0
# Convert conductivity to salinity
rt35 = np.array((((1.0031E-9 * t - 6.9698E-7) * t + 1.104259E-4)
* t + 2.00564E-2) * t + 0.6766097)
a0 = np.array(-3.107E-3 * t + 0.4215)
b0 = np.array((4.464E-4 * t + 3.426E-2) * t + 1.0)
c0 = np.array(((3.989E-12 * p - 6.370E-8) * p + 2.070E-4) * p)
R = np.array(c / C1535)
RT = np.sqrt(np.abs(R / (rt35 * (1.0 + c0 / (b0 + a0 * R)))))
s = np.array(((((2.7081 * RT - 7.0261) * RT + 14.0941) * RT + 25.3851)
* RT - 0.1692) * RT + 0.0080 + (DT / (1.0 + 0.0162 * DT)) *
(((((-0.0144 * RT + 0.0636) * RT - 0.0375) *
RT - 0.0066) * RT - 0.0056) * RT + 0.0005))
# Zero salinity trap
if len(s.shape) > 0:
s[c < 5e-4] = 0
return s
def sw_sal80(c, t, p):
"""
Converts conductivity, temperature and pressure to salinity.
Converted from SAL80 MATLAB function:
http://mooring.ucsd.edu/software/matlab/doc/ocean/index.html
originally authored by S. Chiswell (1991).
Parameters
----------
c : ndarray
Conductivity (S m{-1})
t : ndarray
Temperature (degrees Celsius IPTS-68)
p : ndarray
Pressure (decibars)
Returns
-------
s : salinity (PSU-78)
Notes
-----
The Conversion from IPTS-68 to ITS90 is:
T90 = 0.99976 * T68
T68 = 1.00024 * T90
These constants are defined here as c90 (0.99976) and c68 (1.00024).
References
----------
UNESCO Report No. 37, 1981 Practical Salinity Scale 1978: E.L. Lewis, IEEE
Ocean Engineering, Jan., 1980
"""
# c = c / 10 # [S/m]
r0 = 4.2914
tk = 0.0162
a = np.array([2.070e-05, -6.370e-10, 3.989e-15])
b = np.array([3.426e-02, 4.464e-04, 4.215e-01, -3.107e-3])
aa = np.array([0.0080, -0.1692, 25.3851, 14.0941, -7.0261, 2.7081])
bb = np.array([0.0005, -0.0056, -0.0066, -0.0375, 0.0636, -0.0144])
cc = np.array([6.766097e-01, 2.00564e-02, 1.104259e-04,
-6.9698e-07, 1.0031e-09])
rt = cc[0] + cc[1] * t + cc[2] * t * t + cc[3] * t * t * t + cc[4] * t * t * t * t
rp = p * (a[0] + a[1] * p + a[2] * p * p)
rp = 1 + rp / (1 + b[0] * t + b[1] * t * t + b[2] * c / r0 + b[3] * c / r0 * t)
rt = c / (r0 * rp * rt)
art = aa[0]
brt = bb[0]
for ii in range(1, 6):
rp = rt**(ii / 2.0)
art = art + aa[ii] * rp
brt = brt + bb[ii] * rp
rt = t - 15.0
s = art + (rt / (1 + tk * rt)) * brt
return s
def sw_salinity(c, t, p):
"""
Calculate salinity from conductivity, temperature and salinity.
Converted from a salinity MATLAB function from:
http://www.mbari.org/staff/etp3/matlab/salinity.m
originally authored by Edward T Peltzer (MBARI).
Parameters
----------
c : ndarray
Conductivity (1D array) in S m^{-1}.
t : ndarray
Temperature (1D array) in degrees Celsius.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
Returns
-------
sw_salinity : ndarray
Salinity in PSU (essentially unitless)
"""
# Define constants
C15 = 4.2914
a0 = 0.008
a1 = -0.1692
a2 = 25.3851
a3 = 14.0941
a4 = -7.0261
a5 = 2.7081
b0 = 0.0005
b1 = -0.0056
b2 = -0.0066
b3 = -0.0375
b4 = 0.0636
b5 = -0.0144
c0 = 0.6766097
c1 = 2.00564e-2
c2 = 1.104259e-4
c3 = -6.9698e-7
c4 = 1.0031e-9
d1 = 3.426e-2
d2 = 4.464e-4
d3 = 4.215e-1
d4 = -3.107e-3
# The e# coefficients reflect the use of pressure in dbar rather than in
# Pascals (SI).
e1 = 2.07e-5
e2 = -6.37e-10
e3 = 3.989e-15
k = 0.0162
# Calculate internal variables
R = c / C15
rt = c0 + (c1 + (c2 + (c3 + c4 * t) * t) * t) * t
Rp = 1.0 + (e1 + (e2 + e3 * p) * p) * p / (1.0 + (d1 + d2 * t)
* t + (d3 + d4 * t) * R)
Rt = R / Rp / rt
sqrt_Rt = np.sqrt(Rt)
# Calculate salinity
salt = a0 + (a1 + (a3 + a5 * Rt) * Rt) * sqrt_Rt + (a2 + a4 * Rt) * Rt
dS = b0 + (b1 + (b3 + b5 * Rt) * Rt) * sqrt_Rt + (b2 + b4 * Rt) * Rt
dS = dS * (t - 15.0) / (1 + k * (t - 15.0))
sw_salinity = salt + dS
return sw_salinity
def dens_jackett(th, s, p=None):
"""
Computes the in-situ density according to the Jackett et al. (2005)
equation of state for sea water, which is based on the Gibbs potential
developed by Fiestel (2003).
The pressure dependence can be switched on (off by default) by giving an
absolute pressure value (> 0). s is salinity in PSU, th is potential
temperature in degrees Celsius, p is gauge pressure (absolute pressure
- 10.1325 dbar) and dens is the in-situ density in kg m^{-3}.
The check value is dens_jackett(20, 20, 1000) = 1017.728868019642.
Adopted from GOTM (www.gotm.net) (Original author(s): Hans Burchard
& Karsten Bolding) and the PMLPython script EqS.py.
Parameters
----------
th : ndarray
Potential temperature (degrees Celsius)
s : ndarray
Salinity (PSU)
p : ndarray, optional
Gauge pressure (decibar) (absolute pressure - 10.1325 decibar)
Returns
-------
dens : ndarray
In-situ density (kg m^{-3})
References
----------
Feistel, R., A new extended Gibbs thermodynamic potential of seawater,
Prog. Oceanogr., 58, 43-115,
http://authors.elsevier.com/sd/article/S0079661103000880 corrigendum 61
(2004) 99, 2003.
Jackett, D. R., T. J. McDougall, R. Feistel, D. G. Wright, and S. M.
Griffies, Updated algorithms for density, potential temperature,
conservative temperature and freezing temperature of seawater, Journal of
Atmospheric and Oceanic Technology, submitted, 2005.
"""
th2 = th * th
sqrts = np.sqrt(s)
anum = 9.9984085444849347e+02 + \
th * (7.3471625860981584e+00 +
th * (-5.3211231792841769e-02 +
th * 3.6492439109814549e-04)) + \
s * (2.5880571023991390e+00 -
th * 6.7168282786692355e-03 +
s * 1.9203202055760151e-03)
aden = 1.0 + \
th * (7.2815210113327091e-03 +
th * (-4.4787265461983921e-05 +
th * (3.3851002965802430e-07 +
th * 1.3651202389758572e-10))) + \
s * (1.7632126669040377e-03 -
th * (8.8066583251206474e-06 +
th2 * 1.8832689434804897e-10) +
sqrts * (5.7463776745432097e-06 +
th2 * 1.4716275472242334e-09))
# Add pressure dependence
if p is not None and np.any(p > 0.0):
pth = p * th
anum += p * (1.1798263740430364e-02 +
th2 * 9.8920219266399117e-08 +
s * 4.6996642771754730e-06 -
p * (2.5862187075154352e-08 +
th2 * 3.2921414007960662e-12))
aden += p * (6.7103246285651894e-06 -
pth * (th2 * 2.4461698007024582e-17 +
p * 9.1534417604289062e-18))
dens = anum/aden
return dens
def cond2salt(cond):
"""
Convert conductivity to salinity assuming constant temperature (25 celsius)
and pressure.
Parameters
----------
cond : ndarray
Conductivity in microsiemens per cm.
Returns
-------
salt : ndarray
Salinity in PSU.
References
----------
Schemel, L. E., 2001, Simplified conversions between specific conductance
and salinity units for use with data from monitoring stations, IEP
Newsletter, v. 14, no. 1, p. 17-18 [accessed July 27, 2004, at
http://www.iep.ca.gov/report/newsletter/2001winter/IEPNewsletterWinter2001.pdf]
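    Examples
    --------
    Check values from this module's _tests(): conductances of 100 and 65000
    microsiemens per cm give roughly 0.046 and 44.016 PSU.
    >>> cond2salt(np.array([100, 65000]))  # doctest: +SKIP
    array([ 0.046...,  44.016...])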
"""
# Some constants
k1, k2, k3, k4, k5, k6 = 0.012, -0.2174, 25.3283, 13.7714, -6.4788, 2.5842
# Ratio of specific conductance at 25 celsius to standard seawater
# (salinity equals 35) at 25 celsius (53.087 millisiemens per centimetre).
    R = cond / (53.087 * 1000)  # 53.087 mS cm^{-1} expressed in microsiemens per cm to match cond
salt = \
k1 + \
(k2 * np.power(R, 1/2.0)) + \
(k3 * R) + \
(k4 * np.power(R, 3/2.0)) + \
(k5 * np.power(R, 2)) + \
(k6 * np.power(R, 5/2.0))
return salt
def vorticity(x, y, u, v, vtype='averaged'):
"""
Calculate vorticity from a velocity field for an unstructured grid output
from FVCOM.
This is a Python translation of the FVCOM calc_vort.F function which for
some reason seems to be ignored in the main FVCOM code.
Parameters
----------
x, y : ndarray
Positions of the u and v samples. In FVCOM parlance, this is the 'xc'
and 'yc' or 'lonc' and 'latc' variables in the output file (element
centre positions).
u, v : ndarray
3D (i.e. vertically varying) u and v velocity components.
vtype : str, optional
Type of vorticity using either the vertically averaged velocity
(vtype='averaged') or the curl of the flux (vtype='flux'). Default is
vtype='averaged'.
Returns
-------
vort : ndarray
Calculated voriticity from the velocity components.
"""
# # Need to do some calculations on the model grid to find neighbours etc.
# # Use grid_tools for that.
# try:
# from grid_tools import triangleGridEdge as tge
# except:
# raise ImportError('Failed to import tge from the grid_tools.')
# # Some basic shape parameters
# nt, nz, nn = np.shape(u)
# if vtype == 'averaged':
# # Vertically average the velocity components.
# ua = np.mean(u, dims=0)
# va = np.mean(v, dims=0)
# elif vtype == 'flux':
# # Let's not do vertically averaged and instead do each vertical layer
# # separately.
    # for zz in xrange(0, nz):
    # The translated body above has never been completed, so make that explicit
    # rather than silently returning None (the docstring promises a `vort` array).
    raise NotImplementedError('vorticity() is currently a stub; the calc_vort.F translation is incomplete.')
def zbar(data, levels):
"""
Depth-average values in data.
Parameters
----------
data : ndarray
Values to be depth-averaged. Shape is [t, z, x] where t is time, z is
vertical and x is space (unstructured).
levels : ndarray
Array of vertical layer thicknesses (fraction in the range 0-1). Shape
is [z, x] or [t, z, x].
Returns
-------
databar : ndarray
Depth-averaged values in data.
Notes
-----
This is a naive implementation using a for-loop. A faster version is almost
certainly possible.
Also, the code could probably be cleaned up (possibly at the expense of
understanding) by transposing all the arrays to have the vertical (z)
dimension first and the others after. This would make summation along an
axis always be axis=0, rather than the current situation where I have to
check what the number of dimensions is and act accordingly.
"""
nt, nz, nx = data.shape
nd = np.ndim(levels)
databar = np.zeros((nt, nx))
for i in range(nz):
if nd == 2:
# Depth, space only.
databar = databar + (data[:, i, :] * levels[i, :])
elif nd == 3:
# Time, depth, space.
databar = databar + (data[:, i, :] * levels[:, i, :])
else:
raise IndexError('Unable to use the number of dimensions provided in the levels data.')
if nd == 2:
databar = (1.0 / np.sum(levels, axis=0)) * databar
elif nd == 3:
databar = (1.0 / np.sum(levels, axis=1)) * databar
else:
raise IndexError('Unable to use the number of dimensions provided in the levels data.')
return databar
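# A vectorised alternative to the explicit layer loop in zbar() (illustrative
# sketch only; nothing else in this module uses it). Broadcasting the layer
# thicknesses against the data gives the same result for the same input shapes.
def _zbar_vectorised(data, levels):
    """Depth-average `data` without an explicit layer loop (sketch of zbar())."""
    if np.ndim(levels) == 2:
        # levels is [z, x]: broadcast across the time dimension.
        return (data * levels[np.newaxis, :, :]).sum(axis=1) / levels.sum(axis=0)
    elif np.ndim(levels) == 3:
        # levels is [t, z, x]: shapes already match data.
        return (data * levels).sum(axis=1) / levels.sum(axis=1)
    else:
        raise IndexError('Unable to use the number of dimensions provided in the levels data.')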
def pea(temp, salinity, depth, levels):
"""
Calculate potential energy anomaly (stratification index).
Parameters
----------
temp : ndarray
Temperature data (depth-resolved).
salinity : ndarray
Salinity data (depth-resolved).
depth : ndarray
Water depth (positive down). Can be 1D (node) or 3D (time, siglay,
node).
levels : ndarray
Vertical levels (fractions of 0-1) (FVCOM = siglev).
Returns
-------
PEA : ndarray
Potential energy anomaly (J/m^{3}).
Notes
-----
As with the zbar code, this could do with cleaning up by transposing the
arrays so the depth dimension is always first. This would make calculations
which require an axis to always use the 0th one instead of either the 0th
or 1st.
"""
nd = np.ndim(depth)
g = 9.81
rho = dens_jackett(temp, salinity)
dz = np.abs(np.diff(levels, axis=0)) * depth
if nd == 1:
        # depth is 1D (node only), so dz has shape [siglay, node].
H = np.cumsum(dz, axis=0)
nz = dz.shape[0]
else:
# dims are [time, siglay, node].
H = np.cumsum(dz, axis=1)
nz = dz.shape[1]
# Depth-averaged density
rhobar = zbar(rho, dz)
# Potential energy anomaly.
PEA = np.zeros(rhobar.shape)
# Hofmeister thesis equation.
for i in range(nz):
if nd == 1:
PEA = PEA + ((rho[:, i, :] - rhobar) * g * H[i, :] * dz[i, :])
else:
PEA = PEA + ((rho[:, i, :] - rhobar) * g * H[:, i, :] * dz[:, i, :])
if nd == 1:
PEA = (1.0 / depth) * PEA
else:
PEA = (1.0 / depth.max(axis=1)) * PEA
return PEA
def simpsonhunter(u, v, depth, levels, sampling=False):
"""
Calculate the Simpson-Hunter parameter (h/u^{3}).
Parameters
----------
u, v : ndarray
Depth-resolved current vectors.
depth : ndarray
Water depth (m, +ve down). Must be on the same grid as u and v.
levels : ndarray
Vertical levels (fractions of 0-1) (FVCOM = siglev).
sampling : int, optional
If given, calculate the current speed maximum over `sampling' indices.
Returns
-------
SH : ndarray
Simpson-Hunter parameter (np.log10(m^{-2}s^{-3})).
References
----------
- Simpson, JH, and JR Hunter. "Fronts in the Irish Sea." Nature 250 (1974):
404-6.
- Holt, Jason, and Lars Umlauf. "Modelling the Tidal Mixing Fronts and
Seasonal Stratification of the Northwest European Continental Shelf."
Continental Shelf Research 28, no. 7 (April 2008): 887-903.
doi:10.1016/j.csr.2008.01.012.
"""
dz = np.abs(np.diff(levels, axis=0)) * depth
uv = zbar(np.sqrt(u**2 + v**2), dz)
    # bool is a subclass of int, so exclude the default sampling=False here and
    # use integer division so the array shape below stays an int.
    if isinstance(sampling, int) and not isinstance(sampling, bool):
        nd = uv.shape[0] // sampling
uvmax = np.empty((nd, uv.shape[-1]))
for i in range(nd):
uvmax[i, :] = uv[i * sampling:(i + 1) * sampling, :].max(axis=0)
else:
uvmax = uv
del(uv)
# Take the average of the maxima for the parameter calculation.
uvbar = uvmax.mean(axis=0)
SH = np.log10(depth / np.sqrt((uvbar**3)**2))
return SH
def mixedlayerdepth(rho, depth, thresh=0.03):
"""
Calculate the mixed layer depth based on a threshold in the vertical
density distribution.
Parameters
----------
rho : ndarray
Density in kg m^{3}.
depth : ndarray
Water depth (m, -ve down).
thresh : float, optional
Optionally specify a different threshold (use at your own risk!).
Defaults to 0.03kg m^{-3}.
Returns
-------
mld : ndarray
Depth at which the density exceeds the surface value plus the
threshold (m, -ve down).
Notes
-----
The mixed layer depth is given as the layer depth where the density is
greater than the threshold. As such, there is no interpolation between
layer depths (for now).
If you have coarse layers, you will resolve the mixed layer depth poorly.
You will also get odd patterns where the water depth happens to make the
vertical layer which is closest to the actual density threshold jump by
one, either up or down.
Really, I need to add a linear interpolation between the two closest
layers.
"""
rhosurface = rho[:, 0, :]
mld = np.max(np.ma.masked_where(rho < (rhosurface[:, np.newaxis, :] + thresh),
depth), axis=1)
return mld
def stokes(h, U, omega, z0, delta=False, U_star=False):
"""
Calculate the Stokes number for a given data set.
Parameters
----------
h : ndarray
Water depth (positive down) in metres.
U : float
        Constituent of interest's (e.g. M2) major axis in metres.
omega : float
Oscillatory frequency of the constituent of interest (e.g. M2) in
s^{-1}. For M2, omega is 1.4e-4.
z0 : float, ndarray
Roughness length in metres. Either a single value or an array the same
shape as the depth data.
delta : bool, optional
Return the oscillatory boundary layer thickness (delta).
U_star : bool, optional
Return the frictional velocity (U_star).
Returns
-------
S : ndarray
Stokes number.
delta : ndarray, optional
Oscillatory boundary layer thickness (Lamb, 1932).
U_star : ndarray, optional
Frictional velocity (U_star = Cd^{1/2}U)
Examples
--------
>>> h = 30
>>> z0 = 0.0025
>>> U = 0.25
>>> omega = 1 / 44714.1647021416
>>> S = stokes(h, U, omega, z0)
>>> S
0.70923635467504365
>>> S, U_star = stokes(h, U, omega, z0, U_star=True)
>>> U_star
0.011915170758540733
References
----------
Souza, A. J. "On the Use of the Stokes Number to Explain Frictional Tidal
Dynamics and Water Column Structure in Shelf Seas." Ocean Science 9, no.
2 (April 2, 2013): 391-98. doi:10.5194/os-9-391-2013.
Lamb, H. "Hydrodynamics", 6th Edn., Cambridge University Press, New York,
USA, p. 622, 1932.
"""
c1 = 0.25 # after Lamb (1932)
Cd = (0.4 / (1 + np.log(z0 / h)))**2
u_s = np.sqrt(Cd * U**2)
d = (c1 * u_s) / omega
S = d / h
if delta and U_star:
return S, d, u_s
elif delta and not U_star:
return S, d
elif not delta and U_star:
return S, u_s
else:
return S
def dissipation(rho, U, Cd=2.5e-3):
"""
Calculate tidal dissipation for a given tidal harmonic (or harmonics).
Parameters
----------
rho : ndarray
Density (kg m^{-3}). See dens_jackett() for calculating density from
temperature and salinity. Must be depth-averaged or a single value.
U : ndarray
Tidal harmonic major axis. Extend the array into the second dimension
to include results from multiple constituents.
Cd : float, ndarray, optional
If provided, a value for the quadratic drag coefficient. Defaults to
2.5e-3 (dimensionless). Can be an array whose size matches the number of locations in
rho.
Returns
-------
D : ndarray
Tidal dissipation (W m^{-2} for rho in kg m^{-3} and U in m s^{-1}).
References
----------
Souza, A. J. "On the Use of the Stokes Number to Explain Frictional Tidal
Dynamics and Water Column Structure in Shelf Seas." Ocean Science 9, no.
2 (April 2, 2013): 391-98. doi:10.5194/os-9-391-2013.
Pingree, R. D., and D. K. Griffiths. "Tidal Fronts on the Shelf Seas around
the British Isles." Journal of Geophysical Research: Oceans 83, no. C9
(1978): 4615-22. doi:10.1029/JC083iC09p04615.
"""
D = rho * Cd * np.abs(U)**3
return D
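# A minimal sketch (not part of the original module) of dissipation() with a
# couple of hypothetical depth-averaged densities and M2 major-axis speeds;
# the helper name and numbers are illustrative assumptions.
def _dissipation_example():
    """D = rho * Cd * |U|^3; with the numbers below and the default drag
    coefficient this gives roughly 1.3 and 4.4 W m^{-2}."""
    rho = np.array([1025.0, 1026.0])   # depth-averaged density (kg m^{-3})
    U = np.array([0.8, 1.2])           # tidal major-axis amplitude (m s^{-1})
    return dissipation(rho, U)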
def calculate_rhum(dew, temperature):
"""
Calculate relative humidity from dew temperature and ambient temperature.
This uses the range of constants which yields results accurate in the range
-20 to 50 Celsius.
Parameters
----------
dew : ndarray
Dew point temperature (Celsius).
temperature : ndarray
Ambient temperature (Celsius).
Returns
-------
rhum : ndarray
Relative humidity (%).
References
----------
http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
"""
m = 7.59138
Tn = 240.7263
rhum = 100 * 10**(m * ((dew / (dew + Tn)) - (temperature / (temperature + Tn))))
return rhum
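# A minimal sketch (not part of the original module) of calculate_rhum(): when
# the dew point equals the air temperature the result is 100 %, and it drops as
# the air warms relative to the dew point (the second value below is roughly
# 52 %). The helper name and numbers are illustrative assumptions.
def _calculate_rhum_example():
    dew = np.array([10.0, 10.0])          # dew point (Celsius)
    temperature = np.array([10.0, 20.0])  # ambient temperature (Celsius)
    return calculate_rhum(dew, temperature)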
if __name__ == '__main__':
# Run the tests to check things are working OK.
_tests()
|
py | b40ff7d2b38695114807f48fc9d95c711c5e0fe6 | # Copyright 2020 Open Reaction Database Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ord_schema.message_helpers."""
import os
import tempfile
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from google.protobuf import json_format
from google.protobuf import text_format
from ord_schema import message_helpers
from ord_schema.proto import reaction_pb2
from ord_schema.proto import test_pb2
try:
from rdkit import Chem
except ImportError:
Chem = None
class MessageHelpersTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(
('ord-1234567890', 'data/12/ord-1234567890'),
('test/ord-foo.pbtxt', 'data/fo/ord-foo.pbtxt'),
('ord_dataset-f00.pbtxt', 'data/f0/ord_dataset-f00.pbtxt'),
('ord_data-123456foo7.jpg', 'data/12/ord_data-123456foo7.jpg'))
def test_id_filename(self, filename, expected):
self.assertEqual(message_helpers.id_filename(filename), expected)
class FindSubmessagesTest(absltest.TestCase):
def test_scalar(self):
message = test_pb2.Scalar(int32_value=5, float_value=6.7)
self.assertEmpty(
message_helpers.find_submessages(message, test_pb2.Scalar))
with self.assertRaisesRegex(TypeError, 'must be a Protocol Buffer'):
message_helpers.find_submessages(message, float)
def test_nested(self):
message = test_pb2.Nested()
self.assertEmpty(
message_helpers.find_submessages(message, test_pb2.Nested.Child))
message.child.value = 5.6
submessages = message_helpers.find_submessages(message,
test_pb2.Nested.Child)
self.assertLen(submessages, 1)
# Show that the returned submessages work as references.
submessages[0].value = 7.8
self.assertAlmostEqual(message.child.value, 7.8, places=4)
def test_repeated_nested(self):
message = test_pb2.RepeatedNested()
message.children.add().value = 1.2
message.children.add().value = 3.4
self.assertLen(
message_helpers.find_submessages(message,
test_pb2.RepeatedNested.Child), 2)
def test_map_nested(self):
message = test_pb2.MapNested()
message.children['one'].value = 1.2
message.children['two'].value = 3.4
self.assertLen(
message_helpers.find_submessages(message,
test_pb2.MapNested.Child), 2)
def test_compounds(self):
message = reaction_pb2.Reaction()
message.inputs['test'].components.add().identifiers.add(
type='NAME', value='aspirin')
self.assertLen(
message_helpers.find_submessages(message, reaction_pb2.Compound),
1)
class BuildDataTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.test_subdirectory = tempfile.mkdtemp(dir=flags.FLAGS.test_tmpdir)
self.data = b'test data'
self.filename = os.path.join(self.test_subdirectory, 'test.data')
with open(self.filename, 'wb') as f:
f.write(self.data)
def test_build_data(self):
message = message_helpers.build_data(self.filename,
description='binary data')
self.assertEqual(message.bytes_value, self.data)
self.assertEqual(message.description, 'binary data')
self.assertEqual(message.format, 'data')
def test_bad_filename(self):
with self.assertRaisesRegex(ValueError,
'cannot deduce the file format'):
message_helpers.build_data('testdata', 'no description')
class BuildCompoundTest(parameterized.TestCase, absltest.TestCase):
def test_smiles_and_name(self):
compound = message_helpers.build_compound(smiles='c1ccccc1',
name='benzene')
expected = reaction_pb2.Compound(identifiers=[
reaction_pb2.CompoundIdentifier(value='c1ccccc1', type='SMILES'),
reaction_pb2.CompoundIdentifier(value='benzene', type='NAME')
])
self.assertEqual(compound, expected)
@parameterized.named_parameters(
('mass', '1.2 g', reaction_pb2.Mass(value=1.2, units='GRAM')),
('moles', '3.4 mol', reaction_pb2.Moles(value=3.4, units='MOLE')),
('volume', '5.6 mL', reaction_pb2.Volume(value=5.6,
units='MILLILITER')))
def test_amount(self, amount, expected):
compound = message_helpers.build_compound(amount=amount)
self.assertEqual(getattr(compound, compound.WhichOneof('amount')),
expected)
@parameterized.named_parameters(('missing_units', '1.2'),
('negative_mass', '-3.4 g'))
def test_bad_amount(self, amount):
with self.assertRaises((KeyError, ValueError)):
message_helpers.build_compound(amount=amount)
def test_role(self):
compound = message_helpers.build_compound(role='solvent')
self.assertEqual(compound.reaction_role,
reaction_pb2.Compound.ReactionRole.SOLVENT)
def test_bad_role(self):
with self.assertRaisesRegex(KeyError, 'not a supported type'):
message_helpers.build_compound(role='flavorant')
def test_is_limiting(self):
self.assertTrue(
message_helpers.unconvert_boolean(
message_helpers.build_compound(is_limiting=True).is_limiting))
self.assertFalse(
message_helpers.unconvert_boolean(
message_helpers.build_compound(is_limiting=False).is_limiting))
self.assertEqual(
message_helpers.unconvert_boolean(
message_helpers.build_compound().is_limiting), None)
@parameterized.named_parameters(
('prep_without_details', 'dried', None,
reaction_pb2.CompoundPreparation(type='DRIED')),
('prep_with_details', 'dried', 'in the fire of the sun',
reaction_pb2.CompoundPreparation(type='DRIED',
details='in the fire of the sun')),
('custom_prep_with_details', 'custom', 'threw it on the ground',
reaction_pb2.CompoundPreparation(type='CUSTOM',
details='threw it on the ground')))
def test_prep(self, prep, details, expected):
compound = message_helpers.build_compound(prep=prep,
prep_details=details)
self.assertEqual(compound.preparations[0], expected)
def test_bad_prep(self):
with self.assertRaisesRegex(KeyError, 'not a supported type'):
message_helpers.build_compound(prep='shaken')
def test_prep_details_without_prep(self):
with self.assertRaisesRegex(ValueError, 'prep must be provided'):
message_helpers.build_compound(prep_details='rinsed gently')
def test_custom_prep_without_details(self):
with self.assertRaisesRegex(ValueError,
'prep_details must be provided'):
message_helpers.build_compound(prep='custom')
def test_vendor(self):
self.assertEqual(
message_helpers.build_compound(vendor='Sally').vendor_source,
'Sally')
class SetSoluteMolesTest(parameterized.TestCase, absltest.TestCase):
def test_set_solute_moles_should_fail(self):
solute = message_helpers.build_compound(name='Solute')
solvent = message_helpers.build_compound(name='Solvent')
with self.assertRaisesRegex(ValueError, 'defined volume'):
message_helpers.set_solute_moles(solute, [solvent], '10 mM')
solute = message_helpers.build_compound(name='Solute', amount='1 mol')
solvent = message_helpers.build_compound(name='Solvent', amount='1 L')
with self.assertRaisesRegex(ValueError, 'overwrite'):
message_helpers.set_solute_moles(solute, [solvent], '10 mM')
def test_set_solute_moles(self):
solute = message_helpers.build_compound(name='Solute')
solvent2 = message_helpers.build_compound(name='Solvent',
amount='100 mL')
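# moles = concentration * volume, so 1 M * 100 mL = 100 mmol (and similarly
# 3 mM * 75 uL = 225 nmol, 30 mM * 0.2 uL = 6 nmol, 30 mM * 1 uL = 30 nmol below).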
message_helpers.set_solute_moles(solute, [solvent2], '1 molar')
self.assertEqual(solute.moles,
reaction_pb2.Moles(units='MILLIMOLE', value=100))
solvent3 = message_helpers.build_compound(name='Solvent',
amount='75 uL')
message_helpers.set_solute_moles(solute, [solvent3],
'3 mM',
overwrite=True)
self.assertEqual(solute.moles,
reaction_pb2.Moles(units='NANOMOLE', value=225))
solvent4 = message_helpers.build_compound(name='Solvent',
amount='0.2 uL')
message_helpers.set_solute_moles(solute, [solvent4],
'30 mM',
overwrite=True)
self.assertEqual(solute.moles,
reaction_pb2.Moles(units='NANOMOLE', value=6))
solvent5 = message_helpers.build_compound(name='Solvent',
amount='0.8 uL')
message_helpers.set_solute_moles(solute, [solvent4, solvent5],
'30 mM',
overwrite=True)
self.assertEqual(solute.moles,
reaction_pb2.Moles(units='NANOMOLE', value=30))
class GetCompoundSmilesTest(absltest.TestCase):
def test_get_compound_smiles(self):
compound = message_helpers.build_compound(smiles='c1ccccc1',
name='benzene')
self.assertEqual(message_helpers.get_compound_smiles(compound),
'c1ccccc1')
class GetCompoundMolTest(absltest.TestCase):
@absltest.skipIf(Chem is None, 'no rdkit')
def test_get_compound_mol(self):
mol = Chem.MolFromSmiles('c1ccccc1')
compound = message_helpers.build_compound(smiles='c1ccccc1',
name='benzene')
identifier = compound.identifiers.add()
identifier.type = identifier.RDKIT_BINARY
identifier.bytes_value = mol.ToBinary()
self.assertEqual(
Chem.MolToSmiles(mol),
Chem.MolToSmiles(message_helpers.get_compound_mol(compound)))
class LoadAndWriteMessageTest(parameterized.TestCase, absltest.TestCase):
def setUp(self):
super().setUp()
self.messages = [
test_pb2.Scalar(int32_value=3, float_value=4.5),
test_pb2.RepeatedScalar(values=[1.2, 3.4]),
test_pb2.Enum(value='FIRST'),
test_pb2.RepeatedEnum(values=['FIRST', 'SECOND']),
test_pb2.Nested(child=test_pb2.Nested.Child(value=1.2)),
]
@parameterized.parameters(message_helpers.MessageFormat)
def test_round_trip(self, message_format):
for message in self.messages:
with tempfile.NamedTemporaryFile(suffix=message_format.value) as f:
message_helpers.write_message(message, f.name)
f.flush()
self.assertEqual(
message,
message_helpers.load_message(f.name, type(message)))
def test_bad_binary(self):
with tempfile.NamedTemporaryFile(suffix='.pb') as f:
message = test_pb2.RepeatedScalar(values=[1.2, 3.4])
f.write(message.SerializeToString())
f.flush()
# NOTE(kearnes): The decoder is not perfect; for example, it will
# not be able to distinguish this from a message with the same tags and
# types (e.g. test_pb2.Scalar and test_pb2.RepeatedScalar).
with self.assertRaisesRegex(ValueError, 'Error parsing message'):
message_helpers.load_message(f.name, test_pb2.Nested)
def test_bad_json(self):
with tempfile.NamedTemporaryFile(mode='w+', suffix='.json') as f:
message = test_pb2.RepeatedScalar(values=[1.2, 3.4])
f.write(json_format.MessageToJson(message))
f.flush()
with self.assertRaisesRegex(ValueError, 'no field named "values"'):
message_helpers.load_message(f.name, test_pb2.Nested)
def test_bad_pbtxt(self):
with tempfile.NamedTemporaryFile(mode='w+', suffix='.pbtxt') as f:
message = test_pb2.RepeatedScalar(values=[1.2, 3.4])
f.write(text_format.MessageToString(message))
f.flush()
with self.assertRaisesRegex(ValueError, 'no field named "values"'):
message_helpers.load_message(f.name, test_pb2.Nested)
def test_bad_suffix(self):
message = test_pb2.RepeatedScalar(values=[1.2, 3.4])
with self.assertRaisesRegex(ValueError, 'not a valid MessageFormat'):
message_helpers.write_message(message, 'test.proto')
if __name__ == '__main__':
absltest.main()
|
py | b40ff91b3d1c66d1f2885a32d9fb9314b79066a6 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ConversationEmailEventTopicDetail(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ConversationEmailEventTopicDetail - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'error_code': 'str',
'field_name': 'str',
'entity_id': 'str',
'entity_name': 'str'
}
self.attribute_map = {
'error_code': 'errorCode',
'field_name': 'fieldName',
'entity_id': 'entityId',
'entity_name': 'entityName'
}
self._error_code = None
self._field_name = None
self._entity_id = None
self._entity_name = None
@property
def error_code(self):
"""
Gets the error_code of this ConversationEmailEventTopicDetail.
:return: The error_code of this ConversationEmailEventTopicDetail.
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""
Sets the error_code of this ConversationEmailEventTopicDetail.
:param error_code: The error_code of this ConversationEmailEventTopicDetail.
:type: str
"""
self._error_code = error_code
@property
def field_name(self):
"""
Gets the field_name of this ConversationEmailEventTopicDetail.
:return: The field_name of this ConversationEmailEventTopicDetail.
:rtype: str
"""
return self._field_name
@field_name.setter
def field_name(self, field_name):
"""
Sets the field_name of this ConversationEmailEventTopicDetail.
:param field_name: The field_name of this ConversationEmailEventTopicDetail.
:type: str
"""
self._field_name = field_name
@property
def entity_id(self):
"""
Gets the entity_id of this ConversationEmailEventTopicDetail.
:return: The entity_id of this ConversationEmailEventTopicDetail.
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""
Sets the entity_id of this ConversationEmailEventTopicDetail.
:param entity_id: The entity_id of this ConversationEmailEventTopicDetail.
:type: str
"""
self._entity_id = entity_id
@property
def entity_name(self):
"""
Gets the entity_name of this ConversationEmailEventTopicDetail.
:return: The entity_name of this ConversationEmailEventTopicDetail.
:rtype: str
"""
return self._entity_name
@entity_name.setter
def entity_name(self, entity_name):
"""
Sets the entity_name of this ConversationEmailEventTopicDetail.
:param entity_name: The entity_name of this ConversationEmailEventTopicDetail.
:type: str
"""
self._entity_name = entity_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | b40ff91bd8d1f879ed0ba3122ca860d914672083 | # Generated by Django 2.2.19 on 2021-05-14 08:13
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0012_job_master_job'),
]
operations = [
migrations.AddField(
model_name='client',
name='port',
field=models.PositiveIntegerField(blank=True, default=22, max_length=5, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)]),
),
]
|
py | b40ffa1038a2267a314c29d51253fcc3dc3ab2c8 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Server-side tools
Case Name : Modify a parameter in the configuration file and send a signal to postgresql.conf without specifying -I or -D
Description :
1. Check the default value of krb_caseins_users
2. Modify the parameter in the configuration file and send the signal to postgresql.conf without specifying -I/-D
Expect :
1. Succeeds
2. Execution fails
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
LOG = Logger()
class Tools(unittest.TestCase):
def setUp(self):
LOG.info('----Opengauss_Function_Tools_gs_guc_Case0035 starts----')
self.dbuser_node = Node('dbuser')
self.constant = Constant()
self.commonsh = CommonSH()
def test_server_tools(self):
LOG.info('-----Step 1. Check the default value of krb_caseins_users-----')
sql_cmd = self.commonsh.execut_db_sql(f'show krb_caseins_users;')
LOG.info(sql_cmd)
self.assertIn('off', sql_cmd)
LOG.info('-----Step 2. Modify the parameter in the configuration file without specifying -D/-I----')
check_cmd = f'source {macro.DB_ENV_PATH};' \
f'gs_guc reload -N all -c krb_caseins_users=off;'
LOG.info(check_cmd)
msg = self.dbuser_node.sh(check_cmd).result()
LOG.info(msg)
self.assertIn('ERROR: -D or -I are mandatory for executing gs_guc',
msg)
def tearDown(self):
LOG.info('----------------No environment cleanup required-----------------------')
LOG.info('----Opengauss_Function_Tools_gs_guc_Case0035 finished----')
|
py | b40ffa1b6b17b1a8670db1cd8ce5eceeaf02776b | #copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import division
from __future__ import print_function
from util import config, utility
from data_reader import data_reader
import os
import sys
import six
import time
import numpy as np
import paddle
import paddle.fluid as fluid
import trainer
def train(cfg):
MODELS = [
"CGAN", "DCGAN", "Pix2pix", "CycleGAN", "StarGAN", "AttGAN", "STGAN"
]
if cfg.model_net not in MODELS:
raise NotImplementedError("{} is not support!".format(cfg.model_net))
reader = data_reader(cfg)
if cfg.model_net in ['CycleGAN']:
a_reader, b_reader, a_reader_test, b_reader_test, batch_num, a_id2name, b_id2name = reader.make_data(
)
else:
if cfg.dataset in ['mnist']:
train_reader = reader.make_data()
else:
train_reader, test_reader, batch_num, id2name = reader.make_data()
if cfg.model_net in ['CGAN', 'DCGAN']:
if cfg.dataset != 'mnist':
raise NotImplementedError("CGAN/DCGAN only support MNIST now!")
model = trainer.__dict__[cfg.model_net](cfg, train_reader)
elif cfg.model_net in ['CycleGAN']:
model = trainer.__dict__[cfg.model_net](cfg, a_reader, b_reader,
a_reader_test, b_reader_test,
batch_num, a_id2name, b_id2name)
else:
model = trainer.__dict__[cfg.model_net](cfg, train_reader, test_reader,
batch_num, id2name)
model.build_model()
if __name__ == "__main__":
cfg = config.parse_args()
config.print_arguments(cfg)
utility.check_gpu(cfg.use_gpu)
if cfg.profile:
if cfg.use_gpu:
with fluid.profiler.profiler('All', 'total',
'/tmp/profile') as prof:
train(cfg)
else:
with fluid.profiler.profiler("CPU", sorted_key='total') as cpuprof:
train(cfg)
else:
train(cfg)
|
py | b40ffaa33cf7c7fbf079a90867e98a100dc0b371 | x = range(5)
y = x
x = [0, 1, 10, 3, 4]
print "x,y=", x, y
x = range(5)
y = x
y[-3] = 10
print "x,y=", x, y
x = range(5)
x = y
x[2] = 10
print "x,y=", x, y
x = range(5)
y = x
x[2] = 10
print "x,y=", x, y
x = range(5)
y = x
y = [0, 1, 10, 3, 4]
print "x,y=", x, y
|
py | b40ffd2972501ba4197dc336491a6adf4ecc51d8 | #!/usr/bin/env python
import code
import serial
import time
ser = None
for i in range(3):
try:
ser = serial.Serial('/dev/ttyUSB{n}'.format(n=i), 19200)
except serial.serialutil.SerialException:
pass
print("Connected to: " + ser.name)
def packet(address, data=b''):
if len(data) > 0xf:
raise Exception('Too much data for this packet')
send = chr((address & 0xf) | (len(data) << 4)) + data
print("Sending: " + str([ord(x) for x in send]))
ser.write(send)
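# zvezde_command packs a 3-bit command code into the top three bits and a
# 5-bit LED id (0-31) into the low five bits of the first data byte.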
def zvezde_command(id, command, data=b''):
if id >= 32:
raise Exception('Only 32 leds here')
packet(0xf, chr((command << 5) | id) + data)
def zvezde_set(id, brightness):
zvezde_command(id, 2, chr(brightness))
def zvezde_id(id):
zvezde_command(id, 3)
def zvezde_all():
data = {
# zupnik
# 0: 7,
# 1: 255,
# 2: 255,
# 3: 15,
# 4: 127,
# 5: 15,
# 6: 31,
# 7: 63,
# 8: 15,
# 9: 3,
# 10: 127,
# 11: 31,
# 12: 7,
# 13: 15,
# 14: 63,
# 15: 3,
# 16: 7,
# 17: 63,
# 18: 63,
# 19: 3,
# 20: 7,
# 21: 15,
# 22: 127,
# 23: 1,
# 24: 31,
# 25: 63,
# 26: 127,
# 27: 15,
# 28: 7,
# 29: 15,
# 30: 31,
# 31: 7,
# david
# 0: 3,
# 1: 3,
# 2: 7,
# 3: 3,
# 4: 127,
# 5: 7,
# 6: 15,
# 7: 31,
# 8: 1,
# 9: 63,
# 10: 127,
# 11: 3,
# 12: 63,
# 13: 15,
# 14: 31,
# 15: 7,
# 16: 15,
# 17: 63,
# 18: 255,
# 19: 255,
# 20: 15,
# 21: 31,
# 22: 15,
# 23: 127,
# 24: 127,
# 25: 15,
# 26: 1,
# 27: 63,
# 28: 7,
# 29: 3,
# 30: 7,
# 31: 127,
# zupnik 2017
0: 31,
1: 31,
2: 15,
# 3: 3,
# 4: 127,
# 5: 7,
# 6: 15,
# 7: 31,
8: 31,
9: 7,
10: 255,
11: 3,
12: 7,
13: 255,
14: 31,
15: 15,
# 16: 15,
17: 3,
18: 15,
19: 15,
20: 1,
21: 255,
22: 7,
23: 31,
# 24: 127,
# 25: 15,
# 26: 1,
# 27: 63,
# 28: 7,
# 29: 3,
# 30: 7,
# 31: 127,
}
for id, value in data.items():
zvezde_set(id, value)
time.sleep(0.1)
def utrinek_start_left(address):
packet(address, chr(0))
def utrinek_start_right(address):
packet(address, chr(1))
def utrinek_set_mode(address, mode):
packet(address, chr(2) + chr(mode))
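# The random min/max setters below send the 16-bit value big-endian: command
# byte first, then the high byte, then the low byte.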
def utrinek_set_random_min(address, min):
packet(address, chr(3) + chr(min >> 8) + chr(min & 0xff))
def utrinek_set_random_max(address, max):
packet(address, chr(4) + chr(max >> 8) + chr(max & 0xff))
def utrinek_set_address(address, new_address):
packet(address, chr(5) + chr(new_address))
def luna_set_mode(mode):
packet(0xe, chr(mode << 4))
def luna_set_auto():
luna_set_mode(0xe)
def luna_set_changing():
luna_set_mode(0xf)
def luna_get_time():
ser.reset_input_buffer()
packet(0xe, chr(1))
data = [ord(x) for x in ser.read(8)]
if (data[0] == 1):
print('Success')
if data[4] & 0b00100000:
print('Oscillator running')
else:
print('Oscillator not running')
else:
print('Fail')
print("20{}{}-{}{}-{}{} {}{}:{}{}:{}{}".format(
data[7] >> 4,
data[7] & 0xf,
(data[6] >> 4) & 1,
data[6] & 0xf,
data[5] >> 4,
data[5] & 0xf,
data[3] >> 4,
data[3] & 0xf,
data[2] >> 4,
data[2] & 0xf,
(data[1] >> 4) & 0b0111,
data[1] & 0xf
))
print(data)
def luna_oscillator_out():
ser.reset_input_buffer()
packet(0xe, chr(3) + chr(0b11000011))
print(ord(ser.read()))
def luna_disable_out():
ser.reset_input_buffer()
packet(0xe, chr(3) + chr(0b10000000))
print(ord(ser.read()))
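# luna_set_time packs each field as BCD: tens digit in the high nibble, ones
# digit in the low nibble. Bit 7 of the seconds byte is set before sending; on
# many RTC chips that bit starts the oscillator, but which chip is wired up
# here is an assumption.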
def luna_set_time(year, month, day, hours, minutes, seconds):
sec_byte = chr(0b10000000 | ((seconds / 10) << 4) | (seconds % 10))
min_byte = chr(((minutes / 10) << 4) | (minutes % 10))
hour_byte = chr(((hours / 10) << 4) | (hours % 10))
day_byte = chr(0b00001000)
date_byte = chr(((day / 10) << 4) | (day % 10))
month_byte = chr(((month / 10) << 4) | (month % 10))
year_byte = chr((((year / 10) % 10) << 4) | (year % 10))
ser.reset_input_buffer()
packet(0xe, chr(2) + sec_byte + min_byte + hour_byte + day_byte + date_byte + month_byte + year_byte)
print(ord(ser.read()))
def snow_stars():
packet(0xc)
def snow_snow():
packet(0xb)
def snow_speed(s):
packet(0xd, chr(0) + chr(s))
def snow_test(x, y):
packet(0xd, chr(1) + chr(x) + chr(y))
code.interact(local=locals())
|
py | b40ffdc4cf5ae4f5771262e3b118b488c8569e34 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import sys
if sys.version_info < (3,):
raise ImportError("Python Flight bindings require Python 3")
from pyarrow._flight import ( # noqa
connect,
Action,
ActionType,
CertKeyPair,
DescriptorType,
FlightCallOptions,
FlightClient,
FlightDescriptor,
FlightEndpoint,
FlightInfo,
SchemaResult,
FlightServerBase,
FlightError,
FlightInternalError,
FlightTimedOutError,
FlightCancelledError,
FlightUnauthenticatedError,
FlightUnauthorizedError,
FlightUnavailableError,
GeneratorStream,
Location,
Ticket,
RecordBatchStream,
Result,
ClientAuthHandler,
ServerAuthHandler,
BasicAuth
)
|
py | b40ffe3cef777fe119974536f2ab51e9f8e9adb1 | from __future__ import unicode_literals
import errno
import io
import hashlib
import json
import os.path
import re
import types
import ssl
import sys
import yt_dlp.extractor
from yt_dlp import YoutubeDL
from yt_dlp.compat import (
compat_os_name,
compat_str,
)
from yt_dlp.utils import (
preferredencoding,
write_string,
)
if "pytest" in sys.modules:
import pytest
is_download_test = pytest.mark.download
else:
def is_download_test(testClass):
return testClass
def get_params(override=None):
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"parameters.json")
LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"local_parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
if os.path.exists(LOCAL_PARAMETERS_FILE):
with io.open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
parameters.update(json.load(pf))
if override:
parameters.update(override)
return parameters
def try_rm(filename):
""" Remove a file if it exists """
try:
os.remove(filename)
except OSError as ose:
if ose.errno != errno.ENOENT:
raise
def report_warning(message):
'''
Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored
'''
if sys.stderr.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
class FakeYDL(YoutubeDL):
def __init__(self, override=None):
# Different instances of the downloader can't share the same dictionary
# some tests set the "sublang" parameter, which would break the md5 checks.
params = get_params(override=override)
super(FakeYDL, self).__init__(params, auto_init=False)
self.result = []
def to_screen(self, s, skip_eol=None):
print(s)
def trouble(self, s, tb=None):
raise Exception(s)
def download(self, x):
self.result.append(x)
def expect_warning(self, regex):
# Silence an expected warning matching a regex
old_report_warning = self.report_warning
def report_warning(self, message):
if re.match(regex, message):
return
old_report_warning(message)
self.report_warning = types.MethodType(report_warning, self)
def gettestcases(include_onlymatching=False):
for ie in yt_dlp.extractor.gen_extractors():
for tc in ie.get_testcases(include_onlymatching):
yield tc
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
def expect_value(self, got, expected, field):
if isinstance(expected, compat_str) and expected.startswith('re:'):
match_str = expected[len('re:'):]
match_rex = re.compile(match_str)
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
match_rex.match(got),
'field %s (value: %r) should match %r' % (field, got, match_str))
elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
start_str = expected[len('startswith:'):]
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
got.startswith(start_str),
'field %s (value: %r) should start with %r' % (field, got, start_str))
elif isinstance(expected, compat_str) and expected.startswith('contains:'):
contains_str = expected[len('contains:'):]
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
contains_str in got,
'field %s (value: %r) should contain %r' % (field, got, contains_str))
elif isinstance(expected, type):
self.assertTrue(
isinstance(got, expected),
'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got)))
elif isinstance(expected, dict) and isinstance(got, dict):
expect_dict(self, got, expected)
elif isinstance(expected, list) and isinstance(got, list):
self.assertEqual(
len(expected), len(got),
'Expect a list of length %d, but got a list of length %d for field %s' % (
len(expected), len(got), field))
for index, (item_got, item_expected) in enumerate(zip(got, expected)):
type_got = type(item_got)
type_expected = type(item_expected)
self.assertEqual(
type_expected, type_got,
'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
index, field, type_expected, type_got))
expect_value(self, item_got, item_expected, field)
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
self.assertTrue(
isinstance(got, compat_str),
'Expected field %s to be a unicode object, but got value %r of type %r' % (field, got, type(got)))
got = 'md5:' + md5(got)
elif isinstance(expected, compat_str) and re.match(r'^(?:min|max)?count:\d+', expected):
self.assertTrue(
isinstance(got, (list, dict)),
'Expected field %s to be a list or a dict, but it is of type %s' % (
field, type(got).__name__))
op, _, expected_num = expected.partition(':')
expected_num = int(expected_num)
if op == 'mincount':
assert_func = assertGreaterEqual
msg_tmpl = 'Expected %d items in field %s, but only got %d'
elif op == 'maxcount':
assert_func = assertLessEqual
msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
elif op == 'count':
assert_func = assertEqual
msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
else:
assert False
assert_func(
self, len(got), expected_num,
msg_tmpl % (expected_num, field, len(got)))
return
self.assertEqual(
expected, got,
'Invalid value for field %s, expected %r, got %r' % (field, expected, got))
def expect_dict(self, got_dict, expected_dict):
for info_field, expected in expected_dict.items():
got = got_dict.get(info_field)
expect_value(self, got, expected, info_field)
def expect_info_dict(self, got_dict, expected_dict):
expect_dict(self, got_dict, expected_dict)
# Check for the presence of mandatory fields
if got_dict.get('_type') not in ('playlist', 'multi_video'):
for key in ('id', 'url', 'title', 'ext'):
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
for key, value in got_dict.items()
if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
else:
return repr(v)
info_dict_str = ''
if len(missing_keys) != len(expected_dict):
info_dict_str += ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items() if k not in missing_keys)
if info_dict_str:
info_dict_str += '\n'
info_dict_str += ''.join(
' %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
for k in missing_keys)
write_string(
'\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (
', '.join(sorted(missing_keys))))
def assertRegexpMatches(self, text, regexp, msg=None):
if hasattr(self, 'assertRegexp'):
return self.assertRegexp(text, regexp, msg)
else:
m = re.match(regexp, text)
if not m:
note = 'Regexp didn\'t match: %r not found' % (regexp)
if len(text) < 1000:
note += ' in %r' % text
if msg is None:
msg = note
else:
msg = note + ', ' + msg
self.assertTrue(m, msg)
def assertGreaterEqual(self, got, expected, msg=None):
if not (got >= expected):
if msg is None:
msg = '%r not greater than or equal to %r' % (got, expected)
self.assertTrue(got >= expected, msg)
def assertLessEqual(self, got, expected, msg=None):
if not (got <= expected):
if msg is None:
msg = '%r not less than or equal to %r' % (got, expected)
self.assertTrue(got <= expected, msg)
def assertEqual(self, got, expected, msg=None):
if not (got == expected):
if msg is None:
msg = '%r not equal to %r' % (got, expected)
self.assertTrue(got == expected, msg)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
def _report_warning(w):
if not any(re.search(w_re, w) for w_re in warnings_re):
real_warning(w)
ydl.report_warning = _report_warning
def http_server_port(httpd):
if os.name == 'java' and isinstance(httpd.socket, ssl.SSLSocket):
# In Jython SSLSocket is not a subclass of socket.socket
sock = httpd.socket.sock
else:
sock = httpd.socket
return sock.getsockname()[1]
|
py | b40ffe7366e91cd1bc2d5e289cd566d0ff47f23c | # PRE: you must do the following for this test to work
# aws s3 cp s3://amazon-reviews-pds/parquet/ ./data/amazon-reviews-pds/parquet/ --recursive
def pytest_addoption(parser):
parser.addoption(
'--extended',
action='store_true',
dest='extended',
default=False,
help="enable tests decorated as extended (long-running)"
)
def pytest_configure(config):
if not config.option.extended:
setattr(config.option, 'markexpr', 'not extended')
|
py | b40ffec6e080c142f6a6a22fb571381b219baf33 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.speech.v1 Speech API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.protobuf_helpers
import grpc
from google.cloud.speech_v1.gapic import enums
from google.cloud.speech_v1.gapic import speech_client_config
from google.cloud.speech_v1.gapic.transports import speech_grpc_transport
from google.cloud.speech_v1.proto import cloud_speech_pb2
from google.cloud.speech_v1.proto import cloud_speech_pb2_grpc
from google.longrunning import operations_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-speech").version
class SpeechClient(object):
"""Service that implements Google Cloud Speech API."""
SERVICE_ADDRESS = "speech.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.speech.v1.Speech"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpeechClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.SpeechGrpcTransport,
Callable[[~.Credentials, type], ~.SpeechGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = speech_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=speech_grpc_transport.SpeechGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = speech_grpc_transport.SpeechGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def recognize(
self,
config,
audio,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Performs synchronous speech recognition: receive results after all audio
has been sent and processed.
Example:
>>> from google.cloud import speech_v1
>>> from google.cloud.speech_v1 import enums
>>>
>>> client = speech_v1.SpeechClient()
>>>
>>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
>>> sample_rate_hertz = 44100
>>> language_code = 'en-US'
>>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
>>> uri = 'gs://bucket_name/file_name.flac'
>>> audio = {'uri': uri}
>>>
>>> response = client.recognize(config, audio)
Args:
config (Union[dict, ~google.cloud.speech_v1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
process the request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1.types.RecognitionConfig`
audio (Union[dict, ~google.cloud.speech_v1.types.RecognitionAudio]): *Required* The audio data to be recognized.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1.types.RecognitionAudio`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.speech_v1.types.RecognizeResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "recognize" not in self._inner_api_calls:
self._inner_api_calls[
"recognize"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.recognize,
default_retry=self._method_configs["Recognize"].retry,
default_timeout=self._method_configs["Recognize"].timeout,
client_info=self._client_info,
)
request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
return self._inner_api_calls["recognize"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def long_running_recognize(
self,
config,
audio,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Performs asynchronous speech recognition: receive results via the
google.longrunning.Operations interface. Returns either an
``Operation.error`` or an ``Operation.response`` which contains a
``LongRunningRecognizeResponse`` message.
Example:
>>> from google.cloud import speech_v1
>>> from google.cloud.speech_v1 import enums
>>>
>>> client = speech_v1.SpeechClient()
>>>
>>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
>>> sample_rate_hertz = 44100
>>> language_code = 'en-US'
>>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
>>> uri = 'gs://bucket_name/file_name.flac'
>>> audio = {'uri': uri}
>>>
>>> response = client.long_running_recognize(config, audio)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
config (Union[dict, ~google.cloud.speech_v1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
process the request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1.types.RecognitionConfig`
audio (Union[dict, ~google.cloud.speech_v1.types.RecognitionAudio]): *Required* The audio data to be recognized.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.speech_v1.types.RecognitionAudio`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.speech_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "long_running_recognize" not in self._inner_api_calls:
self._inner_api_calls[
"long_running_recognize"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.long_running_recognize,
default_retry=self._method_configs["LongRunningRecognize"].retry,
default_timeout=self._method_configs["LongRunningRecognize"].timeout,
client_info=self._client_info,
)
request = cloud_speech_pb2.LongRunningRecognizeRequest(
config=config, audio=audio
)
operation = self._inner_api_calls["long_running_recognize"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
cloud_speech_pb2.LongRunningRecognizeResponse,
metadata_type=cloud_speech_pb2.LongRunningRecognizeMetadata,
)
def streaming_recognize(
self,
requests,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Performs bidirectional streaming speech recognition: receive results while
sending audio. This method is only available via the gRPC API (not REST).
EXPERIMENTAL: This method interface might change in the future.
Example:
>>> from google.cloud import speech_v1
>>>
>>> client = speech_v1.SpeechClient()
>>>
>>> request = {}
>>>
>>> requests = [request]
>>> for element in client.streaming_recognize(requests):
... # process element
... pass
Args:
requests (iterator[dict|google.cloud.speech_v1.proto.cloud_speech_pb2.StreamingRecognizeRequest]): The input objects. If a dict is provided, it must be of the
same form as the protobuf message :class:`~google.cloud.speech_v1.types.StreamingRecognizeRequest`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.speech_v1.types.StreamingRecognizeResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "streaming_recognize" not in self._inner_api_calls:
self._inner_api_calls[
"streaming_recognize"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.streaming_recognize,
default_retry=self._method_configs["StreamingRecognize"].retry,
default_timeout=self._method_configs["StreamingRecognize"].timeout,
client_info=self._client_info,
)
return self._inner_api_calls["streaming_recognize"](
requests, retry=retry, timeout=timeout, metadata=metadata
)
|
py | b40ffecb5d43819e4f5b489259e5cdb5caa61379 | class features(object):
def getFeatures(self):
return ['replot']
|
py | b40fff09f494476d9f748b2c8cc160bf0e6c3670 | import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from .exceptions import APIError
class PvpMixin:
@commands.group(case_insensitive=True)
async def pvp(self, ctx):
"""PvP related commands.
"""
if ctx.invoked_subcommand is None:
await ctx.send_help(ctx.command)
@pvp.command(name="stats")
@commands.cooldown(1, 20, BucketType.user)
async def pvp_stats(self, ctx):
"""Information about your general pvp stats
Required permissions: pvp
"""
try:
doc = await self.fetch_key(ctx.author, ["pvp"])
results = await self.call_api("pvp/stats", key=doc["key"])
except APIError as e:
return await self.error_handler(ctx, e)
rank = results["pvp_rank"] + results["pvp_rank_rollovers"]
totalgamesplayed = sum(results["aggregate"].values())
totalwins = results["aggregate"]["wins"] + results["aggregate"]["byes"]
if totalgamesplayed != 0:
totalwinratio = int((totalwins / totalgamesplayed) * 100)
else:
totalwinratio = 0
rankedgamesplayed = sum(results["ladders"]["ranked"].values())
rankedwins = results["ladders"]["ranked"]["wins"] + \
results["ladders"]["ranked"]["byes"]
if rankedgamesplayed != 0:
rankedwinratio = int((rankedwins / rankedgamesplayed) * 100)
else:
rankedwinratio = 0
rank_id = results["pvp_rank"] // 10 + 1
try:
ranks = await self.call_api("pvp/ranks/{0}".format(rank_id))
except APIError as e:
await self.error_handler(ctx, e)
return
data = discord.Embed(colour=await self.get_embed_color(ctx))
data.add_field(name="Rank", value=rank, inline=False)
data.add_field(name="Total games played", value=totalgamesplayed)
data.add_field(name="Total wins", value=totalwins)
data.add_field(
name="Total winratio", value="{}%".format(totalwinratio))
data.add_field(name="Ranked games played", value=rankedgamesplayed)
data.add_field(name="Ranked wins", value=rankedwins)
data.add_field(
name="Ranked winratio", value="{}%".format(rankedwinratio))
data.set_author(name=doc["account_name"])
data.set_thumbnail(url=ranks["icon"])
try:
await ctx.send(embed=data)
except discord.Forbidden:
await ctx.send("Need permission to embed links")
@pvp.command(name="professions")
@commands.cooldown(1, 5, BucketType.user)
async def pvp_professions(self, ctx, *, profession: str = None):
"""Information about your pvp profession stats.
If no profession is given, defaults to general profession stats.
Example! $pvp professions mesmer
Required permissions: pvp
"""
try:
doc = await self.fetch_key(ctx.author, ["pvp"])
results = await self.call_api("pvp/stats", key=doc["key"])
except APIError as e:
return await self.error_handler(ctx, e)
professions = self.gamedata["professions"].keys()
professionsformat = {}
if not profession:
for profession in professions:
if profession in results["professions"]:
wins = (results["professions"][profession]["wins"] +
results["professions"][profession]["byes"])
total = sum(results["professions"][profession].values())
winratio = int((wins / total) * 100)
professionsformat[profession] = {
"wins": wins,
"total": total,
"winratio": winratio
}
mostplayed = max(
professionsformat, key=lambda i: professionsformat[i]['total'])
icon = self.gamedata["professions"][mostplayed]["icon"]
mostplayedgames = professionsformat[mostplayed]["total"]
highestwinrate = max(
professionsformat,
key=lambda i: professionsformat[i]["winratio"])
highestwinrategames = professionsformat[highestwinrate]["winratio"]
leastplayed = min(
professionsformat, key=lambda i: professionsformat[i]["total"])
leastplayedgames = professionsformat[leastplayed]["total"]
lowestestwinrate = min(
professionsformat,
key=lambda i: professionsformat[i]["winratio"])
lowestwinrategames = professionsformat[lowestestwinrate][
"winratio"]
data = discord.Embed(
description="Professions",
color=await self.get_embed_color(ctx))
data.set_thumbnail(url=icon)
data.add_field(
name="Most played profession",
value="{0}, with {1}".format(mostplayed.capitalize(),
mostplayedgames))
data.add_field(
name="Highest winrate profession",
value="{0}, with {1}%".format(highestwinrate.capitalize(),
highestwinrategames))
data.add_field(
name="Least played profession",
value="{0}, with {1}".format(leastplayed.capitalize(),
leastplayedgames))
data.add_field(
name="Lowest winrate profession",
value="{0}, with {1}%".format(lowestestwinrate.capitalize(),
lowestwinrategames))
data.set_author(name=doc["account_name"])
data.set_footer(
text="PROTIP: Use $pvp professions <profession> for "
"more detailed stats")
try:
await ctx.send(embed=data)
except discord.HTTPException:
await ctx.send("Need permission to embed links")
elif profession.lower() not in self.gamedata["professions"]:
await ctx.send("Invalid profession")
elif profession.lower() not in results["professions"]:
await ctx.send("You haven't played that profession!")
else:
prof = profession.lower()
wins = results["professions"][prof]["wins"] + \
results["professions"][prof]["byes"]
total = sum(results["professions"][prof].values())
winratio = int((wins / total) * 100)
color = self.gamedata["professions"][prof]["color"]
color = int(color, 0)
data = discord.Embed(
description="Stats for {0}".format(prof), colour=color)
data.set_thumbnail(url=self.gamedata["professions"][prof]["icon"])
data.add_field(
name="Total games played", value="{0}".format(total))
data.add_field(name="Wins", value="{0}".format(wins))
data.add_field(name="Winratio", value="{0}%".format(winratio))
data.set_author(name=doc["account_name"])
try:
await ctx.send(embed=data)
except discord.Forbidden:
await ctx.send("Need permission to embed links")
|
py | b40fff669589c6eab8dbbfbd0724813907e3fcc7 | """Support for Z-Wave climate devices."""
# Because we do not compile openzwave on CI
import logging
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_OFF,
PRESET_BOOST,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_PRESET_MODE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ZWaveDeviceEntity
_LOGGER = logging.getLogger(__name__)
CONF_NAME = "name"
DEFAULT_NAME = "Z-Wave Climate"
REMOTEC = 0x5254
REMOTEC_ZXT_120 = 0x8377
REMOTEC_ZXT_120_THERMOSTAT = (REMOTEC, REMOTEC_ZXT_120)
ATTR_OPERATING_STATE = "operating_state"
ATTR_FAN_STATE = "fan_state"
ATTR_FAN_ACTION = "fan_action"
AUX_HEAT_ZWAVE_MODE = "Aux Heat"
# Device is in manufacturer specific mode (e.g. setting the valve manually)
PRESET_MANUFACTURER_SPECIFIC = "Manufacturer Specific"
WORKAROUND_ZXT_120 = "zxt_120"
DEVICE_MAPPINGS = {REMOTEC_ZXT_120_THERMOSTAT: WORKAROUND_ZXT_120}
HVAC_STATE_MAPPINGS = {
"off": HVAC_MODE_OFF,
"heat": HVAC_MODE_HEAT,
"heat mode": HVAC_MODE_HEAT,
"heat (default)": HVAC_MODE_HEAT,
"furnace": HVAC_MODE_HEAT,
"fan only": HVAC_MODE_FAN_ONLY,
"dry air": HVAC_MODE_DRY,
"moist air": HVAC_MODE_DRY,
"cool": HVAC_MODE_COOL,
"heat_cool": HVAC_MODE_HEAT_COOL,
"auto": HVAC_MODE_HEAT_COOL,
"auto changeover": HVAC_MODE_HEAT_COOL,
}
HVAC_CURRENT_MAPPINGS = {
"idle": CURRENT_HVAC_IDLE,
"heat": CURRENT_HVAC_HEAT,
"pending heat": CURRENT_HVAC_IDLE,
"heating": CURRENT_HVAC_HEAT,
"cool": CURRENT_HVAC_COOL,
"pending cool": CURRENT_HVAC_IDLE,
"cooling": CURRENT_HVAC_COOL,
"fan only": CURRENT_HVAC_FAN,
"vent / economiser": CURRENT_HVAC_FAN,
"off": CURRENT_HVAC_OFF,
}
PRESET_MAPPINGS = {
"full power": PRESET_BOOST,
"manufacturer specific": PRESET_MANUFACTURER_SPECIFIC,
}
DEFAULT_HVAC_MODES = [
HVAC_MODE_HEAT_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_DRY,
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old method of setting up Z-Wave climate devices."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Climate device from Config Entry."""
@callback
def async_add_climate(climate):
"""Add Z-Wave Climate Device."""
async_add_entities([climate])
async_dispatcher_connect(hass, "zwave_new_climate", async_add_climate)
def get_device(hass, values, **kwargs):
"""Create Z-Wave entity device."""
temp_unit = hass.config.units.temperature_unit
return ZWaveClimate(values, temp_unit)
class ZWaveClimate(ZWaveDeviceEntity, ClimateDevice):
"""Representation of a Z-Wave Climate device."""
def __init__(self, values, temp_unit):
"""Initialize the Z-Wave climate device."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self._target_temperature = None
self._current_temperature = None
self._hvac_action = None
self._hvac_list = None # [zwave_mode]
self._hvac_mapping = None # {ha_mode:zwave_mode}
self._hvac_mode = None # ha_mode
self._aux_heat = None
self._default_hvac_mode = None # ha_mode
self._preset_mapping = None # {ha_mode:zwave_mode}
self._preset_list = None # [zwave_mode]
self._preset_mode = None # ha_mode if exists, else zwave_mode
self._current_fan_mode = None
self._fan_modes = None
self._fan_action = None
self._current_swing_mode = None
self._swing_modes = None
self._unit = temp_unit
_LOGGER.debug("temp_unit is %s", self._unit)
self._zxt_120 = None
# Make sure that we have values for the key before converting to int
if self.node.manufacturer_id.strip() and self.node.product_id.strip():
specific_sensor_key = (
int(self.node.manufacturer_id, 16),
int(self.node.product_id, 16),
)
if specific_sensor_key in DEVICE_MAPPINGS:
if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZXT_120:
_LOGGER.debug("Remotec ZXT-120 Zwave Thermostat workaround")
self._zxt_120 = 1
self.update_properties()
@property
def supported_features(self):
"""Return the list of supported features."""
support = SUPPORT_TARGET_TEMPERATURE
if self.values.fan_mode:
support |= SUPPORT_FAN_MODE
if self._zxt_120 == 1 and self.values.zxt_120_swing_mode:
support |= SUPPORT_SWING_MODE
if self._aux_heat:
support |= SUPPORT_AUX_HEAT
if self._preset_list:
support |= SUPPORT_PRESET_MODE
return support
def update_properties(self):
"""Handle the data changes for node values."""
# Operation Mode
self._update_operation_mode()
# Current Temp
self._update_current_temp()
# Fan Mode
self._update_fan_mode()
# Swing mode
self._update_swing_mode()
# Set point
self._update_target_temp()
# Operating state
self._update_operating_state()
# Fan operating state
self._update_fan_state()
def _update_operation_mode(self):
"""Update hvac and preset modes."""
if self.values.mode:
self._hvac_list = []
self._hvac_mapping = {}
self._preset_list = []
self._preset_mapping = {}
mode_list = self.values.mode.data_items
if mode_list:
for mode in mode_list:
ha_mode = HVAC_STATE_MAPPINGS.get(str(mode).lower())
ha_preset = PRESET_MAPPINGS.get(str(mode).lower())
if mode == AUX_HEAT_ZWAVE_MODE:
# Aux Heat should not be included in any mapping
self._aux_heat = True
elif ha_mode and ha_mode not in self._hvac_mapping:
self._hvac_mapping[ha_mode] = mode
self._hvac_list.append(ha_mode)
elif ha_preset and ha_preset not in self._preset_mapping:
self._preset_mapping[ha_preset] = mode
self._preset_list.append(ha_preset)
else:
# If nothing matches
self._preset_list.append(mode)
# Default operation mode
for mode in DEFAULT_HVAC_MODES:
if mode in self._hvac_mapping.keys():
self._default_hvac_mode = mode
break
if self._preset_list:
# Presets are supported
self._preset_list.append(PRESET_NONE)
current_mode = self.values.mode.data
_LOGGER.debug("current_mode=%s", current_mode)
_hvac_temp = next(
(
key
for key, value in self._hvac_mapping.items()
if value == current_mode
),
None,
)
if _hvac_temp is None:
# The current mode is not a hvac mode
if (
"heat" in current_mode.lower()
and HVAC_MODE_HEAT in self._hvac_mapping.keys()
):
# The current preset modes maps to HVAC_MODE_HEAT
_LOGGER.debug("Mapped to HEAT")
self._hvac_mode = HVAC_MODE_HEAT
elif (
"cool" in current_mode.lower()
and HVAC_MODE_COOL in self._hvac_mapping.keys()
):
# The current preset modes maps to HVAC_MODE_COOL
_LOGGER.debug("Mapped to COOL")
self._hvac_mode = HVAC_MODE_COOL
else:
# The current preset modes maps to self._default_hvac_mode
_LOGGER.debug("Mapped to DEFAULT")
self._hvac_mode = self._default_hvac_mode
self._preset_mode = next(
(
key
for key, value in self._preset_mapping.items()
if value == current_mode
),
current_mode,
)
else:
# The current mode is a hvac mode
self._hvac_mode = _hvac_temp
self._preset_mode = PRESET_NONE
_LOGGER.debug("self._hvac_mapping=%s", self._hvac_mapping)
_LOGGER.debug("self._hvac_list=%s", self._hvac_list)
_LOGGER.debug("self._hvac_mode=%s", self._hvac_mode)
_LOGGER.debug("self._default_hvac_mode=%s", self._default_hvac_mode)
_LOGGER.debug("self._hvac_action=%s", self._hvac_action)
_LOGGER.debug("self._aux_heat=%s", self._aux_heat)
_LOGGER.debug("self._preset_mapping=%s", self._preset_mapping)
_LOGGER.debug("self._preset_list=%s", self._preset_list)
_LOGGER.debug("self._preset_mode=%s", self._preset_mode)
def _update_current_temp(self):
"""Update current temperature."""
if self.values.temperature:
self._current_temperature = self.values.temperature.data
device_unit = self.values.temperature.units
if device_unit is not None:
self._unit = device_unit
def _update_fan_mode(self):
"""Update fan mode."""
if self.values.fan_mode:
self._current_fan_mode = self.values.fan_mode.data
fan_modes = self.values.fan_mode.data_items
if fan_modes:
self._fan_modes = list(fan_modes)
_LOGGER.debug("self._fan_modes=%s", self._fan_modes)
_LOGGER.debug("self._current_fan_mode=%s", self._current_fan_mode)
def _update_swing_mode(self):
"""Update swing mode."""
if self._zxt_120 == 1:
if self.values.zxt_120_swing_mode:
self._current_swing_mode = self.values.zxt_120_swing_mode.data
swing_modes = self.values.zxt_120_swing_mode.data_items
if swing_modes:
self._swing_modes = list(swing_modes)
_LOGGER.debug("self._swing_modes=%s", self._swing_modes)
_LOGGER.debug("self._current_swing_mode=%s", self._current_swing_mode)
def _update_target_temp(self):
"""Update target temperature."""
if self.values.primary.data == 0:
_LOGGER.debug(
"Setpoint is 0, setting default to " "current_temperature=%s",
self._current_temperature,
)
if self._current_temperature is not None:
self._target_temperature = round((float(self._current_temperature)), 1)
else:
self._target_temperature = round((float(self.values.primary.data)), 1)
def _update_operating_state(self):
"""Update operating state."""
if self.values.operating_state:
mode = self.values.operating_state.data
self._hvac_action = HVAC_CURRENT_MAPPINGS.get(str(mode).lower(), mode)
def _update_fan_state(self):
"""Update fan state."""
if self.values.fan_action:
self._fan_action = self.values.fan_action.data
@property
def fan_mode(self):
"""Return the fan speed set."""
return self._current_fan_mode
@property
def fan_modes(self):
"""Return a list of available fan modes."""
return self._fan_modes
@property
def swing_mode(self):
"""Return the swing mode set."""
return self._current_swing_mode
@property
def swing_modes(self):
"""Return a list of available swing modes."""
return self._swing_modes
@property
def temperature_unit(self):
"""Return the unit of measurement."""
if self._unit == "C":
return TEMP_CELSIUS
if self._unit == "F":
return TEMP_FAHRENHEIT
return self._unit
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
if self.values.mode:
return self._hvac_mode
return self._default_hvac_mode
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
if self.values.mode:
return self._hvac_list
return []
@property
def hvac_action(self):
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
return self._hvac_action
@property
def is_aux_heat(self):
"""Return true if aux heater."""
if not self._aux_heat:
return None
if self.values.mode.data == AUX_HEAT_ZWAVE_MODE:
return True
return False
@property
def preset_mode(self):
"""Return preset operation ie. eco, away.
Need to be one of PRESET_*.
"""
if self.values.mode:
return self._preset_mode
return PRESET_NONE
@property
def preset_modes(self):
"""Return the list of available preset operation modes.
Need to be a subset of PRESET_MODES.
"""
if self.values.mode:
return self._preset_list
return []
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
_LOGGER.debug("Set temperature to %s", kwargs.get(ATTR_TEMPERATURE))
if kwargs.get(ATTR_TEMPERATURE) is None:
return
self.values.primary.data = kwargs.get(ATTR_TEMPERATURE)
def set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
_LOGGER.debug("Set fan mode to %s", fan_mode)
if not self.values.fan_mode:
return
self.values.fan_mode.data = fan_mode
def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
_LOGGER.debug("Set hvac_mode to %s", hvac_mode)
if not self.values.mode:
return
operation_mode = self._hvac_mapping.get(hvac_mode)
_LOGGER.debug("Set operation_mode to %s", operation_mode)
self.values.mode.data = operation_mode
def turn_aux_heat_on(self):
"""Turn auxillary heater on."""
if not self._aux_heat:
return
operation_mode = AUX_HEAT_ZWAVE_MODE
_LOGGER.debug("Aux heat on. Set operation mode to %s", operation_mode)
self.values.mode.data = operation_mode
def turn_aux_heat_off(self):
"""Turn auxillary heater off."""
if not self._aux_heat:
return
if HVAC_MODE_HEAT in self._hvac_mapping:
operation_mode = self._hvac_mapping.get(HVAC_MODE_HEAT)
else:
operation_mode = self._hvac_mapping.get(HVAC_MODE_OFF)
_LOGGER.debug("Aux heat off. Set operation mode to %s", operation_mode)
self.values.mode.data = operation_mode
def set_preset_mode(self, preset_mode):
"""Set new target preset mode."""
_LOGGER.debug("Set preset_mode to %s", preset_mode)
if not self.values.mode:
return
if preset_mode == PRESET_NONE:
# Activate the current hvac mode
self._update_operation_mode()
operation_mode = self._hvac_mapping.get(self.hvac_mode)
_LOGGER.debug("Set operation_mode to %s", operation_mode)
self.values.mode.data = operation_mode
else:
operation_mode = self._preset_mapping.get(preset_mode, preset_mode)
_LOGGER.debug("Set operation_mode to %s", operation_mode)
self.values.mode.data = operation_mode
def set_swing_mode(self, swing_mode):
"""Set new target swing mode."""
_LOGGER.debug("Set swing_mode to %s", swing_mode)
if self._zxt_120 == 1:
if self.values.zxt_120_swing_mode:
self.values.zxt_120_swing_mode.data = swing_mode
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
data = super().device_state_attributes
if self._fan_action:
data[ATTR_FAN_ACTION] = self._fan_action
return data
|
py | b41000443329f03b24e35f3e5adecd246df44f0b | from pathlib import Path
from typing import List
import re
# ______________________________________________________________________________
# //////////////////////////////////////////////////////////// GET OR CREATE DIR
def find_or_create_target_directory(
path: Path, search_keys: List[str], target_directory_path: Path
) -> Path:
"""
Summary
-------
This function recursively searches for a target directory path, creating folders on the way (in case of a nested target directory path).
Notes
-----
    Gets used in conftest.py in the test suite, in order to create the file structure needed for the tests.
"""
# Base Case - we found what we were looking for
if path == target_directory_path:
return path
else:
        # Try finding a directory whose name includes the first search key
try:
# This raises an IndexError if the list is empty
path = [
directory_name
for directory_name in path.iterdir()
if search_keys[0] in str(directory_name)
][0]
except IndexError as error:
# If there's no directory for the first search key, create one
path = path / search_keys[0]
# Create a new directory
path.mkdir()
finally:
# Delete the first search key
del search_keys[0]
        return find_or_create_target_directory(
path=path,
search_keys=search_keys,
target_directory_path=target_directory_path,
)
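# A minimal usage sketch (hypothetical directory names; `tmp_path` stands in for
# any base pathlib.Path, e.g. pytest's tmp_path fixture):
#
#     target = tmp_path / "fixtures" / "data"
#     result = find_or_create_target_directory(
#         path=tmp_path,
#         search_keys=["fixtures", "data"],
#         target_directory_path=target,
#     )
#     # result == target, and both nested directories now exist on disk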
# ______________________________________________________________________________
# ////////////////////////////////////////////////////////// CAMEL TO SNAKE CASE
def reformat_camel_to_snake_case(camel_case_string: str) -> str:
""" Converts CamelCase strings snake_case """
# Splitting on UpperCase using re and lowercasing it
array_with_lower_case_strings = [
s.lower() for s in re.split("([A-Z][^A-Z]*)", camel_case_string) if s
]
# Combine lower cased strings with underscores
snake_cased_string = "_".join(array_with_lower_case_strings)
return snake_cased_string
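# Illustrative behaviour (value chosen for demonstration):
#
#     >>> reformat_camel_to_snake_case("StopReasonArray")
#     'stop_reason_array'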
# ______________________________________________________________________________
# ////////////////////////////////////////////////////////// SNAKE TO CAMEL CASE
def reformat_snake_to_camel_case(snake_case_string: str) -> str:
""" Converts snake_case strings to CamelCase """
    # Splitting the snake_case string and storing the capitalized parts as a list
    array_with_capitalized_strings = [
s.capitalize() for s in snake_case_string.split("_")
]
    # Combine the capitalized strings without underscores
    camel_case_string = "".join(array_with_capitalized_strings)
return camel_case_string |
py | b41000d5532780f73d4faeeff3590f75fd285050 |
print("Hi!")
name = input("What's your name? ")
print("It's nice to meet you,", name)
answer = input("Are you enjoying the course? ")
if answer == "Yes":
print("That's good to hear!")
else:
print("Oh no! That makes me sad!") |
py | b4100149d887470d0d24646b4e2d5e6169b69fb0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# whitebox documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import whitebox
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'whitebox'
copyright = u"2018, Qiusheng Wu"
author = u"Qiusheng Wu"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = whitebox.__version__
# The full version, including alpha/beta/rc tags.
release = whitebox.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {
'css_files': [
'_static/theme_overrides.css'
]
}
else:
html_context = {
'css_files': [
'//media.readthedocs.org/css/sphinx_rtd_theme.css',
'//media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css'
]
}
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'whiteboxdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'whitebox.tex',
u'whitebox Documentation',
u'Qiusheng Wu', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'whitebox',
u'whitebox Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'whitebox',
u'whitebox Documentation',
author,
'whitebox',
'One line description of project.',
'Miscellaneous'),
]
|
py | b41002879e8c25925affb5e6b6b0f7a139423277 | #! /usr/bin/env python3
# Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import sys
from autoware_planning_msgs.msg import StopReasonArray
from case_converter import pascal2snake
from geometry_msgs.msg import PoseStamped
import numpy as np
import rclpy
from rclpy.node import Node
from rtree import index
from self_pose_listener import SelfPoseListener
class StopReason2PoseNode(Node):
def __init__(self, options):
super().__init__('stop_reason2pose_node')
self._options = options
self._sub_pose = self.create_subscription(
StopReasonArray, self._options.topic_name, self._on_stop_reasons, 1)
self._pub_pose_map = {}
self._idx_map = {}
self._pose_map = {}
self._self_pose_listener = SelfPoseListener()
self.timer = self.create_timer(
(1.0 / 100), self._self_pose_listener.get_current_pose)
def _on_stop_reasons(self, msg):
for stop_reason in msg.stop_reasons:
snake_case_stop_reason = pascal2snake(stop_reason.reason)
if len(stop_reason.stop_factors) == 0:
self.get_logger().warn('stop_factor is null')
return
for stop_factor in stop_reason.stop_factors:
pose = PoseStamped()
pose.header = msg.header
pose.pose = stop_factor.stop_pose
# Get nearest pose
th_dist = 1.0
nearest_pose_id = self._get_nearest_pose_id(
snake_case_stop_reason, pose.pose, th_dist)
if nearest_pose_id:
self._update_pose(
snake_case_stop_reason, pose.pose, nearest_pose_id)
pose_id = nearest_pose_id
else:
pose_id = self._register_pose(
snake_case_stop_reason, pose.pose)
pose_topic_name = '{snake_case_stop_reason}_{pose_id}'.format(
**locals())
topic_ns = '/autoware_debug_tools/stop_reason2pose/'
if pose_topic_name not in self._pub_pose_map:
self._pub_pose_map[pose_topic_name] = self.create_publisher(
PoseStamped, topic_ns + pose_topic_name, 1)
self._pub_pose_map[pose_topic_name].publish(pose)
# Publish nearest stop_reason without number
nearest_pose = PoseStamped()
nearest_pose.header = msg.header
nearest_pose.pose = self._get_nearest_pose_in_array(
stop_reason, self._self_pose_listener.self_pose)
if nearest_pose.pose:
if snake_case_stop_reason not in self._pub_pose_map:
topic_ns = '/autoware_debug_tools/stop_reason2pose/'
self._pub_pose_map[snake_case_stop_reason] = self.create_publisher(
PoseStamped, topic_ns + snake_case_stop_reason, 1)
self._pub_pose_map[snake_case_stop_reason].publish(
nearest_pose)
def _get_nearest_pose_in_array(self, stop_reason, self_pose):
poses = [stop_factor.stop_pose for stop_factor in stop_reason.stop_factors]
if not poses:
return None
        # Materialise the distances so np.argmin gets a real sequence
        distances = [
            StopReason2PoseNode.calc_distance2d(p, self_pose) for p in poses
        ]
        nearest_idx = np.argmin(distances)
        return poses[nearest_idx]
def _find_nearest_pose_id(self, name, pose):
if name not in self._idx_map:
self._idx_map[name] = index.Index()
return self._idx_map[name].nearest(
StopReason2PoseNode.pose2boundingbox(pose), 1)
def _get_nearest_pose_id(self, name, pose, th_dist):
nearest_pose_ids = list(self._find_nearest_pose_id(name, pose))
if not nearest_pose_ids:
return None
nearest_pose_id = nearest_pose_ids[0]
nearest_pose = self._get_pose(name, nearest_pose_id)
if not nearest_pose:
return None
dist = StopReason2PoseNode.calc_distance2d(pose, nearest_pose)
if dist > th_dist:
return None
return nearest_pose_id
def _get_pose(self, name, pose_id):
if name not in self._pose_map:
return None
return self._pose_map[name][pose_id]
def _update_pose(self, name, pose, pose_id):
        self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(
pose_id, StopReason2PoseNode.pose2boundingbox(pose))
def _register_pose(self, name, pose):
if name not in self._pose_map:
self._pose_map[name] = {}
pose_id = len(self._pose_map[name]) + 1
self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(
pose_id, StopReason2PoseNode.pose2boundingbox(pose))
return pose_id
@staticmethod
def calc_distance2d(pose1, pose2):
p1 = pose1.position
p2 = pose2.position
return math.hypot(p1.x - p2.x, p1.y - p2.y)
@staticmethod
def pose2boundingbox(pose):
return [pose.position.x, pose.position.y,
pose.position.x, pose.position.y]
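# Note: pose2boundingbox() collapses a pose to a zero-area box (x, y, x, y) so a
# single point can be stored in, and queried from, the rtree index used above.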
def main(args):
rclpy.init()
parser = argparse.ArgumentParser()
parser.add_argument('topic_name', type=str)
ns = parser.parse_args(args)
stop_reason2pose_node = StopReason2PoseNode(ns)
rclpy.spin(stop_reason2pose_node)
stop_reason2pose_node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main(sys.argv[1:])
|
py | b41002990253b59a406f5b40dad8eff4d3c44615 | from setuptools import setup, find_packages
setup(
name="skallel-stats",
description="Statistical functions for genome variation data.",
packages=find_packages("src"),
package_dir={"": "src"},
setup_requires=["setuptools>18.0", "setuptools-scm>1.5.4"],
install_requires=[
"numpy", "scipy", "numba", "dask[complete]>=2.0.0", "multipledispatch"
],
use_scm_version={
"version_scheme": "guess-next-dev",
"local_scheme": "dirty-tag",
"write_to": "src/skallel_stats/version.py",
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
maintainer="Alistair Miles",
maintainer_email="[email protected]",
url="https://github.com/scikit-allel/skallel-stats",
license="MIT",
include_package_data=True,
zip_safe=False,
)
|
py | b410039c8655edf38609efa174050cb7012f72be | # Generated by Django 2.0.2 on 2018-06-11 10:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_project_owner'),
]
operations = [
migrations.AlterModelOptions(
name='customuser',
options={'verbose_name_plural': 'Users'},
),
migrations.AddField(
model_name='profile',
name='uid_number',
field=models.PositiveIntegerField(null=True, verbose_name='UID Number'),
),
]
|
py | b4100535ca86230ed04fe9ae7693e68ed2e6f5bf | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# BSD License and Copyright Notice ============================================
# Copyright (c) 2014, Lojack
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the TESDumpStats nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
"""Everything included into a single source file, so it's easy to copy/paste
into a directory and run and cleanup afterwards."""
# Python Version Check --------------------------------------------------------
# Cannot do this via the shebang, since 3.3 or 3.4 are both valid (and newer
# version when they come out). So instead have to specify 3.x on the shebang
# and manually check versions here.
# Python 3.1 and older:
# - does not have argparse
# Python 3.2 and older:
# - does not have shutil.get_terminal_size
# - does not have 'flush' as a keyword for the print function
import sys
print("Running on Python {0}.{1}.{2}".format(*sys.version_info))
if sys.version_info < (3,3):
print("TESDumpStats requires Python 3.3 or higher.")
sys.exit(0)
# Imports ---------------------------------------------------------------------
import traceback
import datetime
import binascii
import argparse
import shutil
import struct
import time
import zlib
import re
import os
import io
# Regex to find valid plugin files
rePlugin = re.compile(r'\.es[mp](.ghost)?$', re.M|re.U|re.I)
# List of official plugin names
officialPlugins = [x.lower()
for y in ('Skyrim.esm', # Skyrim
'Update.esm',
'Dawnguard.esm',
'Hearthfires.esm',
'Dragonborn.esm',
'Fallout3.esm', # Fallout 3
'Anchorage.esm',
'BrokenSteel.esm',
'PointLookout.esm',
'ThePitt.esm',
'Zeta.esm',
'FalloutNV.esm', # Fallout New Vegas
'CaravanPack.esm',
'ClassicPack.esm',
'DeadMoney.esm',
'GunRunnersArsenal.esm',
'HonestHearts.esm',
'LonesomeRoad.esm',
'MercenaryPack.esm',
'OldWorldBlues.esm',
'TribalPack.esm',
'Oblivion.esm', # Oblivion
# 'DLCShiveringIsles.esp, # dummy plugin
'DLCFrostcrag.esp',
'DLCBattlehornCastle.esp',
'DLCSpellTomes.esp',
'DLCMehrunesRazor.esp',
'DLCOrrery.esp',
'DLCThievesDen.esp',
'DLCHorseArmor.esp',
'DLCVileLair.esp',
'Knights.esp',
'Fallout4.esm', # Fallout4
'dlcrobot.esm',
'dlcworkshop01.esm',
'dlccoast.esm',
'dlcworkshop02.esm',
'dlcworkshop03.esm',
'dlcnukaworld.esm',
)
for x in (y, y+'.ghost')]
# Oblivon/Rest of the Games format specific data
gameFormat = {
# Oblivion
True: {'headerSize': 20,
},
# non-Oblivion
False: {'headerSize': 24,
},
}
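# Note (assumption, inferred from the sizes above): record and GRUP headers are
# 20 bytes in Oblivion and 24 bytes in the later games; the extra 4 bytes hold
# the version/unknown UInt16 pair that dumpGRUPOrRecord() reads for non-Oblivion
# plugins.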
# Command line parser
parser = argparse.ArgumentParser(prog='TESDumpStats',
add_help=True)
parser.add_argument('-a', '--a',
dest='all',
action='store_true',
default=False,
help='Process all plugins in the Data directory.')
parser.add_argument('-p', '--plugin',
dest='plugin',
action='store',
type=str,
default='',
help='Process a single specified plugin.')
parser.add_argument('-o', '--output',
dest='output',
action='store',
type=str,
default='',
help='Specify the output directory for dumped stats.')
parser.add_argument('-s', '--split',
dest='split',
action='store_true',
default=False,
help='Create a separate dump file for each plugin.')
parser.add_argument('-O', '--Oblivion', '--oblivion',
dest='oblivion',
action='store_true',
default=False,
help='Process all plugins as Oblivion plugins.')
class FileReader(io.FileIO):
"""File-object with convenience reading functions."""
def unpack(self, fmt):
return struct.unpack(fmt, self.read(struct.calcsize(fmt)))
def readUByte(self): return struct.unpack('B', self.read(1))[0]
def readUInt16(self): return struct.unpack('H', self.read(2))[0]
def readUInt32(self): return struct.unpack('I', self.read(4))[0]
def readUInt64(self): return struct.unpack('Q', self.read(8))[0]
def readByte(self): return struct.unpack('b', self.read(1))[0]
def readInt16(self): return struct.unpack('h', self.read(2))[0]
def readInt32(self): return struct.unpack('i', self.read(4))[0]
def readInt64(self): return struct.unpack('q', self.read(8))[0]
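# A minimal usage sketch (illustrative file name): read a 4-byte record
# signature followed by an unsigned 32-bit size field.
#
#     with FileReader('Skyrim.esm', 'rb') as ins:
#         signature = ins.read(4)      # e.g. b'TES4'
#         data_size = ins.readUInt32()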
class Progress(object):
"""Simple console-based progress bar."""
__slots__ = ('prefix', 'end', 'cur', 'length', 'outFile', 'percent',)
def __init__(self, prefix='', maxValue=100, length=20, percent=True,
padPrefix=None, file=sys.stdout):
if padPrefix:
self.prefix = ('{:<%s}' % padPrefix).format(prefix)
else:
self.prefix = prefix
# Make sure it'll fit in the console
try:
maxWidth = shutil.get_terminal_size()[0]
except:
maxWidth = 80
# Calculate length of current message
# +9 accounts for spacing, brackets, percentage, and one empty
# space to prevent scrolling to the next line automatically
width = len(self.prefix) + length + 9
if width > maxWidth:
extra = width - maxWidth
# Too long, make things smaller
if length > 20:
remove = length - max(20, length - extra)
extra -= remove
length -= remove
if extra > 0:
# Still too much
remove = self.prefix[-extra:]
self.prefix = self.prefix[:-extra]
if remove != ' '*extra:
# Had to remove some text
self.prefix = self.prefix[:-3] + '...'
self.end = maxValue
self.length = length
self.cur = 0
self.outFile = file
self.percent = percent
def update(self):
"""Increment progress by 1."""
self(self.cur+1)
def __call__(self, pos):
"""Set progress bar to specified amount."""
self.cur = max(0, min(self.end, pos))
# Build progress bar:
percent = self.cur / self.end
filled = percent * self.length
partial = filled - int(filled)
filled = int(filled)
empty = self.length - filled - 1
bar = '#' * filled
if self.cur == 0:
bar += ' '
elif self.cur == self.end:
pass
elif partial < 0.25:
bar += ' '
elif partial < 0.50:
bar += '-'
elif partial < 0.75:
bar += '+'
else:
bar += '*'
bar += ' ' * empty
# Print
msg = '\r%s [%s]' % (self.prefix, bar)
if self.percent:
msg += '{:>4}%'.format(int(percent * 100))
print(msg, end='', file=self.outFile, flush=True)
def fill(self):
self(self.end)
print('', file=self.outFile)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.fill()
else:
print('', file=self.outFile)
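# A minimal usage sketch (hypothetical `items`/`process`): the bar fills itself
# when the `with` block exits normally.
#
#     with Progress('Scanning', maxValue=len(items)) as progress:
#         for done, item in enumerate(items, start=1):
#             process(item)
#             progress(done)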
def main():
"""Main function, fires everything off."""
opts = parser.parse_args()
if opts.all:
# Dump stats for every plugin
to_dump = [x.lower() for x in os.listdir() if rePlugin.search(x)]
to_dump.sort()
        # Move the official plugins (Skyrim.esm, Update.esm, ...) to the front
for plugin in reversed(officialPlugins):
if plugin in to_dump:
i = to_dump.index(plugin)
del to_dump[i]
to_dump.insert(0, plugin)
elif opts.plugin:
# Dump specified plugin
plugin = opts.plugin.lower()
        if plugin.endswith('.ghost'):
            # Strip the '.ghost' extension; both variants are checked below
            plugin = plugin[:-6]
to_dump = [x for x in (plugin, plugin+'.ghost')
if os.path.exists(x)]
else:
# Only dump stats for offical plugins
to_dump = [x for x in officialPlugins if os.path.exists(x)]
if not to_dump:
print('Could not find any plugins to dump. Are you sure TESDumpStats'
' is in the Skyrim Data directory?')
return
# Check to see if any plugins also have their ghosted version present.
# We'll dump both if they exist, but something wonky could be up with the
# user's Data directory.
dupes = []
for plugin in to_dump:
if plugin+'.ghost' in to_dump:
dupes.append(plugin)
if dupes:
print('WARNING: The following plugins exist in your Data directory as'
' both plugins and ghosted plugins. Something may be wrong! '
'The plugins will both be processed however.')
for dupe in dupes:
print(' ', dupe)
# Setup output directory/file
timestamp = time.strftime('%Y-%m-%d_%H%M.%S')
outDir = (opts.output if opts.output else
os.path.join(os.getcwd(),'TESDumpStats'))
if opts.split:
outDir = os.path.join(outDir, timestamp)
try:
if not os.path.exists(outDir):
os.makedirs(outDir)
testFile = os.path.join(outDir,'test.txt')
with open(testFile,'wb'):
pass
os.remove(testFile)
except Exception as e:
print('ERROR: Could not setup output path specified:\n\n'
' ' + outDir + '\n\n'
' ' + str(e) + '\n\n')
return
# Start dumping
print('Beginning dump.')
print('Output directory:', outDir)
print('')
stats = dict()
padLength = max([len(x) for x in to_dump])
try:
for plugin in to_dump:
s = stats.setdefault(plugin, dict())
dumpPlugin(plugin, s, padLength, opts.oblivion)
print('Writing statistics...')
printStats(stats, outDir, opts)
print('Dump complete.')
except KeyboardInterrupt:
print('Dump canceled.')
def dumpPlugin(fileName, stats, padLength, oblivion=False):
"""Gets stats about records, etc from fileName, updates stats dict,
then prints results to outFile."""
s = dict()
# GRUP/plugin header size
headerSize = gameFormat[oblivion]['headerSize']
# Get basic stats on the file
stats['size'] = size = os.path.getsize(fileName)
stats['time'] = os.path.getmtime(fileName)
with Progress(fileName, size, padPrefix=padLength) as progress:
try:
with FileReader(fileName, 'rb') as ins:
# Calculate CRC32
crc = 0
while ins.tell() < size:
crc = binascii.crc32(ins.read(2097152), crc)
crc = crc & 0xFFFFFFFF
stats['crc'] = crc
ins.seek(0)
# No error checking, just assume everything is properly formed
s = stats['records'] = dict()
# Read TES4 record + GRUPs
while ins.tell() < size:
dumpGRUPOrRecord(ins, s, size, progress, oblivion, headerSize)
except KeyboardInterrupt:
raise
except Exception as e:
print('ERROR: Unhandled exception\n')
traceback.print_exc()
def formatSize(size):
suffix = 'B'
if size > 1024*10:
size /= 1024
suffix = 'KB'
if size > 1024*10:
size /= 1024
suffix = 'MB'
if size > 1024*10:
size /= 1024
suffix = 'GB'
return '%i %s' % (int(size), suffix)
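# A few illustrative values, following the /1024 thresholds above:
#
#     formatSize(512)               -> '512 B'
#     formatSize(20480)             -> '20 KB'
#     formatSize(350 * 1024 * 1024) -> '350 MB'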
def printRecordStats(stats, outFile):
for Type in sorted(stats):
print('', Type, file=outFile)
recStats = stats[Type]
count = recStats['count']
print(' Count:', count, file=outFile)
sizes = recStats['sizes']
minsize = min(sizes)
maxsize = max(sizes)
compressed = recStats['compressed']
if compressed == count:
print(' All compressed', file=outFile)
elif compressed > 0:
print(' Compressed: %i / %i' % (compressed, count), file=outFile)
else:
print(' None compressed', file=outFile)
if minsize == maxsize:
print(' Size:', maxsize, file=outFile)
else:
print(' Min Size:', minsize, file=outFile)
print(' Max Size:', maxsize, file=outFile)
# Subrecords
print(' Subrecords:', file=outFile)
for subtype in sorted(recStats):
if subtype in ('count','sizes','compressed'):
continue
subStats = recStats[subtype]
subCounts = subStats['counts']
if len(subCounts) == count:
# At least 1 per record
print(' ', subtype, '- Required', file=outFile)
else:
print(' ', subtype,
'- %i / %i records' % (len(subCounts), count),
file=outFile)
maxcount = max(subCounts)
mincount = min(subCounts)
if maxcount == mincount:
print(' Count:', maxcount, file=outFile)
else:
print(' Min Count:', mincount, file=outFile)
print(' Max Count:', maxcount, file=outFile)
sizes = subStats['sizes']
maxsize = max(sizes)
minsize = min(sizes)
if maxsize == minsize:
print(' Size:', maxsize, file=outFile)
else:
print(' Min Size:', minsize, file=outFile)
print(' Max Size:', maxsize, file=outFile)
print('', file=outFile)
def mergeRecordStats(dest, source):
for Type in source:
if Type not in dest:
dest[Type] = source[Type]
else:
destRecStats = dest[Type]
sourceRecStats = source[Type]
# Merge
destRecStats['count'] += sourceRecStats['count']
destRecStats['sizes'].extend(sourceRecStats['sizes'])
destRecStats['compressed'] += sourceRecStats['compressed']
# Subrecords
for subType in sourceRecStats:
if subType in ('compressed','sizes','count'):
continue
if subType not in destRecStats:
destRecStats[subType] = sourceRecStats[subType]
else:
destSubStats = destRecStats[subType]
sourceSubStats = sourceRecStats[subType]
destSubStats['counts'].extend(sourceSubStats['counts'])
destSubStats['sizes'].extend(sourceSubStats['sizes'])
def printStats(stats, outDir, opts):
outName = os.path.join(outDir, time.strftime('%Y-%m-%d_%H%M.%S_dump.txt'))
if not opts.split:
# Make sure combined output file is empty
if os.path.exists(outName):
os.remove(outName)
mode = 'a+'
else:
mode = 'w'
allstats = dict()
for plugin in stats:
if opts.split:
outName = os.path.join(outDir, plugin+'.txt')
with open(outName, mode) as outFile:
print(plugin, file=outFile)
pstats = stats[plugin]
print(' File Size:', formatSize(pstats['size']), file=outFile)
print(' File Date:',
datetime.datetime.fromtimestamp(pstats['time']),
file=outFile)
print(' File CRC: 0x%X' % pstats['crc'], file=outFile)
recStats = pstats['records']
printRecordStats(recStats, outFile)
mergeRecordStats(allstats, recStats)
if len(stats) > 1:
if opts.split:
outName = os.path.join(outDir, 'combined_stats.txt')
with open(outName, mode) as outFile:
print('Combined Stats:', file=outFile)
printRecordStats(allstats, outFile)
def dumpGRUPOrRecord(ins, stats, end, progress, oblivion, headerSize):
pos = ins.tell()
progress(pos)
if pos+headerSize > end:
ins.seek(end)
return
grup = ins.read(4)
if grup == b'GRUP':
# It's a GRUP
size = ins.readUInt32() - headerSize
label = ins.read(4)
Type = ins.readInt32()
stamp = ins.readUInt16()
unk1 = ins.readUInt16()
if not oblivion:
version = ins.readUInt16()
unk2 = ins.readUInt16()
pos = ins.tell()
if pos+size > end:
ins.seek(end)
return
# Data
while ins.tell() < pos+size:
dumpGRUPOrRecord(ins, stats, pos+size, progress, oblivion, headerSize)
else:
Type = grup.decode('ascii')
dataSize = ins.readUInt32()
flags = ins.readUInt32()
id = ins.readUInt32()
revision = ins.readUInt32()
if not oblivion:
version = ins.readUInt16()
unk = ins.readUInt16()
if not flags & 0x20: # Not deleted
# Data
s = stats.setdefault(Type, dict())
num = s.get('count', 0)
s['count'] = num + 1
num = s.get('compressed', 0)
data = ins.read(dataSize)
if flags & 0x00040000:
# Data is compressed
                uncompSize = struct.unpack('I', data[:4])[0]
data = zlib.decompress(data[4:])
num += 1
s['compressed'] = num
s.setdefault('sizes',[]).append(len(data))
dumpSubRecords(data, s)
# Ensure we're at the end of the record
ins.seek(pos+dataSize+headerSize)
def dumpSubRecords(data, stats):
size = len(data)
pos = 0
counts = dict()
while pos < size - 6:
subType = data[pos:pos+4].decode('ascii')
pos += 4
if subType == 'XXXX':
subSize = struct.unpack('I', data[pos:pos+4])[0]
pos += 4
subType = data[pos:pos+4].decode('ascii')
pos += 4
pos += 2 # datasize
else:
subSize = struct.unpack('H', data[pos:pos+2])[0]
pos += 2
if pos+subSize > size:
break
pos += subSize
s = stats.setdefault(subType, dict())
num = counts.get(subType,0)
counts[subType] = num + 1
s.setdefault('sizes',[]).append(subSize)
for subType in counts:
stats[subType].setdefault('counts',[]).append(counts[subType])
if __name__=='__main__':
main()
|
py | b41005763991653f447dbb86ca9ba81da737a92f |
"""Low-level functions for building Grafana dashboards.
The functions in this module don't enforce Weaveworks policy, and only mildly
encourage it by way of some defaults. Rather, they are ways of building
arbitrary Grafana JSON.
"""
import itertools
import math
import string
import warnings
from numbers import Number
import attr
from attr.validators import in_, instance_of
@attr.s
class RGBA(object):
r = attr.ib(validator=instance_of(int))
g = attr.ib(validator=instance_of(int))
b = attr.ib(validator=instance_of(int))
a = attr.ib(validator=instance_of(float))
def to_json_data(self):
return "rgba({}, {}, {}, {})".format(self.r, self.g, self.b, self.a)
@attr.s
class RGB(object):
r = attr.ib(validator=instance_of(int))
g = attr.ib(validator=instance_of(int))
b = attr.ib(validator=instance_of(int))
def to_json_data(self):
return "rgb({}, {}, {})".format(self.r, self.g, self.b)
@attr.s
class Pixels(object):
num = attr.ib(validator=instance_of(int))
def to_json_data(self):
return "{}px".format(self.num)
@attr.s
class Percent(object):
num = attr.ib(default=100, validator=instance_of(Number))
def to_json_data(self):
return "{}%".format(self.num)
GREY1 = RGBA(216, 200, 27, 0.27)
GREY2 = RGBA(234, 112, 112, 0.22)
BLUE_RGBA = RGBA(31, 118, 189, 0.18)
BLUE_RGB = RGB(31, 120, 193)
GREEN = RGBA(50, 172, 45, 0.97)
ORANGE = RGBA(237, 129, 40, 0.89)
RED = RGBA(245, 54, 54, 0.9)
BLANK = RGBA(0, 0, 0, 0.0)
WHITE = RGB(255, 255, 255)
INDIVIDUAL = 'individual'
CUMULATIVE = 'cumulative'
NULL_CONNECTED = 'connected'
NULL_AS_ZERO = 'null as zero'
NULL_AS_NULL = 'null'
FLOT = 'flot'
ABSOLUTE_TYPE = 'absolute'
DASHBOARD_TYPE = 'dashboard'
ROW_TYPE = 'row'
GRAPH_TYPE = 'graph'
DISCRETE_TYPE = 'natel-discrete-panel'
STAT_TYPE = 'stat'
SINGLESTAT_TYPE = 'singlestat'
STATE_TIMELINE_TYPE = 'state-timeline'
TABLE_TYPE = 'table'
TEXT_TYPE = 'text'
ALERTLIST_TYPE = 'alertlist'
BARGAUGE_TYPE = 'bargauge'
GAUGE_TYPE = 'gauge'
DASHBOARDLIST_TYPE = 'dashlist'
LOGS_TYPE = 'logs'
HEATMAP_TYPE = 'heatmap'
STATUSMAP_TYPE = 'flant-statusmap-panel'
SVG_TYPE = 'marcuscalidus-svg-panel'
PIE_CHART_TYPE = 'grafana-piechart-panel'
PIE_CHART_V2_TYPE = 'piechart'
TIMESERIES_TYPE = 'timeseries'
WORLD_MAP_TYPE = 'grafana-worldmap-panel'
NEWS_TYPE = 'news'
DEFAULT_FILL = 1
DEFAULT_REFRESH = '10s'
DEFAULT_ROW_HEIGHT = Pixels(250)
DEFAULT_LINE_WIDTH = 2
DEFAULT_POINT_RADIUS = 5
DEFAULT_RENDERER = FLOT
DEFAULT_STEP = 10
DEFAULT_LIMIT = 10
TOTAL_SPAN = 12
DARK_STYLE = 'dark'
LIGHT_STYLE = 'light'
UTC = 'utc'
SCHEMA_VERSION = 12
# (DEPRECATED: use formatunits.py) Y Axis formats
DURATION_FORMAT = 'dtdurations'
NO_FORMAT = 'none'
OPS_FORMAT = 'ops'
PERCENT_UNIT_FORMAT = 'percentunit'
DAYS_FORMAT = 'd'
HOURS_FORMAT = 'h'
MINUTES_FORMAT = 'm'
SECONDS_FORMAT = 's'
MILLISECONDS_FORMAT = 'ms'
SHORT_FORMAT = 'short'
BYTES_FORMAT = 'bytes'
BITS_PER_SEC_FORMAT = 'bps'
BYTES_PER_SEC_FORMAT = 'Bps'
NONE_FORMAT = 'none'
JOULE_FORMAT = 'joule'
WATTHOUR_FORMAT = 'watth'
WATT_FORMAT = 'watt'
KWATT_FORMAT = 'kwatt'
KWATTHOUR_FORMAT = 'kwatth'
VOLT_FORMAT = 'volt'
BAR_FORMAT = 'pressurebar'
PSI_FORMAT = 'pressurepsi'
CELSIUS_FORMAT = 'celsius'
KELVIN_FORMAT = 'kelvin'
GRAM_FORMAT = 'massg'
EUR_FORMAT = 'currencyEUR'
USD_FORMAT = 'currencyUSD'
METER_FORMAT = 'lengthm'
SQUARE_METER_FORMAT = 'areaM2'
CUBIC_METER_FORMAT = 'm3'
LITRE_FORMAT = 'litre'
PERCENT_FORMAT = 'percent'
VOLT_AMPERE_FORMAT = 'voltamp'
# Alert rule state
STATE_NO_DATA = 'no_data'
STATE_ALERTING = 'alerting'
STATE_KEEP_LAST_STATE = 'keep_state'
STATE_OK = 'ok'
# Evaluator
EVAL_GT = 'gt'
EVAL_LT = 'lt'
EVAL_WITHIN_RANGE = 'within_range'
EVAL_OUTSIDE_RANGE = 'outside_range'
EVAL_NO_VALUE = 'no_value'
# Reducer Type
# avg/min/max/sum/count/last/median/diff/percent_diff/count_non_null
RTYPE_AVG = 'avg'
RTYPE_MIN = 'min'
RTYPE_MAX = 'max'
RTYPE_SUM = 'sum'
RTYPE_COUNT = 'count'
RTYPE_LAST = 'last'
RTYPE_MEDIAN = 'median'
RTYPE_DIFF = 'diff'
RTYPE_PERCENT_DIFF = 'percent_diff'
RTYPE_COUNT_NON_NULL = 'count_non_null'
# Condition Type
CTYPE_QUERY = 'query'
# Operator
OP_AND = 'and'
OP_OR = 'or'
# Text panel modes
TEXT_MODE_MARKDOWN = 'markdown'
TEXT_MODE_HTML = 'html'
TEXT_MODE_TEXT = 'text'
# Datasource plugins
PLUGIN_ID_GRAPHITE = 'graphite'
PLUGIN_ID_PROMETHEUS = 'prometheus'
PLUGIN_ID_INFLUXDB = 'influxdb'
PLUGIN_ID_OPENTSDB = 'opentsdb'
PLUGIN_ID_ELASTICSEARCH = 'elasticsearch'
PLUGIN_ID_CLOUDWATCH = 'cloudwatch'
# Target formats
TIME_SERIES_TARGET_FORMAT = 'time_series'
TABLE_TARGET_FORMAT = 'table'
# Table Transforms
AGGREGATIONS_TRANSFORM = 'timeseries_aggregations'
ANNOTATIONS_TRANSFORM = 'annotations'
COLUMNS_TRANSFORM = 'timeseries_to_columns'
JSON_TRANSFORM = 'json'
ROWS_TRANSFORM = 'timeseries_to_rows'
TABLE_TRANSFORM = 'table'
# AlertList show selections
ALERTLIST_SHOW_CURRENT = 'current'
ALERTLIST_SHOW_CHANGES = 'changes'
# AlertList state filter options
ALERTLIST_STATE_OK = 'ok'
ALERTLIST_STATE_PAUSED = 'paused'
ALERTLIST_STATE_NO_DATA = 'no_data'
ALERTLIST_STATE_EXECUTION_ERROR = 'execution_error'
ALERTLIST_STATE_ALERTING = 'alerting'
ALERTLIST_STATE_PENDING = 'pending'
# Display Sort Order
SORT_ASC = 1
SORT_DESC = 2
SORT_IMPORTANCE = 3
# Template
REFRESH_NEVER = 0
REFRESH_ON_DASHBOARD_LOAD = 1
REFRESH_ON_TIME_RANGE_CHANGE = 2
SHOW = 0
HIDE_LABEL = 1
HIDE_VARIABLE = 2
SORT_DISABLED = 0
SORT_ALPHA_ASC = 1
SORT_ALPHA_DESC = 2
SORT_NUMERIC_ASC = 3
SORT_NUMERIC_DESC = 4
SORT_ALPHA_IGNORE_CASE_ASC = 5
SORT_ALPHA_IGNORE_CASE_DESC = 6
GAUGE_CALC_LAST = 'last'
GAUGE_CALC_FIRST = 'first'
GAUGE_CALC_MIN = 'min'
GAUGE_CALC_MAX = 'max'
GAUGE_CALC_MEAN = 'mean'
GAUGE_CALC_TOTAL = 'total'
GAUGE_CALC_COUNT = 'count'
GAUGE_CALC_RANGE = 'range'
GAUGE_CALC_DELTA = 'delta'
GAUGE_CALC_STEP = 'step'
GAUGE_CALC_DIFFERENCE = 'difference'
GAUGE_CALC_LOGMIN = 'logmin'
GAUGE_CALC_CHANGE_COUNT = 'changeCount'
GAUGE_CALC_DISTINCT_COUNT = 'distinctCount'
ORIENTATION_HORIZONTAL = 'horizontal'
ORIENTATION_VERTICAL = 'vertical'
GAUGE_DISPLAY_MODE_BASIC = 'basic'
GAUGE_DISPLAY_MODE_LCD = 'lcd'
GAUGE_DISPLAY_MODE_GRADIENT = 'gradient'
DEFAULT_AUTO_COUNT = 30
DEFAULT_MIN_AUTO_INTERVAL = '10s'
@attr.s
class Mapping(object):
name = attr.ib()
value = attr.ib(validator=instance_of(int))
def to_json_data(self):
return {
'name': self.name,
'value': self.value,
}
MAPPING_TYPE_VALUE_TO_TEXT = 1
MAPPING_TYPE_RANGE_TO_TEXT = 2
MAPPING_VALUE_TO_TEXT = Mapping('value to text', MAPPING_TYPE_VALUE_TO_TEXT)
MAPPING_RANGE_TO_TEXT = Mapping('range to text', MAPPING_TYPE_RANGE_TO_TEXT)
# Value types min/max/avg/current/total/name/first/delta/range
VTYPE_MIN = 'min'
VTYPE_MAX = 'max'
VTYPE_AVG = 'avg'
VTYPE_CURR = 'current'
VTYPE_TOTAL = 'total'
VTYPE_NAME = 'name'
VTYPE_FIRST = 'first'
VTYPE_DELTA = 'delta'
VTYPE_RANGE = 'range'
VTYPE_DEFAULT = VTYPE_AVG
@attr.s
class Grid(object):
threshold1 = attr.ib(default=None)
threshold1Color = attr.ib(
default=attr.Factory(lambda: GREY1),
validator=instance_of(RGBA),
)
threshold2 = attr.ib(default=None)
threshold2Color = attr.ib(
default=attr.Factory(lambda: GREY2),
validator=instance_of(RGBA),
)
def to_json_data(self):
return {
'threshold1': self.threshold1,
'threshold1Color': self.threshold1Color,
'threshold2': self.threshold2,
'threshold2Color': self.threshold2Color,
}
@attr.s
class Legend(object):
avg = attr.ib(default=False, validator=instance_of(bool))
current = attr.ib(default=False, validator=instance_of(bool))
max = attr.ib(default=False, validator=instance_of(bool))
min = attr.ib(default=False, validator=instance_of(bool))
show = attr.ib(default=True, validator=instance_of(bool))
total = attr.ib(default=False, validator=instance_of(bool))
values = attr.ib(default=None)
alignAsTable = attr.ib(default=False, validator=instance_of(bool))
hideEmpty = attr.ib(default=False, validator=instance_of(bool))
hideZero = attr.ib(default=False, validator=instance_of(bool))
rightSide = attr.ib(default=False, validator=instance_of(bool))
sideWidth = attr.ib(default=None)
sort = attr.ib(default=None)
sortDesc = attr.ib(default=False)
def to_json_data(self):
values = ((self.avg or self.current or self.max or self.min)
if self.values is None else self.values)
return {
'avg': self.avg,
'current': self.current,
'max': self.max,
'min': self.min,
'show': self.show,
'total': self.total,
'values': values,
'alignAsTable': self.alignAsTable,
'hideEmpty': self.hideEmpty,
'hideZero': self.hideZero,
'rightSide': self.rightSide,
'sideWidth': self.sideWidth,
'sort': self.sort,
'sortDesc': self.sortDesc,
}
def is_valid_max_per_row(instance, attribute, value):
if ((value is not None) and not isinstance(value, int)):
raise ValueError("{attr} should either be None or an integer".format(
attr=attribute))
@attr.s
class Repeat(object):
"""
Panel repetition settings.
:param direction: The direction into which to repeat ('h' or 'v')
:param variable: The name of the variable over whose values to repeat
:param maxPerRow: The maximum number of panels per row in horizontal repetition
"""
direction = attr.ib(default=None)
variable = attr.ib(default=None)
maxPerRow = attr.ib(default=None, validator=is_valid_max_per_row)
def is_valid_target(instance, attribute, value):
"""
Check if a given attribute is a valid target
"""
if not hasattr(value, "refId"):
raise ValueError(f"{attribute.name} should have 'refId' attribute")
@attr.s
class Target(object):
"""
Metric to show.
:param target: Graphite way to select data
"""
expr = attr.ib(default="")
format = attr.ib(default=TIME_SERIES_TARGET_FORMAT)
hide = attr.ib(default=False, validator=instance_of(bool))
legendFormat = attr.ib(default="")
interval = attr.ib(default="", validator=instance_of(str))
intervalFactor = attr.ib(default=2)
metric = attr.ib(default="")
refId = attr.ib(default="")
step = attr.ib(default=DEFAULT_STEP)
target = attr.ib(default="")
instant = attr.ib(validator=instance_of(bool), default=False)
datasource = attr.ib(default=None)
def to_json_data(self):
return {
'expr': self.expr,
'target': self.target,
'format': self.format,
'hide': self.hide,
'interval': self.interval,
'intervalFactor': self.intervalFactor,
'legendFormat': self.legendFormat,
'metric': self.metric,
'refId': self.refId,
'step': self.step,
'instant': self.instant,
'datasource': self.datasource,
}
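# A minimal usage sketch (hypothetical Prometheus metric name):
#
#     cpu = Target(
#         expr='rate(node_cpu_seconds_total{mode!="idle"}[5m])',
#         legendFormat='{{instance}}',
#         refId='A',
#     )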
@attr.s
class Tooltip(object):
msResolution = attr.ib(default=True, validator=instance_of(bool))
shared = attr.ib(default=True, validator=instance_of(bool))
sort = attr.ib(default=0)
valueType = attr.ib(default=CUMULATIVE)
def to_json_data(self):
return {
'msResolution': self.msResolution,
'shared': self.shared,
'sort': self.sort,
'value_type': self.valueType,
}
def is_valid_xaxis_mode(instance, attribute, value):
XAXIS_MODES = ('time', 'series')
if value not in XAXIS_MODES:
raise ValueError("{attr} should be one of {choice}".format(
attr=attribute, choice=XAXIS_MODES))
@attr.s
class XAxis(object):
"""
X Axis
:param mode: Mode of axis can be time, series or histogram
:param name: X axis name
:param value: list of values eg. ["current"] or ["avg"]
:param show: show X axis
"""
mode = attr.ib(default='time', validator=is_valid_xaxis_mode)
name = attr.ib(default=None)
values = attr.ib(default=attr.Factory(list))
show = attr.ib(validator=instance_of(bool), default=True)
def to_json_data(self):
return {
'mode': self.mode,
'name': self.name,
'values': self.values,
'show': self.show,
}
@attr.s
class YAxis(object):
"""A single Y axis.
Grafana graphs have two Y axes: one on the left and one on the right.
:param decimals: Defines how many decimals are displayed for Y value. (default auto)
:param format: The display unit for the Y value
    :param label: The Y axis label. (default "")
:param logBase: The scale to use for the Y value, linear, or logarithmic. (default linear)
:param max: The maximum Y value
:param min: The minimum Y value
:param show: Show or hide the axis
"""
decimals = attr.ib(default=None)
format = attr.ib(default=None)
label = attr.ib(default=None)
logBase = attr.ib(default=1)
max = attr.ib(default=None)
min = attr.ib(default=None)
show = attr.ib(default=True, validator=instance_of(bool))
def to_json_data(self):
return {
'decimals': self.decimals,
'format': self.format,
'label': self.label,
'logBase': self.logBase,
'max': self.max,
'min': self.min,
'show': self.show,
}
@attr.s
class YAxes(object):
"""The pair of Y axes on a Grafana graph.
Each graph has two Y Axes, a left one and a right one.
"""
left = attr.ib(default=attr.Factory(lambda: YAxis(format=SHORT_FORMAT)),
validator=instance_of(YAxis))
right = attr.ib(default=attr.Factory(lambda: YAxis(format=SHORT_FORMAT)),
validator=instance_of(YAxis))
def to_json_data(self):
return [
self.left,
self.right,
]
def single_y_axis(**kwargs):
"""Specify that a graph has a single Y axis.
Parameters are those passed to `YAxis`. Returns a `YAxes` object (i.e. a
pair of axes) that can be used as the yAxes parameter of a graph.
"""
axis = YAxis(**kwargs)
return YAxes(left=axis)
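# A minimal usage sketch: a graph whose only (left) Y axis is formatted as bytes.
#
#     yAxes = single_y_axis(format=BYTES_FORMAT)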
def to_y_axes(data):
"""Backwards compatibility for 'YAxes'.
In grafanalib 0.1.2 and earlier, Y axes were specified as a list of two
elements. Now, we have a dedicated `YAxes` type.
This function converts a list of two `YAxis` values to a `YAxes` value,
silently passes through `YAxes` values, warns about doing things the old
way, and errors when there are invalid values.
"""
if isinstance(data, YAxes):
return data
if not isinstance(data, (list, tuple)):
raise ValueError(
"Y axes must be either YAxes or a list of two values, got %r"
% data)
if len(data) != 2:
raise ValueError(
"Must specify exactly two YAxes, got %d: %r"
% (len(data), data))
warnings.warn(
"Specify Y axes using YAxes or single_y_axis, rather than a "
"list/tuple",
DeprecationWarning, stacklevel=3)
return YAxes(left=data[0], right=data[1])
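# Illustrative sketch (not part of the public API): typical ways to build the
# Y-axis pair above.  SHORT_FORMAT is the module-level format constant already
# used as the default above; nothing here is executed at import time.
def _example_y_axes():
    """Hypothetical helper showing the Y-axis helpers; never called by the library."""
    dual = YAxes(
        left=YAxis(format=SHORT_FORMAT, min=0),
        right=YAxis(format=SHORT_FORMAT, show=False),
    )
    single = single_y_axis(format=SHORT_FORMAT)
    legacy = to_y_axes([YAxis(), YAxis()])  # old list form; emits a DeprecationWarning
    return dual, single, legacy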
def _balance_panels(panels):
"""Resize panels so they are evenly spaced."""
allotted_spans = sum(panel.span if panel.span else 0 for panel in panels)
no_span_set = [panel for panel in panels if panel.span is None]
auto_span = math.ceil(
(TOTAL_SPAN - allotted_spans) / (len(no_span_set) or 1))
return [
attr.evolve(panel, span=auto_span) if panel.span is None else panel
for panel in panels
]
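# Worked example of the span arithmetic above, assuming the usual module-level
# TOTAL_SPAN of 12: with one panel fixed at span=6 and two panels left at
# span=None, each unsized panel is given math.ceil((12 - 6) / 2) == 3 spans.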
@attr.s
class GridPos(object):
"""GridPos describes the panel size and position in grid coordinates.
:param h: height of the panel, grid height units each represents
30 pixels
:param w: width of the panel 1-24 (the width of the dashboard
is divided into 24 columns)
    :param x: x coordinate of the panel, in same unit as w
    :param y: y coordinate of the panel, in same unit as h
"""
h = attr.ib()
w = attr.ib()
x = attr.ib()
y = attr.ib()
def to_json_data(self):
return {
'h': self.h,
'w': self.w,
'x': self.x,
'y': self.y
}
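# Illustrative sketch: two half-width panels sharing the first grid row.  The
# dashboard grid is 24 columns wide, so w=12 splits the row evenly; all values
# here are placeholders.
def _example_grid_pos():
    """Hypothetical helper; never called by the library."""
    left = GridPos(h=8, w=12, x=0, y=0)
    right = GridPos(h=8, w=12, x=12, y=0)
    return left, right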
@attr.s
class Annotations(object):
list = attr.ib(default=attr.Factory(list))
def to_json_data(self):
return {
'list': self.list,
}
@attr.s
class DataLink(object):
title = attr.ib()
linkUrl = attr.ib(default="", validator=instance_of(str))
isNewTab = attr.ib(default=False, validator=instance_of(bool))
def to_json_data(self):
return {
'title': self.title,
'url': self.linkUrl,
'targetBlank': self.isNewTab,
}
@attr.s
class DataSourceInput(object):
name = attr.ib()
label = attr.ib()
pluginId = attr.ib()
pluginName = attr.ib()
description = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
return {
'description': self.description,
'label': self.label,
'name': self.name,
'pluginId': self.pluginId,
'pluginName': self.pluginName,
'type': 'datasource',
}
@attr.s
class ConstantInput(object):
name = attr.ib()
label = attr.ib()
value = attr.ib()
description = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
return {
'description': self.description,
'label': self.label,
'name': self.name,
'type': 'constant',
'value': self.value,
}
@attr.s
class DashboardLink(object):
dashboard = attr.ib()
uri = attr.ib()
keepTime = attr.ib(
default=True,
validator=instance_of(bool),
)
title = attr.ib(default=None)
type = attr.ib(default=DASHBOARD_TYPE)
def to_json_data(self):
title = self.dashboard if self.title is None else self.title
return {
'dashUri': self.uri,
'dashboard': self.dashboard,
'keepTime': self.keepTime,
'title': title,
'type': self.type,
'url': self.uri,
}
@attr.s
class ExternalLink(object):
"""ExternalLink creates a top-level link attached to a dashboard.
:param url: the URL to link to
:param title: the text of the link
:param keepTime: if true, the URL params for the dashboard's
current time period are appended
"""
uri = attr.ib()
title = attr.ib()
keepTime = attr.ib(
default=False,
validator=instance_of(bool),
)
def to_json_data(self):
return {
'keepTime': self.keepTime,
'title': self.title,
'type': 'link',
'url': self.uri,
}
@attr.s
class Template(object):
"""Template create a new 'variable' for the dashboard, defines the variable
name, human name, query to fetch the values and the default value.
:param default: the default value for the variable
:param dataSource: where to fetch the values for the variable from
:param label: the variable's human label
:param name: the variable's name
    :param query: the query used to fetch the valid values of the variable
:param refresh: Controls when to update values in the dropdown
:param allValue: specify a custom all value with regex,
globs or lucene syntax.
:param includeAll: Add a special All option whose value includes
all options.
:param regex: Regex to filter or capture specific parts of the names
return by your data source query.
:param multi: If enabled, the variable will support the selection of
multiple options at the same time.
:param type: The template type, can be one of: query (default),
interval, datasource, custom, constant, adhoc.
:param hide: Hide this variable in the dashboard, can be one of:
SHOW (default), HIDE_LABEL, HIDE_VARIABLE
:param auto: Interval will be dynamically calculated by dividing time range by the count specified in auto_count.
:param autoCount: Number of intervals for dividing the time range.
:param autoMin: Smallest interval for auto interval generator.
"""
name = attr.ib()
query = attr.ib()
_current = attr.ib(init=False, default=attr.Factory(dict))
default = attr.ib(default=None)
dataSource = attr.ib(default=None)
label = attr.ib(default=None)
allValue = attr.ib(default=None)
includeAll = attr.ib(
default=False,
validator=instance_of(bool),
)
multi = attr.ib(
default=False,
validator=instance_of(bool),
)
options = attr.ib(default=attr.Factory(list))
regex = attr.ib(default=None)
useTags = attr.ib(
default=False,
validator=instance_of(bool),
)
tagsQuery = attr.ib(default=None)
tagValuesQuery = attr.ib(default=None)
refresh = attr.ib(default=REFRESH_ON_DASHBOARD_LOAD,
validator=instance_of(int))
type = attr.ib(default='query')
hide = attr.ib(default=SHOW)
sort = attr.ib(default=SORT_ALPHA_ASC)
auto = attr.ib(
default=False,
validator=instance_of(bool),
)
autoCount = attr.ib(
default=DEFAULT_AUTO_COUNT,
validator=instance_of(int)
)
autoMin = attr.ib(default=DEFAULT_MIN_AUTO_INTERVAL)
def __attrs_post_init__(self):
if self.type == 'custom':
if len(self.options) == 0:
for value in self.query.split(','):
is_default = value == self.default
option = {
'selected': is_default,
'text': value,
'value': value,
}
if is_default:
self._current = option
self.options.append(option)
else:
for option in self.options:
if option['selected']:
self._current = option
break
else:
self._current = {
                'selected': bool(self.default),
'text': self.default,
'value': self.default,
'tags': [],
}
def to_json_data(self):
return {
'allValue': self.allValue,
'current': self._current,
'datasource': self.dataSource,
'hide': self.hide,
'includeAll': self.includeAll,
'label': self.label,
'multi': self.multi,
'name': self.name,
'options': self.options,
'query': self.query,
'refresh': self.refresh,
'regex': self.regex,
'sort': self.sort,
'type': self.type,
'useTags': self.useTags,
'tagsQuery': self.tagsQuery,
'tagValuesQuery': self.tagValuesQuery,
'auto': self.auto,
'auto_min': self.autoMin,
'auto_count': self.autoCount
}
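# Illustrative sketch: a 'custom' dashboard variable whose options are expanded
# from the comma-separated query string by __attrs_post_init__ above.  The
# variable name and values are placeholders.
def _example_template():
    """Hypothetical helper; never called by the library."""
    return Template(
        name='env',
        query='dev,staging,prod',
        type='custom',
        default='prod',
        label='Environment',
    )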
@attr.s
class Templating(object):
list = attr.ib(default=attr.Factory(list))
def to_json_data(self):
return {
'list': self.list,
}
@attr.s
class Time(object):
start = attr.ib()
end = attr.ib()
def to_json_data(self):
return {
'from': self.start,
'to': self.end,
}
DEFAULT_TIME = Time('now-1h', 'now')
@attr.s
class TimePicker(object):
"""
Time Picker
:param refreshIntervals: dashboard auto-refresh interval options
:param timeOptions: dashboard time range options
:param hidden: hide the time picker from dashboard
"""
refreshIntervals = attr.ib()
timeOptions = attr.ib()
hidden = attr.ib(
default=False,
validator=instance_of(bool),
)
def to_json_data(self):
return {
'refresh_intervals': self.refreshIntervals,
'time_options': self.timeOptions,
'hidden': self.hidden
}
DEFAULT_TIME_PICKER = TimePicker(
refreshIntervals=[
'5s',
'10s',
'30s',
'1m',
'5m',
'15m',
'30m',
'1h',
'2h',
'1d'
],
timeOptions=[
'5m',
'15m',
'1h',
'6h',
'12h',
'24h',
'2d',
'7d',
'30d'
]
)
@attr.s
class Evaluator(object):
type = attr.ib()
params = attr.ib()
def to_json_data(self):
return {
'type': self.type,
'params': self.params,
}
def GreaterThan(value):
return Evaluator(EVAL_GT, [value])
def LowerThan(value):
return Evaluator(EVAL_LT, [value])
def WithinRange(from_value, to_value):
return Evaluator(EVAL_WITHIN_RANGE, [from_value, to_value])
def OutsideRange(from_value, to_value):
return Evaluator(EVAL_OUTSIDE_RANGE, [from_value, to_value])
def NoValue():
return Evaluator(EVAL_NO_VALUE, [])
@attr.s
class TimeRange(object):
"""A time range for an alert condition.
A condition has to hold for this length of time before triggering.
:param str from_time: Either a number + unit (s: second, m: minute,
h: hour, etc) e.g. ``"5m"`` for 5 minutes, or ``"now"``.
:param str to_time: Either a number + unit (s: second, m: minute,
h: hour, etc) e.g. ``"5m"`` for 5 minutes, or ``"now"``.
"""
from_time = attr.ib()
to_time = attr.ib()
def to_json_data(self):
return [self.from_time, self.to_time]
@attr.s
class AlertCondition(object):
"""
A condition on an alert.
:param Target target: Metric the alert condition is based on.
:param Evaluator evaluator: How we decide whether we should alert on the
metric. e.g. ``GreaterThan(5)`` means the metric must be greater than 5
to trigger the condition. See ``GreaterThan``, ``LowerThan``,
``WithinRange``, ``OutsideRange``, ``NoValue``.
:param TimeRange timeRange: How long the condition must be true for before
we alert.
:param operator: One of ``OP_AND`` or ``OP_OR``. How this condition
combines with other conditions.
:param reducerType: RTYPE_*
:param type: CTYPE_*
"""
target = attr.ib(validator=is_valid_target)
evaluator = attr.ib(validator=instance_of(Evaluator))
timeRange = attr.ib(validator=instance_of(TimeRange))
operator = attr.ib()
reducerType = attr.ib()
type = attr.ib(default=CTYPE_QUERY, kw_only=True)
def to_json_data(self):
queryParams = [
self.target.refId, self.timeRange.from_time, self.timeRange.to_time
]
return {
'evaluator': self.evaluator,
'operator': {
'type': self.operator,
},
'query': {
'model': self.target,
'params': queryParams,
},
'reducer': {
'params': [],
'type': self.reducerType,
},
'type': self.type,
}
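# Illustrative sketch: a condition that alerts when the series behind refId 'A'
# averages above 5 over the last five minutes.  OP_AND and RTYPE_AVG are the
# operator/reducer constants referenced in the docstring above and are assumed
# to be defined earlier in this module; the expression is a placeholder.
def _example_alert_condition():
    """Hypothetical helper; never called by the library."""
    return AlertCondition(
        target=Target(expr='rate(http_errors_total[5m])', refId='A'),
        evaluator=GreaterThan(5),
        timeRange=TimeRange('5m', 'now'),
        operator=OP_AND,
        reducerType=RTYPE_AVG,
    )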
@attr.s
class Alert(object):
"""
:param alertRuleTags: Key Value pairs to be sent with Alert notifications.
"""
name = attr.ib()
message = attr.ib()
alertConditions = attr.ib()
executionErrorState = attr.ib(default=STATE_ALERTING)
frequency = attr.ib(default='60s')
handler = attr.ib(default=1)
noDataState = attr.ib(default=STATE_NO_DATA)
notifications = attr.ib(default=attr.Factory(list))
gracePeriod = attr.ib(default='5m')
alertRuleTags = attr.ib(
default=attr.Factory(dict),
validator=attr.validators.deep_mapping(
key_validator=attr.validators.instance_of(str),
value_validator=attr.validators.instance_of(str),
mapping_validator=attr.validators.instance_of(dict),
)
)
def to_json_data(self):
return {
'conditions': self.alertConditions,
'executionErrorState': self.executionErrorState,
'frequency': self.frequency,
'handler': self.handler,
'message': self.message,
'name': self.name,
'noDataState': self.noDataState,
'notifications': self.notifications,
'for': self.gracePeriod,
'alertRuleTags': self.alertRuleTags,
}
@attr.s
class Notification(object):
uid = attr.ib()
def to_json_data(self):
return {
'uid': self.uid,
}
@attr.s
class Dashboard(object):
title = attr.ib()
annotations = attr.ib(
default=attr.Factory(Annotations),
validator=instance_of(Annotations),
)
description = attr.ib(default="", validator=instance_of(str))
editable = attr.ib(
default=True,
validator=instance_of(bool),
)
gnetId = attr.ib(default=None)
hideControls = attr.ib(
default=False,
validator=instance_of(bool),
)
id = attr.ib(default=None)
inputs = attr.ib(default=attr.Factory(list))
links = attr.ib(default=attr.Factory(list))
panels = attr.ib(default=attr.Factory(list), validator=instance_of(list))
refresh = attr.ib(default=DEFAULT_REFRESH)
rows = attr.ib(default=attr.Factory(list), validator=instance_of(list))
schemaVersion = attr.ib(default=SCHEMA_VERSION)
sharedCrosshair = attr.ib(
default=False,
validator=instance_of(bool),
)
style = attr.ib(default=DARK_STYLE)
tags = attr.ib(default=attr.Factory(list))
templating = attr.ib(
default=attr.Factory(Templating),
validator=instance_of(Templating),
)
time = attr.ib(
default=attr.Factory(lambda: DEFAULT_TIME),
validator=instance_of(Time),
)
timePicker = attr.ib(
default=attr.Factory(lambda: DEFAULT_TIME_PICKER),
validator=instance_of(TimePicker),
)
timezone = attr.ib(default=UTC)
version = attr.ib(default=0)
uid = attr.ib(default=None)
def _iter_panels(self):
for row in self.rows:
for panel in row._iter_panels():
yield panel
for panel in self.panels:
if hasattr(panel, 'panels'):
yield panel
for row_panel in panel._iter_panels():
                    yield row_panel
else:
yield panel
def _map_panels(self, f):
return attr.evolve(
self,
rows=[r._map_panels(f) for r in self.rows],
panels=[p._map_panels(f) for p in self.panels]
)
def auto_panel_ids(self):
"""Give unique IDs all the panels without IDs.
Returns a new ``Dashboard`` that is the same as this one, except all
of the panels have their ``id`` property set. Any panels which had an
``id`` property set will keep that property, all others will have
auto-generated IDs provided for them.
"""
ids = set([panel.id for panel in self._iter_panels() if panel.id])
auto_ids = (i for i in itertools.count(1) if i not in ids)
def set_id(panel):
return panel if panel.id else attr.evolve(panel, id=next(auto_ids))
return self._map_panels(set_id)
def to_json_data(self):
if self.panels and self.rows:
print(
"Warning: You are using both panels and rows in this dashboard, please use one or the other. "
"Panels should be used in preference over rows, see example dashboard for help."
)
return {
'__inputs': self.inputs,
'annotations': self.annotations,
'description': self.description,
'editable': self.editable,
'gnetId': self.gnetId,
'hideControls': self.hideControls,
'id': self.id,
'links': self.links,
'panels': self.panels if not self.rows else [],
'refresh': self.refresh,
'rows': self.rows,
'schemaVersion': self.schemaVersion,
'sharedCrosshair': self.sharedCrosshair,
'style': self.style,
'tags': self.tags,
'templating': self.templating,
'title': self.title,
'time': self.time,
'timepicker': self.timePicker,
'timezone': self.timezone,
'version': self.version,
'uid': self.uid,
}
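# Illustrative sketch: a minimal dashboard combining the classes above.  Graph
# is defined further down in this module; since the helper is never called at
# import time, the forward reference is harmless.  Titles, datasource name and
# the query expression are placeholders.
def _example_dashboard():
    """Hypothetical helper; never called by the library."""
    graph = Graph(
        title='Error rate',
        dataSource='prometheus',
        targets=[Target(expr='rate(http_errors_total[5m])', refId='A')],
        gridPos=GridPos(h=8, w=12, x=0, y=0),
    )
    return Dashboard(title='Service overview', panels=[graph]).auto_panel_ids()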
def _deep_update(base_dict, extra_dict):
if extra_dict is None:
return base_dict
for k, v in extra_dict.items():
if k in base_dict and hasattr(base_dict[k], "to_json_data"):
base_dict[k] = base_dict[k].to_json_data()
if k in base_dict and isinstance(base_dict[k], dict):
_deep_update(base_dict[k], v)
else:
base_dict[k] = v
@attr.s
class Panel(object):
"""
Generic panel for shared defaults
:param cacheTimeout: metric query result cache ttl
:param dataSource: Grafana datasource name
:param description: optional panel description
:param editable: defines if panel is editable via web interfaces
:param height: defines panel height
:param hideTimeOverride: hides time overrides
:param id: panel id
:param interval: defines time interval between metric queries
:param links: additional web links
    :param maxDataPoints: maximum number of metric query results
        used for rendering
:param minSpan: minimum span number
:param repeat: Template's name to repeat Graph on
:param span: defines the number of spans that will be used for panel
:param targets: list of metric requests for chosen datasource
    :param timeFrom: overrides the relative time range for this panel
    :param title: title of the panel
    :param transparent: defines if panel should be transparent
    :param transformations: defines transformations applied to the query results
:param extraJson: raw JSON additions or overrides added to the JSON output
of this panel, can be used for using unsupported features
"""
dataSource = attr.ib(default=None)
targets = attr.ib(default=attr.Factory(list), validator=instance_of(list))
title = attr.ib(default="")
cacheTimeout = attr.ib(default=None)
description = attr.ib(default=None)
editable = attr.ib(default=True, validator=instance_of(bool))
error = attr.ib(default=False, validator=instance_of(bool))
height = attr.ib(default=None)
gridPos = attr.ib(default=None)
hideTimeOverride = attr.ib(default=False, validator=instance_of(bool))
id = attr.ib(default=None)
interval = attr.ib(default=None)
links = attr.ib(default=attr.Factory(list))
maxDataPoints = attr.ib(default=100)
minSpan = attr.ib(default=None)
repeat = attr.ib(default=attr.Factory(Repeat), validator=instance_of(Repeat))
span = attr.ib(default=None)
timeFrom = attr.ib(default=None)
timeShift = attr.ib(default=None)
transparent = attr.ib(default=False, validator=instance_of(bool))
transformations = attr.ib(default=attr.Factory(list), validator=instance_of(list))
extraJson = attr.ib(default=None, validator=attr.validators.optional(instance_of(dict)))
def _map_panels(self, f):
return f(self)
def panel_json(self, overrides):
res = {
'cacheTimeout': self.cacheTimeout,
'datasource': self.dataSource,
'description': self.description,
'editable': self.editable,
'error': self.error,
'height': self.height,
'gridPos': self.gridPos,
'hideTimeOverride': self.hideTimeOverride,
'id': self.id,
'interval': self.interval,
'links': self.links,
'maxDataPoints': self.maxDataPoints,
'minSpan': self.minSpan,
'repeat': self.repeat.variable,
'repeatDirection': self.repeat.direction,
'maxPerRow': self.repeat.maxPerRow,
'span': self.span,
'targets': self.targets,
'timeFrom': self.timeFrom,
'timeShift': self.timeShift,
'title': self.title,
'transparent': self.transparent,
'transformations': self.transformations
}
res.update(overrides)
_deep_update(res, self.extraJson)
return res
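# Illustrative sketch of the extraJson escape hatch documented above: the dict
# is deep-merged over the generated panel JSON by _deep_update, so it can set
# options grafanalib does not model.  Text is defined later in this module and
# 'pluginVersion' is just an example key; both are placeholders here.
def _example_extra_json():
    """Hypothetical helper; never called by the library."""
    return Text(
        title='Notes',
        content='Deployment notes go here',
        extraJson={'pluginVersion': '9.0.0'},
    )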
@attr.s
class RowPanel(Panel):
"""
Generates Row panel json structure.
:param title: title of the panel
:param collapsed: set True if row should be collapsed
:param panels: list of panels in the row, only to be used when collapsed=True
"""
    panels = attr.ib(default=attr.Factory(list), validator=instance_of(list))
    collapsed = attr.ib(default=False, validator=instance_of(bool))
def _iter_panels(self):
return iter(self.panels)
def _map_panels(self, f):
self = f(self)
return attr.evolve(self, panels=list(map(f, self.panels)))
def to_json_data(self):
return self.panel_json(
{
'collapsed': self.collapsed,
'panels': self.panels,
'type': ROW_TYPE
}
)
@attr.s
class Row(object):
"""
Legacy support for old row, when not used with gridpos
"""
# TODO: jml would like to separate the balancing behaviour from this
# layer.
try:
panels = attr.ib(default=attr.Factory(list), converter=_balance_panels)
except TypeError:
panels = attr.ib(default=attr.Factory(list), convert=_balance_panels)
collapse = attr.ib(
default=False, validator=instance_of(bool),
)
editable = attr.ib(
default=True, validator=instance_of(bool),
)
height = attr.ib(
default=attr.Factory(lambda: DEFAULT_ROW_HEIGHT),
validator=instance_of(Pixels),
)
showTitle = attr.ib(default=None)
title = attr.ib(default=None)
repeat = attr.ib(default=None)
def _iter_panels(self):
return iter(self.panels)
def _map_panels(self, f):
return attr.evolve(self, panels=list(map(f, self.panels)))
def to_json_data(self):
showTitle = False
title = "New row"
if self.title is not None:
showTitle = True
title = self.title
if self.showTitle is not None:
showTitle = self.showTitle
return {
'collapse': self.collapse,
'editable': self.editable,
'height': self.height,
'panels': self.panels,
'showTitle': showTitle,
'title': title,
'repeat': self.repeat,
}
@attr.s
class Graph(Panel):
"""
Generates Graph panel json structure.
:param alert: List of AlertConditions
:param align: Select to align left and right Y-axes by value
:param alignLevel: Available when Align is selected. Value to use for alignment of left and right Y-axes
:param bars: Display values as a bar chart
:param dataLinks: List of data links hooked to datapoints on the graph
:param fill: Area fill, amount of color fill for a series. (default 1, 0 is none)
:param fillGradient: Degree of gradient on the area fill. (0 is no gradient, 10 is a steep gradient. Default is 0.)
:param lines: Display values as a line graph
:param points: Display points for values (default False)
:param pointRadius: Controls how large the points are
:param stack: Each series is stacked on top of another
:param percentage: Available when Stack is selected. Each series is drawn as a percentage of the total of all series
:param thresholds: List of GraphThresholds - Only valid when alert not defined
"""
alert = attr.ib(default=None)
alertThreshold = attr.ib(default=True, validator=instance_of(bool))
aliasColors = attr.ib(default=attr.Factory(dict))
align = attr.ib(default=False, validator=instance_of(bool))
alignLevel = attr.ib(default=0, validator=instance_of(int))
bars = attr.ib(default=False, validator=instance_of(bool))
dataLinks = attr.ib(default=attr.Factory(list))
error = attr.ib(default=False, validator=instance_of(bool))
fill = attr.ib(default=1, validator=instance_of(int))
fillGradient = attr.ib(default=0, validator=instance_of(int))
grid = attr.ib(default=attr.Factory(Grid), validator=instance_of(Grid))
isNew = attr.ib(default=True, validator=instance_of(bool))
legend = attr.ib(
default=attr.Factory(Legend),
validator=instance_of(Legend),
)
lines = attr.ib(default=True, validator=instance_of(bool))
lineWidth = attr.ib(default=DEFAULT_LINE_WIDTH)
nullPointMode = attr.ib(default=NULL_CONNECTED)
percentage = attr.ib(default=False, validator=instance_of(bool))
pointRadius = attr.ib(default=DEFAULT_POINT_RADIUS)
points = attr.ib(default=False, validator=instance_of(bool))
renderer = attr.ib(default=DEFAULT_RENDERER)
seriesOverrides = attr.ib(default=attr.Factory(list))
stack = attr.ib(default=False, validator=instance_of(bool))
steppedLine = attr.ib(default=False, validator=instance_of(bool))
tooltip = attr.ib(
default=attr.Factory(Tooltip),
validator=instance_of(Tooltip),
)
thresholds = attr.ib(default=attr.Factory(list))
xAxis = attr.ib(default=attr.Factory(XAxis), validator=instance_of(XAxis))
try:
yAxes = attr.ib(
default=attr.Factory(YAxes),
converter=to_y_axes,
validator=instance_of(YAxes),
)
except TypeError:
yAxes = attr.ib(
default=attr.Factory(YAxes),
convert=to_y_axes,
validator=instance_of(YAxes),
)
def to_json_data(self):
graphObject = {
'aliasColors': self.aliasColors,
'bars': self.bars,
'error': self.error,
'fill': self.fill,
'grid': self.grid,
'isNew': self.isNew,
'legend': self.legend,
'lines': self.lines,
'linewidth': self.lineWidth,
'minSpan': self.minSpan,
'nullPointMode': self.nullPointMode,
'options': {
'dataLinks': self.dataLinks,
'alertThreshold': self.alertThreshold,
},
'percentage': self.percentage,
'pointradius': self.pointRadius,
'points': self.points,
'renderer': self.renderer,
'seriesOverrides': self.seriesOverrides,
'stack': self.stack,
'steppedLine': self.steppedLine,
'tooltip': self.tooltip,
'thresholds': self.thresholds,
'type': GRAPH_TYPE,
'xaxis': self.xAxis,
'yaxes': self.yAxes,
'yaxis': {
'align': self.align,
'alignLevel': self.alignLevel
}
}
if self.alert:
graphObject['alert'] = self.alert
graphObject['thresholds'] = []
if self.thresholds and self.alert:
print("Warning: Graph threshold ignored as Alerts defined")
return self.panel_json(graphObject)
def _iter_targets(self):
for target in self.targets:
yield target
def _map_targets(self, f):
return attr.evolve(self, targets=[f(t) for t in self.targets])
def auto_ref_ids(self):
"""Give unique IDs all the panels without IDs.
Returns a new ``Graph`` that is the same as this one, except all of
the metrics have their ``refId`` property set. Any panels which had
an ``refId`` property set will keep that property, all others will
have auto-generated IDs provided for them.
"""
ref_ids = set([t.refId for t in self._iter_targets() if t.refId])
double_candidate_refs = \
[p[0] + p[1] for p
in itertools.product(string.ascii_uppercase, repeat=2)]
candidate_ref_ids = itertools.chain(
string.ascii_uppercase,
double_candidate_refs,
)
auto_ref_ids = (i for i in candidate_ref_ids if i not in ref_ids)
def set_refid(t):
return t if t.refId else attr.evolve(t, refId=next(auto_ref_ids))
return self._map_targets(set_refid)
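# Illustrative sketch: a graph with two queries sharing one Y axis; calling
# auto_ref_ids() assigns refIds 'A' and 'B' to targets that do not set one.
# The datasource name and expressions are placeholders.
def _example_graph():
    """Hypothetical helper; never called by the library."""
    return Graph(
        title='Requests',
        dataSource='prometheus',
        targets=[
            Target(expr='sum(rate(http_requests_total[5m]))'),
            Target(expr='sum(rate(http_errors_total[5m]))'),
        ],
        yAxes=single_y_axis(format=SHORT_FORMAT),
    ).auto_ref_ids()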
@attr.s
class TimeSeries(Panel):
"""Generates Time Series panel json structure added in Grafana v8
Grafana doc on time series: https://grafana.com/docs/grafana/latest/panels/visualizations/time-series/
    :param axisPlacement: auto (default), left, right, hidden
    :param axisLabel: axis label string
    :param barAlignment: bar alignment
        -1 (left), 0 (centre, default), 1 (right)
:param colorMode: Color mode
palette-classic (Default),
:param drawStyle: how to display your time series data
line (Default), bars, points
:param fillOpacity: fillOpacity
:param gradientMode: gradientMode
:param legendDisplayMode: refine how the legend appears in your visualization
list (Default), table, hidden
:param legendPlacement: bottom (Default), right
:param lineInterpolation: line interpolation
linear (Default), smooth, stepBefore, stepAfter
:param lineWidth: line width, default 1
:param mappings: To assign colors to boolean or string values, use Value mappings
:param pointSize: point size, default 5
:param scaleDistributionType: axis scale linear or log
    :param scaleDistributionLog: log base used when the scale type is logarithmic (default 2)
:param spanNulls: connect null values, default False
:param showPoints: show points
auto (Default), always, never
:param stacking: dict to enable stacking, {"mode": "normal", "group": "A"}
:param thresholds: single stat thresholds
:param tooltipMode: When you hover your cursor over the visualization, Grafana can display tooltips
single (Default), multi, none
:param unit: units
"""
axisPlacement = attr.ib(default='auto', validator=instance_of(str))
axisLabel = attr.ib(default='', validator=instance_of(str))
barAlignment = attr.ib(default=0, validator=instance_of(int))
colorMode = attr.ib(default='palette-classic', validator=instance_of(str))
drawStyle = attr.ib(default='line', validator=instance_of(str))
fillOpacity = attr.ib(default=0, validator=instance_of(int))
gradientMode = attr.ib(default='none', validator=instance_of(str))
legendDisplayMode = attr.ib(default='list', validator=instance_of(str))
legendPlacement = attr.ib(default='bottom', validator=instance_of(str))
lineInterpolation = attr.ib(default='linear', validator=instance_of(str))
lineWidth = attr.ib(default=1, validator=instance_of(int))
mappings = attr.ib(default=attr.Factory(list))
pointSize = attr.ib(default=5, validator=instance_of(int))
scaleDistributionType = attr.ib(default='linear', validator=instance_of(str))
scaleDistributionLog = attr.ib(default=2, validator=instance_of(int))
spanNulls = attr.ib(default=False, validator=instance_of(bool))
showPoints = attr.ib(default='auto', validator=instance_of(str))
    stacking = attr.ib(default=attr.Factory(dict), validator=instance_of(dict))
thresholds = attr.ib(default=attr.Factory(list))
tooltipMode = attr.ib(default='single', validator=instance_of(str))
unit = attr.ib(default='', validator=instance_of(str))
def to_json_data(self):
return self.panel_json(
{
'fieldConfig': {
'defaults': {
'color': {
'mode': self.colorMode
},
'custom': {
'axisPlacement': self.axisPlacement,
'axisLabel': self.axisLabel,
'drawStyle': self.drawStyle,
'lineInterpolation': self.lineInterpolation,
'barAlignment': self.barAlignment,
'lineWidth': self.lineWidth,
'fillOpacity': self.fillOpacity,
'gradientMode': self.gradientMode,
'spanNulls': self.spanNulls,
'showPoints': self.showPoints,
'pointSize': self.pointSize,
'stacking': self.stacking,
'scaleDistribution': {
'type': self.scaleDistributionType,
'log': self.scaleDistributionLog
},
'hideFrom': {
'tooltip': False,
'viz': False,
'legend': False
},
},
'mappings': self.mappings,
'thresholds': {
'mode': 'absolute',
'steps': self.thresholds
},
'unit': self.unit
},
'overrides': []
},
'options': {
'legend': {
'displayMode': self.legendDisplayMode,
'placement': self.legendPlacement,
'calcs': []
},
'tooltip': {
'mode': self.tooltipMode
}
},
'type': TIMESERIES_TYPE,
}
)
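# Illustrative sketch: a Grafana v8+ time series panel with stacked bars, a
# table legend and two absolute threshold steps in Grafana's
# {'color': ..., 'value': ...} step format.  All values are placeholders.
def _example_timeseries():
    """Hypothetical helper; never called by the library."""
    return TimeSeries(
        title='Load average',
        dataSource='prometheus',
        targets=[Target(expr='avg(node_load1)', refId='A')],
        drawStyle='bars',
        stacking={'mode': 'normal', 'group': 'A'},
        legendDisplayMode='table',
        unit='short',
        thresholds=[
            {'color': 'green', 'value': None},
            {'color': 'red', 'value': 0.9},
        ],
    )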
@attr.s
class ValueMap(object):
"""
Generates json structure for a value mapping item.
:param op: comparison operator
:param value: value to map to text
:param text: text to map the value to
"""
text = attr.ib()
value = attr.ib()
op = attr.ib(default='=')
def to_json_data(self):
return {
'op': self.op,
'text': self.text,
'value': self.value,
}
@attr.s
class SparkLine(object):
fillColor = attr.ib(
default=attr.Factory(lambda: BLUE_RGBA),
validator=instance_of(RGBA),
)
full = attr.ib(default=False, validator=instance_of(bool))
lineColor = attr.ib(
default=attr.Factory(lambda: BLUE_RGB),
validator=instance_of(RGB),
)
show = attr.ib(default=False, validator=instance_of(bool))
def to_json_data(self):
return {
'fillColor': self.fillColor,
'full': self.full,
'lineColor': self.lineColor,
'show': self.show,
}
@attr.s
class Gauge(object):
minValue = attr.ib(default=0, validator=instance_of(int))
maxValue = attr.ib(default=100, validator=instance_of(int))
show = attr.ib(default=False, validator=instance_of(bool))
thresholdLabels = attr.ib(default=False, validator=instance_of(bool))
thresholdMarkers = attr.ib(default=True, validator=instance_of(bool))
def to_json_data(self):
return {
'maxValue': self.maxValue,
'minValue': self.minValue,
'show': self.show,
'thresholdLabels': self.thresholdLabels,
'thresholdMarkers': self.thresholdMarkers,
}
@attr.s
class RangeMap(object):
start = attr.ib()
end = attr.ib()
text = attr.ib()
def to_json_data(self):
return {
'from': self.start,
'to': self.end,
'text': self.text,
}
@attr.s
class DiscreteColorMappingItem(object):
"""
    Generates json structure for a text-to-color mapping item used by the Discrete panel's colorMaps:
:param text: String to color
:param color: To color the text with
"""
text = attr.ib(validator=instance_of(str))
color = attr.ib(default=GREY1, validator=instance_of((str, RGBA)))
def to_json_data(self):
return {
"color": self.color,
"text": self.text,
}
@attr.s
class Discrete(Panel):
"""
Generates Discrete panel json structure.
https://grafana.com/grafana/plugins/natel-discrete-panel/
:param colorMaps: list of DiscreteColorMappingItem, to color values
(note these apply **after** value mappings)
    :param backgroundColor: panel background color
    :param lineColor: separator line color between rows
    :param metricNameColor: metric name text color
    :param timeTextColor: time text color
    :param valueTextColor: value text color
    :param decimals: number of decimals to display
    :param rowHeight: height of each row
    :param units: defines value units
    :param legendSortBy: time (desc: '-ms', asc: 'ms'), count (desc: '-count', asc: 'count')
    :param highlightOnMouseover: whether to highlight the state the hovered time falls in
    :param showLegend: whether to show the legend
    :param showLegendPercent: whether to show percentage of time spent in each state/value
    :param showLegendNames: whether to show state/value names in the legend
    :param showLegendValues: whether to show values in the legend
    :param legendPercentDecimals: number of decimals for legend percentages
    :param showTimeAxis: whether to show the time axis
    :param use12HourClock: whether to use a 12-hour clock
    :param writeMetricNames: whether to write metric names on the rows
    :param writeLastValue: whether to write the last value on each row
    :param writeAllValues: whether to show all values
    :param showDistinctCount: whether to show distinct values count
    :param showLegendCounts: whether to show value occurrence count
    :param showLegendTime: whether to show the time spent in each state
    :param showTransitionCount: whether to show transition count
    :param rangeMaps: list of RangeMap
    :param valueMaps: list of ValueMap
"""
backgroundColor = attr.ib(
default=RGBA(128, 128, 128, 0.1),
validator=instance_of((RGBA, RGB, str))
)
lineColor = attr.ib(
default=RGBA(0, 0, 0, 0.1),
validator=instance_of((RGBA, RGB, str))
)
metricNameColor = attr.ib(
default="#000000",
validator=instance_of((RGBA, RGB, str))
)
timeTextColor = attr.ib(
default="#d8d9da",
validator=instance_of((RGBA, RGB, str))
)
valueTextColor = attr.ib(
default="#000000",
validator=instance_of((RGBA, RGB, str))
)
decimals = attr.ib(default=0, validator=instance_of(int))
legendPercentDecimals = attr.ib(default=0, validator=instance_of(int))
rowHeight = attr.ib(default=50, validator=instance_of(int))
textSize = attr.ib(default=24, validator=instance_of(int))
textSizeTime = attr.ib(default=12, validator=instance_of(int))
units = attr.ib(default="none", validator=instance_of(str))
legendSortBy = attr.ib(
default="-ms",
validator=in_(['-ms', 'ms', '-count', 'count'])
)
highlightOnMouseover = attr.ib(default=True, validator=instance_of(bool))
showLegend = attr.ib(default=True, validator=instance_of(bool))
showLegendPercent = attr.ib(default=True, validator=instance_of(bool))
showLegendNames = attr.ib(default=True, validator=instance_of(bool))
showLegendValues = attr.ib(default=True, validator=instance_of(bool))
showTimeAxis = attr.ib(default=True, validator=instance_of(bool))
use12HourClock = attr.ib(default=False, validator=instance_of(bool))
writeMetricNames = attr.ib(default=False, validator=instance_of(bool))
writeLastValue = attr.ib(default=True, validator=instance_of(bool))
writeAllValues = attr.ib(default=False, validator=instance_of(bool))
showDistinctCount = attr.ib(default=None)
showLegendCounts = attr.ib(default=None)
showLegendTime = attr.ib(default=None)
showTransitionCount = attr.ib(default=None)
colorMaps = attr.ib(
        default=attr.Factory(list),
validator=attr.validators.deep_iterable(
member_validator=instance_of(DiscreteColorMappingItem),
iterable_validator=instance_of(list),
),
)
rangeMaps = attr.ib(
        default=attr.Factory(list),
validator=attr.validators.deep_iterable(
member_validator=instance_of(RangeMap),
iterable_validator=instance_of(list),
),
)
valueMaps = attr.ib(
        default=attr.Factory(list),
validator=attr.validators.deep_iterable(
member_validator=instance_of(ValueMap),
iterable_validator=instance_of(list),
),
)
def to_json_data(self):
graphObject = {
'type': DISCRETE_TYPE,
'backgroundColor': self.backgroundColor,
'lineColor': self.lineColor,
'metricNameColor': self.metricNameColor,
'timeTextColor': self.timeTextColor,
'valueTextColor': self.valueTextColor,
'legendPercentDecimals': self.legendPercentDecimals,
'decimals': self.decimals,
'rowHeight': self.rowHeight,
'textSize': self.textSize,
'textSizeTime': self.textSizeTime,
'units': self.units,
'legendSortBy': self.legendSortBy,
'highlightOnMouseover': self.highlightOnMouseover,
'showLegend': self.showLegend,
'showLegendPercent': self.showLegendPercent,
'showLegendNames': self.showLegendNames,
'showLegendValues': self.showLegendValues,
'showTimeAxis': self.showTimeAxis,
'use12HourClock': self.use12HourClock,
'writeMetricNames': self.writeMetricNames,
'writeLastValue': self.writeLastValue,
'writeAllValues': self.writeAllValues,
'showDistinctCount': self.showDistinctCount,
'showLegendCounts': self.showLegendCounts,
'showLegendTime': self.showLegendTime,
'showTransitionCount': self.showTransitionCount,
'colorMaps': self.colorMaps,
'rangeMaps': self.rangeMaps,
'valueMaps': self.valueMaps,
}
return self.panel_json(graphObject)
@attr.s
class Text(Panel):
"""Generates a Text panel."""
content = attr.ib(default="")
error = attr.ib(default=False, validator=instance_of(bool))
mode = attr.ib(default=TEXT_MODE_MARKDOWN)
def to_json_data(self):
return self.panel_json(
{
'content': self.content,
'error': self.error,
'mode': self.mode,
'type': TEXT_TYPE,
}
)
@attr.s
class AlertList(object):
"""Generates the AlertList Panel.
:param dashboardTags: A list of tags (strings) for the panel.
:param description: Panel description, supports markdown and links.
:param gridPos: describes the panel size and position in grid coordinates.
:param id: panel id
:param limit: Max number of alerts that can be displayed in the list.
:param nameFilter: Show only alerts that contain nameFilter in their name.
:param onlyAlertsOnDashboard: If true, shows only alerts from the current dashboard.
:param links: Additional web links to be presented in the panel. A list of instantiation of
DataLink objects.
:param show: Show the current alert list (ALERTLIST_SHOW_CURRENT) or only the alerts that were
changed (ALERTLIST_SHOW_CHANGES).
:param sortOrder: Defines the sorting order of the alerts. Gets one of the following values as
input: SORT_ASC, SORT_DESC and SORT_IMPORTANCE.
:param span: Defines the number of spans that will be used for the panel.
:param stateFilter: Show alerts with statuses from the stateFilter list. The list can contain a
subset of the following statuses:
[ALERTLIST_STATE_ALERTING, ALERTLIST_STATE_OK, ALERTLIST_STATE_NO_DATA,
ALERTLIST_STATE_PAUSED, ALERTLIST_STATE_EXECUTION_ERROR, ALERTLIST_STATE_PENDING].
An empty list means all alerts.
:param title: The panel title.
:param transparent: If true, display the panel without a background.
"""
dashboardTags = attr.ib(
default=attr.Factory(list),
validator=attr.validators.deep_iterable(
member_validator=attr.validators.instance_of(str),
iterable_validator=attr.validators.instance_of(list)))
description = attr.ib(default="", validator=instance_of(str))
gridPos = attr.ib(
default=None, validator=attr.validators.optional(attr.validators.instance_of(GridPos)))
id = attr.ib(default=None)
limit = attr.ib(default=DEFAULT_LIMIT)
links = attr.ib(
default=attr.Factory(list),
validator=attr.validators.deep_iterable(
member_validator=attr.validators.instance_of(DataLink),
iterable_validator=attr.validators.instance_of(list)))
nameFilter = attr.ib(default="", validator=instance_of(str))
onlyAlertsOnDashboard = attr.ib(default=True, validator=instance_of(bool))
show = attr.ib(default=ALERTLIST_SHOW_CURRENT)
sortOrder = attr.ib(default=SORT_ASC, validator=in_([1, 2, 3]))
span = attr.ib(default=6)
stateFilter = attr.ib(default=attr.Factory(list))
title = attr.ib(default="")
transparent = attr.ib(default=False, validator=instance_of(bool))
def to_json_data(self):
return {
'dashboardTags': self.dashboardTags,
'description': self.description,
'gridPos': self.gridPos,
'id': self.id,
'limit': self.limit,
'links': self.links,
'nameFilter': self.nameFilter,
'onlyAlertsOnDashboard': self.onlyAlertsOnDashboard,
'show': self.show,
'sortOrder': self.sortOrder,
'span': self.span,
'stateFilter': self.stateFilter,
'title': self.title,
'transparent': self.transparent,
'type': ALERTLIST_TYPE,
}
@attr.s
class Stat(Panel):
"""Generates Stat panel json structure
Grafana doc on stat: https://grafana.com/docs/grafana/latest/panels/visualizations/stat-panel/
    :param alignment: defines value & title positioning: keys 'auto' 'center'
:param colorMode: defines if Grafana will color panel background: keys "value" "background"
:param decimals: number of decimals to display
:param format: defines value units
:param graphMode: defines if Grafana will draw graph: keys 'area' 'none'
:param noValue: define the default value if no value is found
:param mappings: the list of values to text mappings
This should be a list of StatMapping objects
https://grafana.com/docs/grafana/latest/panels/field-configuration-options/#value-mapping
:param orientation: Stacking direction in case of multiple series or fields: keys 'auto' 'horizontal' 'vertical'
:param reduceCalc: algorithm for reduction to a single value: keys
'mean' 'lastNotNull' 'last' 'first' 'firstNotNull' 'min' 'max' 'sum' 'total'
    :param textMode: defines whether Grafana shows the name or the value: keys: 'auto' 'name' 'none' 'value' 'value_and_name'
:param thresholds: single stat thresholds
"""
alignment = attr.ib(default='auto')
colorMode = attr.ib(default='value')
decimals = attr.ib(default=None)
format = attr.ib(default='none')
graphMode = attr.ib(default='area')
mappings = attr.ib(default=attr.Factory(list))
noValue = attr.ib(default='none')
orientation = attr.ib(default='auto')
reduceCalc = attr.ib(default='mean', type=str)
textMode = attr.ib(default='auto')
thresholds = attr.ib(default="")
def to_json_data(self):
return self.panel_json(
{
'fieldConfig': {
'defaults': {
'custom': {},
'decimals': self.decimals,
'mappings': self.mappings,
'thresholds': {
'mode': ABSOLUTE_TYPE,
'steps': self.thresholds,
},
'unit': self.format,
'noValue': self.noValue
}
},
'options': {
'textMode': self.textMode,
'colorMode': self.colorMode,
'graphMode': self.graphMode,
'justifyMode': self.alignment,
'orientation': self.orientation,
'reduceOptions': {
'calcs': [
self.reduceCalc
],
'fields': '',
'values': False
}
},
'type': STAT_TYPE,
}
)
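# Illustrative sketch: a stat panel whose raw 0/1 result is mapped to text by
# the StatValueMappings class defined just below (the name resolves when the
# helper is called, not at import time).  Query and colours are placeholders.
def _example_stat():
    """Hypothetical helper; never called by the library."""
    return Stat(
        title='API status',
        dataSource='prometheus',
        targets=[Target(expr='up{job="api"}', refId='A')],
        reduceCalc='lastNotNull',
        mappings=[
            StatValueMappings(
                StatValueMappingItem('Offline', '0', 'red'),
                StatValueMappingItem('Online', '1', 'green'),
            ),
        ],
    )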
@attr.s
class StatValueMappingItem(object):
"""
Generates json structure for the value mapping item for the StatValueMappings class:
:param text: String that will replace input value
:param mapValue: Value to be replaced
:param color: How to color the text if mapping occurs
:param index: index
"""
text = attr.ib()
mapValue = attr.ib(default="", validator=instance_of(str))
color = attr.ib(default="", validator=instance_of(str))
index = attr.ib(default=None)
def to_json_data(self):
return {
self.mapValue: {
'text': self.text,
'color': self.color,
'index': self.index
}
}
@attr.s(init=False)
class StatValueMappings(object):
"""
Generates json structure for the value mappings for the StatPanel:
:param mappingItems: List of StatValueMappingItem objects
mappings=[
core.StatValueMappings(
            core.StatValueMappingItem('Offline', '0', 'red'),  # Value must be a string
core.StatValueMappingItem('Online', '1', 'green')
),
],
"""
mappingItems = attr.ib(
        default=attr.Factory(list),
validator=attr.validators.deep_iterable(
member_validator=attr.validators.instance_of(StatValueMappingItem),
iterable_validator=attr.validators.instance_of(list),
),
)
def __init__(self, *mappings: StatValueMappingItem):
self.__attrs_init__([*mappings])
def to_json_data(self):
ret_dict = {
'type': 'value',
'options': {
}
}
for item in self.mappingItems:
ret_dict['options'].update(item.to_json_data())
return ret_dict
@attr.s
class StatRangeMappings(object):
"""
Generates json structure for the range mappings for the StatPanel:
    :param text: String that will replace input value
:param startValue: When using a range, the start value of the range
:param endValue: When using a range, the end value of the range
:param color: How to color the text if mapping occurs
:param index: index
"""
text = attr.ib()
startValue = attr.ib(default=0, validator=instance_of(int))
endValue = attr.ib(default=0, validator=instance_of(int))
color = attr.ib(default="", validator=instance_of(str))
index = attr.ib(default=None)
def to_json_data(self):
return {
'type': 'range',
'options': {
'from': self.startValue,
'to': self.endValue,
'result': {
'text': self.text,
'color': self.color,
'index': self.index
}
}
}
@attr.s
class StatMapping(object):
"""
    Deprecated in Grafana v8
    Generates json structure for the value mapping for the Stat panel:
    :param text: String that will replace input value
:param value: Value to be replaced
:param startValue: When using a range, the start value of the range
:param endValue: When using a range, the end value of the range
:param id: panel id
"""
text = attr.ib()
mapValue = attr.ib(default="", validator=instance_of(str))
startValue = attr.ib(default="", validator=instance_of(str))
endValue = attr.ib(default="", validator=instance_of(str))
id = attr.ib(default=None)
def to_json_data(self):
mappingType = MAPPING_TYPE_VALUE_TO_TEXT if self.mapValue else MAPPING_TYPE_RANGE_TO_TEXT
ret_dict = {
'operator': '',
'text': self.text,
'type': mappingType,
'value': self.mapValue,
'from': self.startValue,
'to': self.endValue,
'id': self.id
}
return ret_dict
@attr.s
class StatValueMapping(object):
"""
    Deprecated in Grafana v8
    Generates json structure for the value mappings for the StatPanel:
    :param text: String that will replace input value
:param mapValue: Value to be replaced
:param id: panel id
"""
text = attr.ib()
mapValue = attr.ib(default="", validator=instance_of(str))
id = attr.ib(default=None)
def to_json_data(self):
return StatMapping(
self.text,
mapValue=self.mapValue,
id=self.id,
)
@attr.s
class StatRangeMapping(object):
"""
    Deprecated in Grafana v8
    Generates json structure for the range mappings for the StatPanel:
    :param text: String that will replace input value
:param startValue: When using a range, the start value of the range
:param endValue: When using a range, the end value of the range
:param id: panel id
"""
text = attr.ib()
startValue = attr.ib(default="", validator=instance_of(str))
endValue = attr.ib(default="", validator=instance_of(str))
id = attr.ib(default=None)
def to_json_data(self):
return StatMapping(
self.text,
startValue=self.startValue,
endValue=self.endValue,
id=self.id
)
@attr.s
class SingleStat(Panel):
"""Generates Single Stat panel json structure
This panel was deprecated in Grafana 7.0, please use Stat instead
Grafana doc on singlestat: https://grafana.com/docs/grafana/latest/features/panels/singlestat/
:param cacheTimeout: metric query result cache ttl
:param colors: the list of colors that can be used for coloring
panel value or background. Additional info on coloring in docs:
https://grafana.com/docs/grafana/latest/features/panels/singlestat/#coloring
:param colorBackground: defines if grafana will color panel background
:param colorValue: defines if grafana will color panel value
:param decimals: override automatic decimal precision for legend/tooltips
:param format: defines value units
    :param gauge: draws an additional speedometer-like gauge based on the value
:param mappingType: defines panel mapping type.
Additional info can be found in docs:
https://grafana.com/docs/grafana/latest/features/panels/singlestat/#value-to-text-mapping
:param mappingTypes: the list of available mapping types for panel
:param nullText: defines what to show if metric query result is undefined
:param nullPointMode: defines how to render undefined values
:param postfix: defines postfix that will be attached to value
:param postfixFontSize: defines postfix font size
:param prefix: defines prefix that will be attached to value
:param prefixFontSize: defines prefix font size
:param rangeMaps: the list of value to text mappings
:param sparkline: defines if grafana should draw an additional sparkline.
Sparkline grafana documentation:
https://grafana.com/docs/grafana/latest/features/panels/singlestat/#spark-lines
:param thresholds: single stat thresholds
:param valueFontSize: defines value font size
:param valueName: defines value type. possible values are:
min, max, avg, current, total, name, first, delta, range
:param valueMaps: the list of value to text mappings
"""
cacheTimeout = attr.ib(default=None)
colors = attr.ib(default=attr.Factory(lambda: [GREEN, ORANGE, RED]))
colorBackground = attr.ib(default=False, validator=instance_of(bool))
colorValue = attr.ib(default=False, validator=instance_of(bool))
decimals = attr.ib(default=None)
format = attr.ib(default='none')
gauge = attr.ib(default=attr.Factory(Gauge),
validator=instance_of(Gauge))
mappingType = attr.ib(default=MAPPING_TYPE_VALUE_TO_TEXT)
mappingTypes = attr.ib(
default=attr.Factory(lambda: [
MAPPING_VALUE_TO_TEXT,
MAPPING_RANGE_TO_TEXT,
]),
)
minSpan = attr.ib(default=None)
nullText = attr.ib(default=None)
nullPointMode = attr.ib(default='connected')
postfix = attr.ib(default="")
postfixFontSize = attr.ib(default='50%')
prefix = attr.ib(default="")
prefixFontSize = attr.ib(default='50%')
rangeMaps = attr.ib(default=attr.Factory(list))
sparkline = attr.ib(
default=attr.Factory(SparkLine),
validator=instance_of(SparkLine),
)
thresholds = attr.ib(default="")
valueFontSize = attr.ib(default='80%')
valueName = attr.ib(default=VTYPE_DEFAULT)
valueMaps = attr.ib(default=attr.Factory(list))
def to_json_data(self):
return self.panel_json(
{
'cacheTimeout': self.cacheTimeout,
'colorBackground': self.colorBackground,
'colorValue': self.colorValue,
'colors': self.colors,
'decimals': self.decimals,
'format': self.format,
'gauge': self.gauge,
'mappingType': self.mappingType,
'mappingTypes': self.mappingTypes,
'minSpan': self.minSpan,
'nullPointMode': self.nullPointMode,
'nullText': self.nullText,
'postfix': self.postfix,
'postfixFontSize': self.postfixFontSize,
'prefix': self.prefix,
'prefixFontSize': self.prefixFontSize,
'rangeMaps': self.rangeMaps,
'sparkline': self.sparkline,
'thresholds': self.thresholds,
'type': SINGLESTAT_TYPE,
'valueFontSize': self.valueFontSize,
'valueMaps': self.valueMaps,
'valueName': self.valueName,
}
)
@attr.s
class DateColumnStyleType(object):
TYPE = 'date'
dateFormat = attr.ib(default="YYYY-MM-DD HH:mm:ss")
def to_json_data(self):
return {
'dateFormat': self.dateFormat,
'type': self.TYPE,
}
@attr.s
class NumberColumnStyleType(object):
TYPE = 'number'
colorMode = attr.ib(default=None)
colors = attr.ib(default=attr.Factory(lambda: [GREEN, ORANGE, RED]))
thresholds = attr.ib(default=attr.Factory(list))
decimals = attr.ib(default=2, validator=instance_of(int))
unit = attr.ib(default=SHORT_FORMAT)
def to_json_data(self):
return {
'colorMode': self.colorMode,
'colors': self.colors,
'decimals': self.decimals,
'thresholds': self.thresholds,
'type': self.TYPE,
'unit': self.unit,
}
@attr.s
class StringColumnStyleType(object):
TYPE = 'string'
decimals = attr.ib(default=2, validator=instance_of(int))
colorMode = attr.ib(default=None)
colors = attr.ib(default=attr.Factory(lambda: [GREEN, ORANGE, RED]))
thresholds = attr.ib(default=attr.Factory(list))
preserveFormat = attr.ib(validator=instance_of(bool), default=False)
sanitize = attr.ib(validator=instance_of(bool), default=False)
unit = attr.ib(default=SHORT_FORMAT)
mappingType = attr.ib(default=MAPPING_TYPE_VALUE_TO_TEXT)
valueMaps = attr.ib(default=attr.Factory(list))
rangeMaps = attr.ib(default=attr.Factory(list))
def to_json_data(self):
return {
'decimals': self.decimals,
'colorMode': self.colorMode,
'colors': self.colors,
'thresholds': self.thresholds,
'unit': self.unit,
'mappingType': self.mappingType,
'valueMaps': self.valueMaps,
'rangeMaps': self.rangeMaps,
'preserveFormat': self.preserveFormat,
'sanitize': self.sanitize,
'type': self.TYPE,
}
@attr.s
class HiddenColumnStyleType(object):
TYPE = 'hidden'
def to_json_data(self):
return {
'type': self.TYPE,
}
@attr.s
class ColumnStyle(object):
alias = attr.ib(default="")
pattern = attr.ib(default="")
align = attr.ib(default='auto', validator=in_(
['auto', 'left', 'right', 'center']))
link = attr.ib(validator=instance_of(bool), default=False)
linkOpenInNewTab = attr.ib(validator=instance_of(bool), default=False)
linkUrl = attr.ib(validator=instance_of(str), default="")
linkTooltip = attr.ib(validator=instance_of(str), default="")
type = attr.ib(
default=attr.Factory(NumberColumnStyleType),
validator=instance_of((
DateColumnStyleType,
HiddenColumnStyleType,
NumberColumnStyleType,
StringColumnStyleType,
))
)
def to_json_data(self):
data = {
'alias': self.alias,
'pattern': self.pattern,
'align': self.align,
'link': self.link,
'linkTargetBlank': self.linkOpenInNewTab,
'linkUrl': self.linkUrl,
'linkTooltip': self.linkTooltip,
}
data.update(self.type.to_json_data())
return data
@attr.s
class ColumnSort(object):
col = attr.ib(default=None)
desc = attr.ib(default=False, validator=instance_of(bool))
def to_json_data(self):
return {
'col': self.col,
'desc': self.desc,
}
@attr.s
class Column(object):
"""Details of an aggregation column in a table panel.
:param text: name of column
:param value: aggregation function
"""
text = attr.ib(default='Avg')
value = attr.ib(default='avg')
def to_json_data(self):
return {
'text': self.text,
'value': self.value,
}
@attr.s
class Table(Panel):
"""Generates Table panel json structure
Now supports Grafana v8+
Grafana doc on table: https://grafana.com/docs/grafana/latest/visualizations/table/
:param align: Align cell contents; auto (default), left, center, right
:param colorMode: Default thresholds
:param columns: Table columns for Aggregations view
:param displayMode: By default, Grafana automatically chooses display settings, you can choose;
color-text, color-background, color-background-solid, gradient-gauge, lcd-gauge, basic, json-view
:param fontSize: Defines value font size
:param filterable: Allow user to filter columns, default False
:param mappings: To assign colors to boolean or string values, use Value mappings
:param showHeader: Show the table header
:param thresholds: List of thresholds
"""
align = attr.ib(default='auto', validator=instance_of(str))
colorMode = attr.ib(default='thresholds', validator=instance_of(str))
columns = attr.ib(default=attr.Factory(list))
displayMode = attr.ib(default='auto', validator=instance_of(str))
fontSize = attr.ib(default='100%')
filterable = attr.ib(default=False, validator=instance_of(bool))
mappings = attr.ib(default=attr.Factory(list))
showHeader = attr.ib(default=True, validator=instance_of(bool))
span = attr.ib(default=6)
thresholds = attr.ib(default=attr.Factory(list))
@classmethod
def with_styled_columns(cls, columns, styles=None, **kwargs):
"""Styled columns is not support in Grafana v8 Table"""
print("Error: Styled columns is not support in Grafana v8 Table")
print("Please see https://grafana.com/docs/grafana/latest/visualizations/table/ for more options")
raise NotImplementedError
def to_json_data(self):
return self.panel_json(
{
"color": {
"mode": self.colorMode
},
'columns': self.columns,
'fontSize': self.fontSize,
'fieldConfig': {
'defaults': {
'custom': {
'align': self.align,
'displayMode': self.displayMode,
'filterable': self.filterable
},
"thresholds": {
"mode": "absolute",
"steps": self.thresholds
}
}
},
'hideTimeOverride': self.hideTimeOverride,
'mappings': self.mappings,
'minSpan': self.minSpan,
'options': {
'showHeader': self.showHeader
},
'type': TABLE_TYPE,
}
)
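# Illustrative sketch: a v8 table panel with colour-background cells, column
# filtering enabled and two absolute threshold steps.  The query expression is
# a placeholder.
def _example_table():
    """Hypothetical helper; never called by the library."""
    return Table(
        title='Pod restarts',
        dataSource='prometheus',
        targets=[Target(expr='kube_pod_container_status_restarts_total', refId='A')],
        displayMode='color-background',
        filterable=True,
        thresholds=[
            {'color': 'green', 'value': None},
            {'color': 'red', 'value': 5},
        ],
    )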
@attr.s
class BarGauge(Panel):
"""Generates Bar Gauge panel json structure
    :param allValues: whether to show all values or a single calculated value
:param calc: Calculation to perform on metrics
:param dataLinks: list of data links hooked to datapoints on the graph
:param decimals: override automatic decimal precision for legend/tooltips
:param displayMode: style to display bar gauge in
:param format: defines value units
    :param label: title for the displayed value. Template Variables:
        "$__series_name" "$__field_name" "$__cell_{N}" "$__calc"
    :param limit: limit of number of values to show when not Calculating
    :param max: maximum value of the gauge
    :param min: minimum value of the gauge
    :param orientation: orientation of the bar gauge
    :param rangeMaps: the list of value to text mappings
    :param thresholdLabels: option to show threshold labels on the gauge
    :param thresholdMarkers: option to show threshold markers on the gauge
:param thresholds: single stat thresholds
:param valueMaps: the list of value to text mappings
"""
allValues = attr.ib(default=False, validator=instance_of(bool))
calc = attr.ib(default=GAUGE_CALC_MEAN)
dataLinks = attr.ib(default=attr.Factory(list))
decimals = attr.ib(default=None)
displayMode = attr.ib(
default=GAUGE_DISPLAY_MODE_LCD,
validator=in_(
[
GAUGE_DISPLAY_MODE_LCD,
GAUGE_DISPLAY_MODE_BASIC,
GAUGE_DISPLAY_MODE_GRADIENT,
]
),
)
format = attr.ib(default='none')
label = attr.ib(default=None)
limit = attr.ib(default=None)
max = attr.ib(default=100)
min = attr.ib(default=0)
orientation = attr.ib(
default=ORIENTATION_HORIZONTAL,
validator=in_([ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL]),
)
rangeMaps = attr.ib(default=attr.Factory(list))
thresholdLabels = attr.ib(default=False, validator=instance_of(bool))
thresholdMarkers = attr.ib(default=True, validator=instance_of(bool))
thresholds = attr.ib(
default=attr.Factory(
lambda: [
Threshold('green', 0, 0.0),
Threshold('red', 1, 80.0)
]
),
validator=instance_of(list),
)
valueMaps = attr.ib(default=attr.Factory(list))
def to_json_data(self):
return self.panel_json(
{
'options': {
'displayMode': self.displayMode,
'fieldOptions': {
'calcs': [self.calc],
'defaults': {
'decimals': self.decimals,
'max': self.max,
'min': self.min,
'title': self.label,
'unit': self.format,
'links': self.dataLinks,
},
'limit': self.limit,
'mappings': self.valueMaps,
'override': {},
'thresholds': self.thresholds,
'values': self.allValues,
},
'orientation': self.orientation,
'showThresholdLabels': self.thresholdLabels,
'showThresholdMarkers': self.thresholdMarkers,
},
'type': BARGAUGE_TYPE,
}
)
@attr.s
class GaugePanel(Panel):
"""Generates Gauge panel json structure
    :param allValues: whether to show all values or a single calculated value
:param calc: Calculation to perform on metrics
:param dataLinks: list of data links hooked to datapoints on the graph
:param decimals: override automatic decimal precision for legend/tooltips
:param format: defines value units
    :param label: title for the displayed value. Template Variables:
        "$__series_name" "$__field_name" "$__cell_{N}" "$__calc"
    :param limit: limit of number of values to show when not Calculating
    :param max: maximum value of the gauge
    :param min: minimum value of the gauge
    :param rangeMaps: the list of value to text mappings
    :param thresholdLabels: option to show threshold labels on the gauge
    :param thresholdMarkers: option to show threshold markers on the gauge
:param thresholds: single stat thresholds
:param valueMaps: the list of value to text mappings
"""
allValues = attr.ib(default=False, validator=instance_of(bool))
calc = attr.ib(default=GAUGE_CALC_MEAN)
dataLinks = attr.ib(default=attr.Factory(list))
decimals = attr.ib(default=None)
format = attr.ib(default='none')
label = attr.ib(default=None)
limit = attr.ib(default=None)
max = attr.ib(default=100)
min = attr.ib(default=0)
rangeMaps = attr.ib(default=attr.Factory(list))
thresholdLabels = attr.ib(default=False, validator=instance_of(bool))
thresholdMarkers = attr.ib(default=True, validator=instance_of(bool))
thresholds = attr.ib(
default=attr.Factory(
lambda: [
Threshold('green', 0, 0.0),
Threshold('red', 1, 80.0)
]
),
validator=instance_of(list),
)
valueMaps = attr.ib(default=attr.Factory(list))
def to_json_data(self):
return self.panel_json(
{
'options': {
'fieldOptions': {
'calcs': [self.calc],
'defaults': {
'decimals': self.decimals,
'max': self.max,
'min': self.min,
'title': self.label,
'unit': self.format,
'links': self.dataLinks,
},
'limit': self.limit,
'mappings': self.valueMaps,
'override': {},
'thresholds': self.thresholds,
'values': self.allValues,
},
'showThresholdLabels': self.thresholdLabels,
'showThresholdMarkers': self.thresholdMarkers,
},
'type': GAUGE_TYPE,
}
)
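
# Illustrative sketch (not part of the original module): a gauge that shows the
# latest value instead of the mean. GAUGE_CALC_LAST is assumed to be one of the
# GAUGE_CALC_* constants defined alongside GAUGE_CALC_MEAN.
#
#   gauge = GaugePanel(
#       title='Disk usage %',
#       calc=GAUGE_CALC_LAST,
#       max=100,
#       format='percent',
#       thresholds=[
#           Threshold('green', 0, 0.0),
#           Threshold('red', 1, 90.0),
#       ],
#   )
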
@attr.s
class HeatmapColor(object):
"""A Color object for heatmaps
:param cardColor: color
:param colorScale: scale
:param colorScheme: scheme
:param exponent: exponent
:param max: max
:param min: min
:param mode: mode
"""
# Maybe cardColor should validate to RGBA object, not sure
cardColor = attr.ib(default='#b4ff00', validator=instance_of(str))
colorScale = attr.ib(default='sqrt', validator=instance_of(str))
colorScheme = attr.ib(default='interpolateOranges')
exponent = attr.ib(default=0.5, validator=instance_of(float))
mode = attr.ib(default='spectrum', validator=instance_of(str))
max = attr.ib(default=None)
min = attr.ib(default=None)
def to_json_data(self):
return {
'mode': self.mode,
'cardColor': self.cardColor,
'colorScale': self.colorScale,
'exponent': self.exponent,
'colorScheme': self.colorScheme,
'max': self.max,
'min': self.min,
}
@attr.s
class Heatmap(Panel):
"""Generates Heatmap panel json structure (https://grafana.com/docs/grafana/latest/features/panels/heatmap/)
:param heatmap: dict
:param cards: A heatmap card object: keys "cardPadding", "cardRound"
:param color: Heatmap color object
:param dataFormat: 'timeseries' or 'tsbuckets'
:param yBucketBound: 'auto', 'upper', 'middle', 'lower'
:param reverseYBuckets: boolean
:param xBucketSize: Size
:param xBucketNumber: Number
:param yBucketSize: Size
:param yBucketNumber: Number
:param highlightCards: boolean
:param hideZeroBuckets: boolean
:param transparent: defines if the panel should be transparent
"""
    # The heatmap panel does not work well with the Legend class defined above,
    # so a plain dict is used here
legend = attr.ib(default={'show': False})
tooltip = attr.ib(
default=attr.Factory(Tooltip),
validator=instance_of(Tooltip),
)
cards = attr.ib(
default={
'cardPadding': None,
'cardRound': None
}
)
color = attr.ib(
default=attr.Factory(HeatmapColor),
validator=instance_of(HeatmapColor),
)
dataFormat = attr.ib(default='timeseries')
heatmap = {}
hideZeroBuckets = attr.ib(default=False)
highlightCards = attr.ib(default=True)
options = attr.ib(default=None)
xAxis = attr.ib(
default=attr.Factory(XAxis),
validator=instance_of(XAxis)
)
xBucketNumber = attr.ib(default=None)
xBucketSize = attr.ib(default=None)
yAxis = attr.ib(
default=attr.Factory(YAxis),
validator=instance_of(YAxis)
)
yBucketBound = attr.ib(default=None)
yBucketNumber = attr.ib(default=None)
yBucketSize = attr.ib(default=None)
reverseYBuckets = attr.ib(default=False)
def to_json_data(self):
return self.panel_json(
{
'cards': self.cards,
'color': self.color,
'dataFormat': self.dataFormat,
'heatmap': self.heatmap,
'hideZeroBuckets': self.hideZeroBuckets,
'highlightCards': self.highlightCards,
'legend': self.legend,
'options': self.options,
'reverseYBuckets': self.reverseYBuckets,
'tooltip': self.tooltip,
'type': HEATMAP_TYPE,
'xAxis': self.xAxis,
'xBucketNumber': self.xBucketNumber,
'xBucketSize': self.xBucketSize,
'yAxis': self.yAxis,
'yBucketBound': self.yBucketBound,
'yBucketNumber': self.yBucketNumber,
'yBucketSize': self.yBucketSize
}
)
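
# Illustrative sketch (not part of the original module): a heatmap fed with
# pre-bucketed data and a custom color scheme. It assumes YAxis accepts a
# 'format' argument as defined earlier in this module, and that
# 'interpolateSpectral' is one of the d3 scheme names Grafana understands.
#
#   heatmap = Heatmap(
#       title='Request latency',
#       dataFormat='tsbuckets',
#       color=HeatmapColor(colorScheme='interpolateSpectral'),
#       hideZeroBuckets=True,
#       yAxis=YAxis(format='s'),
#   )
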
@attr.s
class StatusmapColor(object):
"""A Color object for Statusmaps
:param cardColor: colour
:param colorScale: scale
:param colorScheme: scheme
:param exponent: exponent
:param max: max
:param min: min
:param mode: mode
:param thresholds: threshold
"""
# Maybe cardColor should validate to RGBA object, not sure
cardColor = attr.ib(default='#b4ff00', validator=instance_of(str))
colorScale = attr.ib(default='sqrt', validator=instance_of(str))
colorScheme = attr.ib(default='GnYlRd', validator=instance_of(str))
exponent = attr.ib(default=0.5, validator=instance_of(float))
mode = attr.ib(default='spectrum', validator=instance_of(str))
thresholds = attr.ib(default=[], validator=instance_of(list))
max = attr.ib(default=None)
min = attr.ib(default=None)
def to_json_data(self):
return {
'mode': self.mode,
'cardColor': self.cardColor,
'colorScale': self.colorScale,
'exponent': self.exponent,
'colorScheme': self.colorScheme,
'max': self.max,
'min': self.min,
'thresholds': self.thresholds
}
@attr.s
class Statusmap(Panel):
"""Generates json structure for the flant-statusmap-panel visualisation plugin
(https://grafana.com/grafana/plugins/flant-statusmap-panel/).
:param alert: Alert
:param cards: A statusmap card object: keys 'cardRound', 'cardMinWidth', 'cardHSpacing', 'cardVSpacing'
:param color: A StatusmapColor object
:param isNew: isNew
:param legend: Legend object
:param nullPointMode: null
:param tooltip: Tooltip object
:param xAxis: XAxis object
:param yAxis: YAxis object
"""
alert = attr.ib(default=None)
cards = attr.ib(
default={
'cardRound': None,
'cardMinWidth': 5,
'cardHSpacing': 2,
'cardVSpacing': 2,
}, validator=instance_of(dict))
color = attr.ib(
default=attr.Factory(StatusmapColor),
validator=instance_of(StatusmapColor),
)
isNew = attr.ib(default=True, validator=instance_of(bool))
legend = attr.ib(
default=attr.Factory(Legend),
validator=instance_of(Legend),
)
nullPointMode = attr.ib(default=NULL_AS_ZERO)
tooltip = attr.ib(
default=attr.Factory(Tooltip),
validator=instance_of(Tooltip),
)
xAxis = attr.ib(
default=attr.Factory(XAxis),
validator=instance_of(XAxis)
)
yAxis = attr.ib(
default=attr.Factory(YAxis),
validator=instance_of(YAxis)
)
def to_json_data(self):
graphObject = {
'color': self.color,
'isNew': self.isNew,
'legend': self.legend,
'minSpan': self.minSpan,
'nullPointMode': self.nullPointMode,
'tooltip': self.tooltip,
'type': STATUSMAP_TYPE,
'xaxis': self.xAxis,
'yaxis': self.yAxis,
}
if self.alert:
graphObject['alert'] = self.alert
return self.panel_json(graphObject)
@attr.s
class Svg(Panel):
"""Generates SVG panel json structure
Grafana doc on SVG: https://grafana.com/grafana/plugins/marcuscalidus-svg-panel
:param format: defines value units
:param jsCodeFilePath: path to javascript file to be run on dashboard refresh
:param jsCodeInitFilePath: path to javascript file to be run after the first initialization of the SVG
:param reduceCalc: algorithm for reduction to a single value,
keys 'mean' 'lastNotNull' 'last' 'first' 'firstNotNull' 'min' 'max' 'sum' 'total'
:param svgFilePath: path to SVG image file to be displayed
"""
format = attr.ib(default='none')
jsCodeFilePath = attr.ib(default="", validator=instance_of(str))
jsCodeInitFilePath = attr.ib(default="", validator=instance_of(str))
height = attr.ib(default=None)
svgFilePath = attr.ib(default="", validator=instance_of(str))
@staticmethod
def read_file(file_path):
if file_path:
with open(file_path) as f:
read_data = f.read()
return read_data
else:
return ''
def to_json_data(self):
js_code = self.read_file(self.jsCodeFilePath)
js_init_code = self.read_file(self.jsCodeInitFilePath)
svg_data = self.read_file(self.svgFilePath)
return self.panel_json(
{
'format': self.format,
'js_code': js_code,
'js_init_code': js_init_code,
'svg_data': svg_data,
'type': SVG_TYPE,
'useSVGBuilder': False
}
)
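
# Illustrative sketch (not part of the original module): the file paths below
# are placeholders; the referenced files are read at dashboard-generation time
# by read_file().
#
#   svg_panel = Svg(
#       title='Rack layout',
#       svgFilePath='dashboards/rack.svg',
#       jsCodeFilePath='dashboards/rack_refresh.js',
#   )
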
@attr.s
class PieChart(Panel):
"""Generates Pie Chart panel json structure
This panel was deprecated in Grafana 8.0, please use PieChartv2 instead
Grafana doc on Pie Chart: https://grafana.com/grafana/plugins/grafana-piechart-panel
:param aliasColors: dictionary of color overrides
:param format: defines value units
:param pieType: defines the shape of the pie chart (pie or donut)
    :param percentageDecimals: Number of decimal places to show if percentages are shown in the legend
:param showLegend: defines if the legend should be shown
:param showLegendValues: defines if the legend should show values
:param showLegendPercentage: Show percentages in the legend
    :param legendType: defines the legend position
:param thresholds: defines thresholds
"""
aliasColors = attr.ib(default=attr.Factory(dict))
format = attr.ib(default='none')
legendType = attr.ib(default='Right side')
pieType = attr.ib(default='pie')
percentageDecimals = attr.ib(default=0, validator=instance_of(int))
showLegend = attr.ib(default=True)
showLegendValues = attr.ib(default=True)
showLegendPercentage = attr.ib(default=False, validator=instance_of(bool))
thresholds = attr.ib(default="")
def to_json_data(self):
print('PieChart panel was deprecated in Grafana 8.0, please use PieChartv2 instead')
return self.panel_json(
{
'aliasColors': self.aliasColors,
'format': self.format,
'pieType': self.pieType,
'height': self.height,
'fieldConfig': {
'defaults': {
'custom': {},
},
'overrides': []
},
'legend': {
'show': self.showLegend,
'values': self.showLegendValues,
'percentage': self.showLegendPercentage,
'percentageDecimals': self.percentageDecimals
},
'legendType': self.legendType,
'type': PIE_CHART_TYPE,
}
)
@attr.s
class PieChartv2(Panel):
"""Generates Pie Chart panel json structure
Grafana docs on Pie Chart: https://grafana.com/docs/grafana/latest/visualizations/pie-chart-panel/
    :param custom: Custom overrides
:param colorMode: Color mode
palette-classic (Default),
:param legendDisplayMode: Display mode of legend: list, table or hidden
:param legendPlacement: Location of the legend in the panel: bottom or right
:param legendValues: List of value to be shown in legend eg. ['value', 'percent']
:param mappings: To assign colors to boolean or string values, use Value mappings
:param overrides: Overrides
:param pieType: Pie chart type
pie (Default), donut
:param reduceOptionsCalcs: Reducer function / calculation
:param reduceOptionsFields: Fields that should be included in the panel
:param reduceOptionsValues: Calculate a single value per column or series or show each row
:param tooltipMode: Tooltip mode
single (Default), multi, none
:param unit: units
"""
custom = attr.ib(default={}, validator=instance_of(dict))
colorMode = attr.ib(default='palette-classic', validator=instance_of(str))
legendDisplayMode = attr.ib(default='list', validator=instance_of(str))
legendPlacement = attr.ib(default='bottom', validator=instance_of(str))
legendValues = attr.ib(default=[], validator=instance_of(list))
mappings = attr.ib(default=attr.Factory(list))
overrides = attr.ib(default=[], validator=instance_of(list))
pieType = attr.ib(default='pie', validator=instance_of(str))
reduceOptionsCalcs = attr.ib(default=['lastNotNull'], validator=instance_of(list))
reduceOptionsFields = attr.ib(default='', validator=instance_of(str))
reduceOptionsValues = attr.ib(default=False, validator=instance_of(bool))
tooltipMode = attr.ib(default='single', validator=instance_of(str))
unit = attr.ib(default='', validator=instance_of(str))
def to_json_data(self):
return self.panel_json(
{
'fieldConfig': {
'defaults': {
'color': {
'mode': self.colorMode
},
'custom': self.custom,
'mappings': self.mappings,
'unit': self.unit,
},
'overrides': self.overrides,
},
'options': {
'reduceOptions': {
'values': self.reduceOptionsValues,
'calcs': self.reduceOptionsCalcs,
'fields': self.reduceOptionsFields
},
'pieType': self.pieType,
'tooltip': {
'mode': self.tooltipMode
},
'legend': {
'displayMode': self.legendDisplayMode,
'placement': self.legendPlacement,
'values': self.legendValues
},
},
'type': PIE_CHART_V2_TYPE,
}
)
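
# Illustrative sketch (not part of the original module): a donut chart with the
# legend on the right showing percentages.
#
#   pie = PieChartv2(
#       title='Traffic by region',
#       pieType='donut',
#       legendPlacement='right',
#       legendValues=['percent'],
#       unit='short',
#   )
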
@attr.s
class DashboardList(Panel):
"""Generates Dashboard list panel json structure
Grafana doc on Dashboard list: https://grafana.com/docs/grafana/latest/panels/visualizations/dashboard-list-panel/
:param showHeadings: The chosen list selection (Starred, Recently viewed, Search) is shown as a heading
:param showSearch: Display dashboards by search query or tags.
Requires you to enter at least one value in Query or Tags
:param showRecent: Display recently viewed dashboards in alphabetical order
:param showStarred: Display starred dashboards in alphabetical order
:param maxItems: Sets the maximum number of items to list per section
:param searchQuery: Enter the query you want to search by
:param searchTags: List of tags you want to search by
"""
showHeadings = attr.ib(default=True, validator=instance_of(bool))
showSearch = attr.ib(default=False, validator=instance_of(bool))
showRecent = attr.ib(default=False, validator=instance_of(bool))
showStarred = attr.ib(default=True, validator=instance_of(bool))
maxItems = attr.ib(default=10, validator=instance_of(int))
searchQuery = attr.ib(default='', validator=instance_of(str))
searchTags = attr.ib(default=attr.Factory(list), validator=instance_of(list))
def to_json_data(self):
return self.panel_json(
{
'fieldConfig': {
'defaults': {
'custom': {},
},
'overrides': []
},
'headings': self.showHeadings,
'search': self.showSearch,
'recent': self.showRecent,
'starred': self.showStarred,
'limit': self.maxItems,
'query': self.searchQuery,
'tags': self.searchTags,
'type': DASHBOARDLIST_TYPE,
}
)
@attr.s
class Logs(Panel):
"""Generates Logs panel json structure
Grafana doc on Logs panel: https://grafana.com/docs/grafana/latest/panels/visualizations/logs-panel/
:param showLabels: Show or hide the unique labels column, which shows only non-common labels
:param showCommonLabels: Show or hide the common labels.
:param showTime: Show or hide the log timestamp column
:param wrapLogMessages: Toggle line wrapping
:param sortOrder: Display results in 'Descending' or 'Ascending' time order. The default is Descending,
showing the newest logs first.
:param dedupStrategy: One of none, exact, numbers, signature. Default is none
:param enableLogDetails: Set this to True to see the log details view for each log row.
:param prettifyLogMessage: Set this to true to pretty print all JSON logs. This setting does not affect logs in any format other than JSON.
"""
showLabels = attr.ib(default=False, validator=instance_of(bool))
showCommonLabels = attr.ib(default=False, validator=instance_of(bool))
showTime = attr.ib(default=False, validator=instance_of(bool))
wrapLogMessage = attr.ib(default=False, validator=instance_of(bool))
sortOrder = attr.ib(default='Descending', validator=instance_of(str))
dedupStrategy = attr.ib(default='none', validator=instance_of(str))
enableLogDetails = attr.ib(default=False, validator=instance_of(bool))
prettifyLogMessage = attr.ib(default=False, validator=instance_of(bool))
def to_json_data(self):
return self.panel_json(
{
'fieldConfig': {
'defaults': {
'custom': {},
},
'overrides': []
},
'options': {
'showLabels': self.showLabels,
'showCommonLabels': self.showCommonLabels,
'showTime': self.showTime,
'wrapLogMessage': self.wrapLogMessage,
'sortOrder': self.sortOrder,
'dedupStrategy': self.dedupStrategy,
'enableLogDetails': self.enableLogDetails,
'prettifyLogMessage': self.prettifyLogMessage
},
'type': LOGS_TYPE,
}
)
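
# Illustrative sketch (not part of the original module): a logs panel showing
# timestamps, deduplicating exact duplicates and sorting oldest-first.
#
#   logs = Logs(
#       title='Application logs',
#       showTime=True,
#       dedupStrategy='exact',
#       sortOrder='Ascending',
#       enableLogDetails=True,
#   )
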
@attr.s
class Threshold(object):
"""Threshold for for panels
(https://grafana.com/docs/grafana/latest/panels/thresholds/)
:param color: Color of threshold
:param index: Index of color in panel
:param line: Display Threshold line, defaults to True
    :param value: value at which this threshold color starts to apply; emitted as null if index is 0
:param op: EVAL_LT for less than or EVAL_GT for greater than to indicate what the threshold applies to.
:param yaxis: Choose left or right for panels
    Care must be taken with the order in which the Threshold objects are specified;
    Grafana expects the values to increase.
Example::
thresholds = [
Threshold('green', 0, 0.0),
Threshold('red', 1, 80.0)]
"""
color = attr.ib()
index = attr.ib(validator=instance_of(int))
value = attr.ib(validator=instance_of(float))
line = attr.ib(default=True, validator=instance_of(bool))
op = attr.ib(default=EVAL_GT)
yaxis = attr.ib(default='left')
def to_json_data(self):
return {
'op': self.op,
'yaxis': self.yaxis,
'color': self.color,
'line': self.line,
'index': self.index,
'value': 'null' if self.index == 0 else self.value,
}
@attr.s
class GraphThreshold(object):
"""Threshold for for Graph panel
(https://grafana.com/docs/grafana/latest/panels/thresholds/)
:param colorMode: Color mode of the threshold, value can be `ok`, `warning`, `critical` or `custom`.
        If `custom` is selected, a lineColor and fillColor should be provided
:param fill: Display threshold fill, defaults to True
:param line: Display threshold line, defaults to True
    :param value: value at which the threshold applies
:param op: EVAL_LT for less than or EVAL_GT for greater than to indicate what the threshold applies to.
:param yaxis: Choose left or right for Graph panels
:param fillColor: Fill color of the threshold, when colorMode = "custom"
:param lineColor: Line color of the threshold, when colorMode = "custom"
Example:
thresholds = [
GraphThreshold(colorMode="ok", value=10.0),
GraphThreshold(colorMode="critical", value=90.0)
]
"""
value = attr.ib(validator=instance_of(float))
colorMode = attr.ib(default="critical")
fill = attr.ib(default=True, validator=instance_of(bool))
line = attr.ib(default=True, validator=instance_of(bool))
op = attr.ib(default=EVAL_GT)
yaxis = attr.ib(default='left')
fillColor = attr.ib(default=RED)
lineColor = attr.ib(default=RED)
def to_json_data(self):
data = {
'value': self.value,
'colorMode': self.colorMode,
'fill': self.fill,
'line': self.line,
'op': self.op,
'yaxis': self.yaxis,
}
if self.colorMode == "custom":
data['fillColor'] = self.fillColor
data['lineColor'] = self.lineColor
return data
@attr.s
class SeriesOverride(object):
alias = attr.ib()
bars = attr.ib(default=False)
lines = attr.ib(default=True)
yaxis = attr.ib(default=1)
color = attr.ib(default=None)
def to_json_data(self):
return {
'alias': self.alias,
'bars': self.bars,
'lines': self.lines,
'yaxis': self.yaxis,
'color': self.color,
}
WORLDMAP_CENTER = ['(0°, 0°)', 'North America', 'Europe', 'West Asia', 'SE Asia', 'Last GeoHash', 'custom']
WORLDMAP_LOCATION_DATA = ['countries', 'countries_3letter', 'states', 'probes', 'geohash', 'json_endpoint', 'jsonp endpoint', 'json result', 'table']
@attr.s
class Worldmap(Panel):
"""Generates Worldmap panel json structure
Grafana doc on Worldmap: https://grafana.com/grafana/plugins/grafana-worldmap-panel/
:param aggregation: metric aggregation: min, max, avg, current, total
:param circleMaxSize: Maximum map circle size
:param circleMinSize: Minimum map circle size
:param decimals: Number of decimals to show
:param geoPoint: Name of the geo_point/geohash column. This is used to calculate where the circle should be drawn.
:param locationData: Format of the location data, options in `WORLDMAP_LOCATION_DATA`
:param locationName: Name of the Location Name column. Used to label each circle on the map. If it is empty then the geohash value is used.
:param metric: Name of the metric column. This is used to give the circle a value - this determines how large the circle is.
:param mapCenter: Where to centre the map, default center (0°, 0°). Options: North America, Europe, West Asia, SE Asia, Last GeoHash, custom
:param mapCenterLatitude: If mapCenter=custom set the initial map latitude
:param mapCenterLongitude: If mapCenter=custom set the initial map longitude
:param hideEmpty: Hide series with only nulls
:param hideZero: Hide series with only zeros
:param initialZoom: Initial map zoom
:param jsonUrl: URL for JSON location data if `json_endpoint` or `jsonp endpoint` used
:param jsonpCallback: Callback if `jsonp endpoint` used
:param mouseWheelZoom: Zoom map on scroll of mouse wheel
:param stickyLabels: Sticky map labels
:param thresholds: String of thresholds eg. '0,10,20'
    :param thresholdColors: List of colors to be used for each threshold
:param unitPlural: Units plural
:param unitSingle: Units single
:param unitSingular: Units singular
"""
circleMaxSize = attr.ib(default=30, validator=instance_of(int))
circleMinSize = attr.ib(default=2, validator=instance_of(int))
decimals = attr.ib(default=0, validator=instance_of(int))
geoPoint = attr.ib(default='geohash', validator=instance_of(str))
locationData = attr.ib(default='countries', validator=attr.validators.in_(WORLDMAP_LOCATION_DATA))
locationName = attr.ib(default='')
hideEmpty = attr.ib(default=False, validator=instance_of(bool))
hideZero = attr.ib(default=False, validator=instance_of(bool))
initialZoom = attr.ib(default=1, validator=instance_of(int))
jsonUrl = attr.ib(default='', validator=instance_of(str))
jsonpCallback = attr.ib(default='', validator=instance_of(str))
mapCenter = attr.ib(default='(0°, 0°)', validator=attr.validators.in_(WORLDMAP_CENTER))
mapCenterLatitude = attr.ib(default=0, validator=instance_of(int))
mapCenterLongitude = attr.ib(default=0, validator=instance_of(int))
metric = attr.ib(default='Value')
mouseWheelZoom = attr.ib(default=False, validator=instance_of(bool))
stickyLabels = attr.ib(default=False, validator=instance_of(bool))
thresholds = attr.ib(default='0,100,150', validator=instance_of(str))
thresholdColors = attr.ib(default=["#73BF69", "#73BF69", "#FADE2A", "#C4162A"], validator=instance_of(list))
unitPlural = attr.ib(default='', validator=instance_of(str))
unitSingle = attr.ib(default='', validator=instance_of(str))
unitSingular = attr.ib(default='', validator=instance_of(str))
aggregation = attr.ib(default='total', validator=instance_of(str))
def to_json_data(self):
return self.panel_json(
{
'circleMaxSize': self.circleMaxSize,
'circleMinSize': self.circleMinSize,
'colors': self.thresholdColors,
'decimals': self.decimals,
'esGeoPoint': self.geoPoint,
'esMetric': self.metric,
'locationData': self.locationData,
'esLocationName': self.locationName,
'hideEmpty': self.hideEmpty,
'hideZero': self.hideZero,
'initialZoom': self.initialZoom,
'jsonUrl': self.jsonUrl,
'jsonpCallback': self.jsonpCallback,
'mapCenter': self.mapCenter,
'mapCenterLatitude': self.mapCenterLatitude,
'mapCenterLongitude': self.mapCenterLongitude,
'mouseWheelZoom': self.mouseWheelZoom,
'stickyLabels': self.stickyLabels,
'thresholds': self.thresholds,
'unitPlural': self.unitPlural,
'unitSingle': self.unitSingle,
'unitSingular': self.unitSingular,
'valueName': self.aggregation,
'tableQueryOptions': {
'queryType': 'geohash',
'geohashField': 'geohash',
'latitudeField': 'latitude',
'longitudeField': 'longitude',
'metricField': 'metric'
},
'type': WORLD_MAP_TYPE
}
)
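
# Illustrative sketch (not part of the original module): a world map sized by
# the average of the metric, keyed by 3-letter country codes.
#
#   worldmap = Worldmap(
#       title='Users by country',
#       locationData='countries_3letter',
#       aggregation='avg',
#       thresholds='0,100,1000',
#       circleMaxSize=20,
#   )
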
@attr.s
class StateTimeline(Panel):
"""Generates State Timeline panel json structure
Grafana docs on State Timeline panel: https://grafana.com/docs/grafana/latest/visualizations/state-timeline/
:param alignValue: Controls value alignment inside state regions, default left
:param colorMode: Default thresholds
    :param fillOpacity: Controls the opacity of state regions, default 70
:param legendDisplayMode: refine how the legend appears, list, table or hidden
:param legendPlacement: bottom or top
:param lineWidth: Controls line width of state regions
:param mappings: To assign colors to boolean or string values, use Value mappings
:param mergeValues: Controls whether Grafana merges identical values if they are next to each other, default True
    :param rowHeight: Controls how much space there is between rows. 1 = no space, 0.5 = 50% space
:param showValue: Controls whether values are rendered inside the state regions. Auto will render values if there is sufficient space.
:param tooltipMode: Default single
:param thresholds: Thresholds are used to turn the time series into discrete colored state regions
"""
alignValue = attr.ib(default='left', validator=instance_of(str))
colorMode = attr.ib(default='thresholds', validator=instance_of(str))
fillOpacity = attr.ib(default=70, validator=instance_of(int))
legendDisplayMode = attr.ib(default='list', validator=instance_of(str))
legendPlacement = attr.ib(default='bottom', validator=instance_of(str))
lineWidth = attr.ib(default=0, validator=instance_of(int))
mappings = attr.ib(default=attr.Factory(list))
mergeValues = attr.ib(default=True, validator=instance_of(bool))
rowHeight = attr.ib(default=0.9, validator=instance_of(float))
showValue = attr.ib(default='auto', validator=instance_of(str))
tooltipMode = attr.ib(default='single', validator=instance_of(str))
thresholds = attr.ib(default=attr.Factory(list))
def to_json_data(self):
return self.panel_json(
{
'fieldConfig': {
'defaults': {
'custom': {
'lineWidth': self.lineWidth,
'fillOpacity': self.fillOpacity
},
'color': {
'mode': self.colorMode
},
'thresholds': {
'mode': ABSOLUTE_TYPE,
'steps': self.thresholds,
},
'mappings': self.mappings
},
'overrides': []
},
'options': {
'mergeValues': self.mergeValues,
'showValue': self.showValue,
'alignValue': self.alignValue,
'rowHeight': self.rowHeight,
'legend': {
'displayMode': self.legendDisplayMode,
'placement': self.legendPlacement
},
'tooltip': {
'mode': self.tooltipMode
}
},
'type': STATE_TIMELINE_TYPE,
}
)
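
# Illustrative sketch (not part of the original module): threshold steps are
# passed as plain dicts here, matching the 'steps' list emitted by
# to_json_data(); the exact step schema is assumed from Grafana's thresholds
# JSON format.
#
#   timeline = StateTimeline(
#       title='Service state',
#       mergeValues=False,
#       rowHeight=0.7,
#       thresholds=[
#           {'color': 'green', 'value': None},
#           {'color': 'red', 'value': 80},
#       ],
#   )
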
@attr.s
class News(Panel):
"""Generates News panel json structure
    Grafana docs on News panel: https://grafana.com/docs/grafana/next/visualizations/news-panel/
:param feedUrl: URL to query, only RSS feed formats are supported (not Atom).
:param showImage: Controls if the news item social (og:image) image is shown above text content
:param useProxy: If the feed is unable to connect, consider a CORS proxy
"""
feedUrl = attr.ib(default='', validator=instance_of(str))
showImage = attr.ib(default=True, validator=instance_of(bool))
useProxy = attr.ib(default=False, validator=instance_of(bool))
def to_json_data(self):
return self.panel_json(
{
'options': {
'feedUrl': self.feedUrl,
'showImage': self.showImage,
'useProxy': self.useProxy
},
'type': NEWS_TYPE,
}
)
|
py | b4100631b3b7772915ebe05dec4fbfffe6ddaa68 | import os
from django.core.management.base import BaseCommand
from corehq.apps.app_manager.models import Application
from corehq.apps.commtrack.util import unicode_slug
class Command(BaseCommand):
help = """
Downloads an app's forms in a more convenient directory structure for working with offline.
See also: upload_app_forms
"""
def add_arguments(self, parser):
parser.add_argument('app_id')
parser.add_argument('path')
def handle(self, app_id, path, **options):
# setup directory
if not os.path.exists(path):
os.mkdir(path)
app = Application.get(app_id)
for module_index, module in enumerate(app.get_modules()):
module_dir_name = '{index} - {name}'.format(index=module_index, name=unicode_slug(module.default_name()))
module_dir = os.path.join(path, module_dir_name)
if not os.path.exists(module_dir):
os.mkdir(module_dir)
for form_index, form in enumerate(module.get_forms()):
form_name = ('{index} - {name}.xml'.format(index=form_index, name=unicode_slug(form.default_name())))
form_path = os.path.join(module_dir, form_name)
with open(form_path, 'wb') as f:
f.write(form.source.encode('utf-8'))
print('wrote {}'.format(form_path))
|
py | b41007071f54c9c1a21ebc5c4e7ac7997c49f4a9 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command-line interface to inspect and execute a graph in a SavedModel.
For detailed usages and examples, please refer to:
https://www.tensorflow.org/guide/saved_model#cli_to_inspect_and_execute_savedmodel
"""
import argparse
import ast
import os
import re
import sys
from absl import app # pylint: disable=unused-import
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import meta_graph as meta_graph_lib
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.tools import saved_model_aot_compile
from tensorflow.python.tools import saved_model_utils
from tensorflow.python.tpu import tpu
from tensorflow.python.util.compat import collections_abc
_XLA_DEBUG_OPTIONS_URL = (
'https://github.com/tensorflow/tensorflow/blob/master/'
'tensorflow/compiler/xla/debug_options_flags.cc')
# Set of ops to denylist.
_OP_DENYLIST = set(['WriteFile', 'ReadFile', 'PrintV2'])
def _show_tag_sets(saved_model_dir):
"""Prints the tag-sets stored in SavedModel directory.
Prints all the tag-sets for MetaGraphs stored in SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
"""
tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)
print('The given SavedModel contains the following tag-sets:')
for tag_set in sorted(tag_sets):
print('%r' % ', '.join(sorted(tag_set)))
def _show_signature_def_map_keys(saved_model_dir, tag_set):
"""Prints the keys for each SignatureDef in the SignatureDef map.
Prints the list of SignatureDef keys from the SignatureDef map specified by
the given tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from,
in string format, separated by ','. For tag-set contains multiple tags,
all tags must be passed in.
"""
signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
print('The given SavedModel MetaGraphDef contains SignatureDefs with the '
'following keys:')
for signature_def_key in sorted(signature_def_map.keys()):
print('SignatureDef key: \"%s\"' % signature_def_key)
def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,
signature_def_key):
"""Gets TensorInfo for all inputs of the SignatureDef.
Returns a dictionary that maps each input key to its TensorInfo for the given
signature_def_key in the meta_graph_def
Args:
meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
look up SignatureDef key.
signature_def_key: A SignatureDef key string.
Returns:
A dictionary that maps input tensor keys to TensorInfos.
Raises:
ValueError if `signature_def_key` is not found in the MetaGraphDef.
"""
if signature_def_key not in meta_graph_def.signature_def:
raise ValueError(
f'Could not find signature "{signature_def_key}". Please choose from: '
f'{", ".join(meta_graph_def.signature_def.keys())}')
return meta_graph_def.signature_def[signature_def_key].inputs
def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
signature_def_key):
"""Gets TensorInfos for all outputs of the SignatureDef.
Returns a dictionary that maps each output key to its TensorInfo for the given
signature_def_key in the meta_graph_def.
Args:
    meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
look up signature_def_key.
signature_def_key: A SignatureDef key string.
Returns:
A dictionary that maps output tensor keys to TensorInfos.
"""
return meta_graph_def.signature_def[signature_def_key].outputs
def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0):
"""Prints input and output TensorInfos.
Prints the details of input and output TensorInfos for the SignatureDef mapped
by the given signature_def_key.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by
','. For tag-set contains multiple tags, all tags must be passed in.
signature_def_key: A SignatureDef key string.
indent: How far (in increments of 2 spaces) to indent each line of output.
"""
meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
tag_set)
inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
indent_str = ' ' * indent
def in_print(s):
print(indent_str + s)
in_print('The given SavedModel SignatureDef contains the following input(s):')
for input_key, input_tensor in sorted(inputs_tensor_info.items()):
in_print(' inputs[\'%s\'] tensor_info:' % input_key)
_print_tensor_info(input_tensor, indent+1)
in_print('The given SavedModel SignatureDef contains the following '
'output(s):')
for output_key, output_tensor in sorted(outputs_tensor_info.items()):
in_print(' outputs[\'%s\'] tensor_info:' % output_key)
_print_tensor_info(output_tensor, indent+1)
in_print('Method name is: %s' %
meta_graph_def.signature_def[signature_def_key].method_name)
def _show_defined_functions(saved_model_dir):
"""Prints the callable concrete and polymorphic functions of the Saved Model.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
"""
meta_graphs = saved_model_utils.read_saved_model(saved_model_dir).meta_graphs
has_object_graph_def = False
for meta_graph_def in meta_graphs:
has_object_graph_def |= meta_graph_def.HasField('object_graph_def')
if not has_object_graph_def:
return
with ops_lib.Graph().as_default():
trackable_object = load.load(saved_model_dir)
print('\nConcrete Functions:', end='')
children = list(
save._AugmentedGraphView(trackable_object) # pylint: disable=protected-access
.list_children(trackable_object))
children = sorted(children, key=lambda x: x.name)
for name, child in children:
concrete_functions = []
if isinstance(child, defun.ConcreteFunction):
concrete_functions.append(child)
elif isinstance(child, def_function.Function):
concrete_functions.extend(
child._list_all_concrete_functions_for_serialization()) # pylint: disable=protected-access
else:
continue
print('\n Function Name: \'%s\'' % name)
concrete_functions = sorted(concrete_functions, key=lambda x: x.name)
for index, concrete_function in enumerate(concrete_functions, 1):
args, kwargs = None, None
if concrete_function.structured_input_signature:
args, kwargs = concrete_function.structured_input_signature
elif concrete_function._arg_keywords: # pylint: disable=protected-access
# For pure ConcreteFunctions we might have nothing better than
# _arg_keywords.
args = concrete_function._arg_keywords # pylint: disable=protected-access
if args:
print(' Option #%d' % index)
print(' Callable with:')
_print_args(args, indent=4)
if kwargs:
_print_args(kwargs, 'Named Argument', indent=4)
def _print_args(arguments, argument_type='Argument', indent=0):
"""Formats and prints the argument of the concrete functions defined in the model.
Args:
arguments: Arguments to format print.
argument_type: Type of arguments.
indent: How far (in increments of 2 spaces) to indent each line of
output.
"""
indent_str = ' ' * indent
def _maybe_add_quotes(value):
is_quotes = '\'' * isinstance(value, str)
return is_quotes + str(value) + is_quotes
def in_print(s, end='\n'):
print(indent_str + s, end=end)
for index, element in enumerate(arguments, 1):
if indent == 4:
in_print('%s #%d' % (argument_type, index))
if isinstance(element, six.string_types):
in_print(' %s' % element)
elif isinstance(element, tensor_spec.TensorSpec):
print((indent + 1) * ' ' + '%s: %s' % (element.name, repr(element)))
elif (isinstance(element, collections_abc.Iterable) and
not isinstance(element, dict)):
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: [', end='')
for value in element:
print('%s' % _maybe_add_quotes(value), end=', ')
print('\b\b]')
elif isinstance(element, dict):
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: {', end='')
for (key, value) in element.items():
print('\'%s\': %s' % (str(key), _maybe_add_quotes(value)), end=', ')
print('\b\b}')
else:
in_print(' DType: %s' % type(element).__name__)
in_print(' Value: %s' % str(element))
def _print_tensor_info(tensor_info, indent=0):
"""Prints details of the given tensor_info.
Args:
tensor_info: TensorInfo object to be printed.
    indent: How far (in increments of 2 spaces) to indent each line of output
"""
indent_str = ' ' * indent
def in_print(s):
print(indent_str + s)
in_print(' dtype: ' +
{value: key
for (key, value) in types_pb2.DataType.items()}[tensor_info.dtype])
# Display shape as tuple.
if tensor_info.tensor_shape.unknown_rank:
shape = 'unknown_rank'
else:
dims = [str(dim.size) for dim in tensor_info.tensor_shape.dim]
shape = ', '.join(dims)
shape = '(' + shape + ')'
in_print(' shape: ' + shape)
in_print(' name: ' + tensor_info.name)
def _show_all(saved_model_dir):
"""Prints tag-set, SignatureDef and Inputs/Outputs information in SavedModel.
Prints all tag-set, SignatureDef and Inputs/Outputs information stored in
SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect.
"""
tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)
for tag_set in sorted(tag_sets):
print("\nMetaGraphDef with tag-set: '%s' "
"contains the following SignatureDefs:" % ', '.join(tag_set))
tag_set = ','.join(tag_set)
signature_def_map = get_signature_def_map(saved_model_dir, tag_set)
for signature_def_key in sorted(signature_def_map.keys()):
print('\nsignature_def[\'' + signature_def_key + '\']:')
_show_inputs_outputs(saved_model_dir, tag_set, signature_def_key,
indent=1)
_show_defined_functions(saved_model_dir)
def get_meta_graph_def(saved_model_dir, tag_set):
"""DEPRECATED: Use saved_model_utils.get_meta_graph_def instead.
Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given
tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect or execute.
tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
separated by ','. For tag-set contains multiple tags, all tags must be
passed in.
Raises:
RuntimeError: An error when the given tag-set does not exist in the
SavedModel.
Returns:
A MetaGraphDef corresponding to the tag-set.
"""
return saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)
def get_signature_def_map(saved_model_dir, tag_set):
"""Gets SignatureDef map from a MetaGraphDef in a SavedModel.
Returns the SignatureDef map for the given tag-set in the SavedModel
directory.
Args:
saved_model_dir: Directory containing the SavedModel to inspect or execute.
tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
string format, separated by ','. For tag-set contains multiple tags, all
tags must be passed in.
Returns:
A SignatureDef map that maps from string keys to SignatureDefs.
"""
meta_graph = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)
return meta_graph.signature_def
def scan_meta_graph_def(meta_graph_def):
"""Scans meta_graph_def and reports if there are ops on denylist.
Print ops if they are on black list, or print success if no denylisted ops
found.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
"""
all_ops_set = set(
meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))
denylisted_ops = _OP_DENYLIST & all_ops_set
if denylisted_ops:
# TODO(yifeif): print more warnings
print(
'MetaGraph with tag set %s contains the following denylisted ops:' %
meta_graph_def.meta_info_def.tags, denylisted_ops)
else:
print('MetaGraph with tag set %s does not contain denylisted ops.' %
meta_graph_def.meta_info_def.tags)
def run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key,
input_tensor_key_feed_dict, outdir,
overwrite_flag, worker=None, init_tpu=False,
tf_debug=False):
"""Runs SavedModel and fetch all outputs.
Runs the input dictionary through the MetaGraphDef within a SavedModel
specified by the given tag_set and SignatureDef. Also save the outputs to file
if outdir is not None.
Args:
saved_model_dir: Directory containing the SavedModel to execute.
tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
string format, separated by ','. For tag-set contains multiple tags, all
tags must be passed in.
signature_def_key: A SignatureDef key string.
input_tensor_key_feed_dict: A dictionary maps input keys to numpy ndarrays.
outdir: A directory to save the outputs to. If the directory doesn't exist,
it will be created.
overwrite_flag: A boolean flag to allow overwrite output file if file with
the same name exists.
worker: If provided, the session will be run on the worker. Valid worker
specification is a bns or gRPC path.
init_tpu: If true, the TPU system will be initialized after the session
is created.
tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the
intermediate Tensor values and runtime GraphDefs while running the
SavedModel.
Raises:
ValueError: When any of the input tensor keys is not valid.
RuntimeError: An error when output file already exists and overwrite is not
enabled.
"""
# Get a list of output tensor names.
meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,
tag_set)
# Re-create feed_dict based on input tensor name instead of key as session.run
# uses tensor name.
inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
# Check if input tensor keys are valid.
for input_key_name in input_tensor_key_feed_dict.keys():
if input_key_name not in inputs_tensor_info:
raise ValueError(
'"%s" is not a valid input key. Please choose from %s, or use '
'--show option.' %
(input_key_name, '"' + '", "'.join(inputs_tensor_info.keys()) + '"'))
inputs_feed_dict = {
inputs_tensor_info[key].name: tensor
for key, tensor in input_tensor_key_feed_dict.items()
}
# Get outputs
outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
meta_graph_def, signature_def_key)
# Sort to preserve order because we need to go from value to key later.
output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())
output_tensor_names_sorted = [
outputs_tensor_info[tensor_key].name
for tensor_key in output_tensor_keys_sorted
]
with session.Session(worker, graph=ops_lib.Graph()) as sess:
if init_tpu:
print('Initializing TPU System ...')
# This is needed for freshly started worker, or if the job
# restarts after a preemption.
sess.run(tpu.initialize_system())
loader.load(sess, tag_set.split(','), saved_model_dir)
if tf_debug:
sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess)
outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)
for i, output in enumerate(outputs):
output_tensor_key = output_tensor_keys_sorted[i]
print('Result for output key %s:\n%s' % (output_tensor_key, output))
# Only save if outdir is specified.
if outdir:
# Create directory if outdir does not exist
if not os.path.isdir(outdir):
os.makedirs(outdir)
output_full_path = os.path.join(outdir, output_tensor_key + '.npy')
# If overwrite not enabled and file already exist, error out
if not overwrite_flag and os.path.exists(output_full_path):
raise RuntimeError(
'Output file %s already exists. Add \"--overwrite\" to overwrite'
' the existing output files.' % output_full_path)
np.save(output_full_path, output)
print('Output %s is saved to %s' % (output_tensor_key,
output_full_path))
def preprocess_inputs_arg_string(inputs_str):
"""Parses input arg into dictionary that maps input to file/variable tuple.
Parses input string in the format of, for example,
"input1=filename1[variable_name1],input2=filename2" into a
dictionary looks like
{'input_key1': (filename1, variable_name1),
'input_key2': (file2, None)}
, which maps input keys to a tuple of file name and variable name(None if
empty).
Args:
inputs_str: A string that specified where to load inputs. Inputs are
separated by semicolons.
* For each input key:
'<input_key>=<filename>' or
'<input_key>=<filename>[<variable_name>]'
* The optional 'variable_name' key will be set to None if not specified.
Returns:
A dictionary that maps input keys to a tuple of file name and variable name.
Raises:
RuntimeError: An error when the given input string is in a bad format.
"""
input_dict = {}
inputs_raw = inputs_str.split(';')
for input_raw in filter(bool, inputs_raw): # skip empty strings
    # Format of 'input=filename[variable_name]'
match = re.match(r'([^=]+)=([^\[\]]+)\[([^\[\]]+)\]$', input_raw)
if match:
input_dict[match.group(1)] = match.group(2), match.group(3)
else:
      # Format of 'input=filename'
match = re.match(r'([^=]+)=([^\[\]]+)$', input_raw)
if match:
input_dict[match.group(1)] = match.group(2), None
else:
raise RuntimeError(
'--inputs "%s" format is incorrect. Please follow'
'"<input_key>=<filename>", or'
'"<input_key>=<filename>[<variable_name>]"' % input_raw)
return input_dict
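
# Example (illustrative, not exercised by this module): the parser above turns
#   'x=/tmp/features.npz[arr0];y=/tmp/labels.npy'
# into
#   {'x': ('/tmp/features.npz', 'arr0'), 'y': ('/tmp/labels.npy', None)}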
def preprocess_input_exprs_arg_string(input_exprs_str, safe=True):
"""Parses input arg into dictionary that maps input key to python expression.
Parses input string in the format of 'input_key=<python expression>' into a
dictionary that maps each input_key to its python expression.
Args:
input_exprs_str: A string that specifies python expression for input keys.
Each input is separated by semicolon. For each input key:
'input_key=<python expression>'
safe: Whether to evaluate the python expression as literals or allow
arbitrary calls (e.g. numpy usage).
Returns:
A dictionary that maps input keys to their values.
Raises:
RuntimeError: An error when the given input string is in a bad format.
"""
input_dict = {}
for input_raw in filter(bool, input_exprs_str.split(';')):
    if '=' not in input_raw:
      raise RuntimeError('--input_exprs "%s" format is incorrect. Please follow '
                         '"<input_key>=<python expression>"' % input_raw)
input_key, expr = input_raw.split('=', 1)
if safe:
try:
input_dict[input_key] = ast.literal_eval(expr)
      except Exception as exc:
        raise RuntimeError(
            f'Expression "{expr}" is not a valid python literal.') from exc
else:
# ast.literal_eval does not work with numpy expressions
input_dict[input_key] = eval(expr) # pylint: disable=eval-used
return input_dict
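
# Example (illustrative, not exercised by this module): with safe=True the
# parser relies on ast.literal_eval, so
#   preprocess_input_exprs_arg_string("x=[[1.0, 2.0]];y=3")
# returns
#   {'x': [[1.0, 2.0]], 'y': 3}
# whereas numpy expressions such as 'x=np.ones((2, 2))' require safe=False.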
def preprocess_input_examples_arg_string(input_examples_str):
"""Parses input into dict that maps input keys to lists of tf.Example.
Parses input string in the format of 'input_key1=[{feature_name:
feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary
that maps each input_key to its list of serialized tf.Example.
Args:
input_examples_str: A string that specifies a list of dictionaries of
feature_names and their feature_lists for each input.
Each input is separated by semicolon. For each input key:
'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]'
items in feature_list can be the type of float, int, long or str.
Returns:
A dictionary that maps input keys to lists of serialized tf.Example.
Raises:
ValueError: An error when the given tf.Example is not a list.
"""
input_dict = preprocess_input_exprs_arg_string(input_examples_str)
for input_key, example_list in input_dict.items():
if not isinstance(example_list, list):
raise ValueError(
'tf.Example input must be a list of dictionaries, but "%s" is %s' %
(example_list, type(example_list)))
input_dict[input_key] = [
_create_example_string(example) for example in example_list
]
return input_dict
def _create_example_string(example_dict):
"""Create a serialized tf.example from feature dictionary."""
example = example_pb2.Example()
for feature_name, feature_list in example_dict.items():
if not isinstance(feature_list, list):
raise ValueError('feature value must be a list, but %s: "%s" is %s' %
(feature_name, feature_list, type(feature_list)))
if isinstance(feature_list[0], float):
example.features.feature[feature_name].float_list.value.extend(
feature_list)
elif isinstance(feature_list[0], str):
example.features.feature[feature_name].bytes_list.value.extend(
[f.encode('utf8') for f in feature_list])
elif isinstance(feature_list[0], bytes):
example.features.feature[feature_name].bytes_list.value.extend(
feature_list)
elif isinstance(feature_list[0], six.integer_types):
example.features.feature[feature_name].int64_list.value.extend(
feature_list)
else:
raise ValueError(
'Type %s for value %s is not supported for tf.train.Feature.' %
(type(feature_list[0]), feature_list[0]))
return example.SerializeToString()
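
# Example (illustrative, not exercised by this module): a feature dictionary
# such as
#   {'id': [26], 'weights': [0.5, 0.5]}
# is converted into a serialized tf.train.Example with an int64_list feature
# 'id' and a float_list feature 'weights'.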
def load_inputs_from_input_arg_string(inputs_str, input_exprs_str,
input_examples_str):
"""Parses input arg strings and create inputs feed_dict.
Parses '--inputs' string for inputs to be loaded from file, and parses
'--input_exprs' string for inputs to be evaluated from python expression.
'--input_examples' string for inputs to be created from tf.example feature
dictionary list.
Args:
inputs_str: A string that specified where to load inputs. Each input is
separated by semicolon.
* For each input key:
'<input_key>=<filename>' or
'<input_key>=<filename>[<variable_name>]'
* The optional 'variable_name' key will be set to None if not specified.
* File specified by 'filename' will be loaded using numpy.load. Inputs
can be loaded from only .npy, .npz or pickle files.
* The "[variable_name]" key is optional depending on the input file type
        as described in more detail below.
When loading from a npy file, which always contains a numpy ndarray, the
content will be directly assigned to the specified input tensor. If a
variable_name is specified, it will be ignored and a warning will be
issued.
When loading from a npz zip file, user can specify which variable within
the zip file to load for the input tensor inside the square brackets. If
nothing is specified, this function will check that only one file is
included in the zip and load it for the specified input tensor.
When loading from a pickle file, if no variable_name is specified in the
square brackets, whatever that is inside the pickle file will be passed
to the specified input tensor, else SavedModel CLI will assume a
dictionary is stored in the pickle file and the value corresponding to
the variable_name will be used.
input_exprs_str: A string that specifies python expressions for inputs.
* In the format of: '<input_key>=<python expression>'.
* numpy module is available as np.
input_examples_str: A string that specifies tf.Example with dictionary.
* In the format of: '<input_key>=<[{feature:value list}]>'
Returns:
A dictionary that maps input tensor keys to numpy ndarrays.
Raises:
RuntimeError: An error when a key is specified, but the input file contains
multiple numpy ndarrays, none of which matches the given key.
RuntimeError: An error when no key is specified, but the input file contains
more than one numpy ndarrays.
"""
tensor_key_feed_dict = {}
inputs = preprocess_inputs_arg_string(inputs_str)
input_exprs = preprocess_input_exprs_arg_string(input_exprs_str, safe=False)
input_examples = preprocess_input_examples_arg_string(input_examples_str)
for input_tensor_key, (filename, variable_name) in inputs.items():
data = np.load(file_io.FileIO(filename, mode='rb'), allow_pickle=True) # pylint: disable=unexpected-keyword-arg
# When a variable_name key is specified for the input file
if variable_name:
# if file contains a single ndarray, ignore the input name
if isinstance(data, np.ndarray):
logging.warn(
'Input file %s contains a single ndarray. Name key \"%s\" ignored.'
% (filename, variable_name))
tensor_key_feed_dict[input_tensor_key] = data
else:
if variable_name in data:
tensor_key_feed_dict[input_tensor_key] = data[variable_name]
else:
raise RuntimeError(
'Input file %s does not contain variable with name \"%s\".' %
(filename, variable_name))
# When no key is specified for the input file.
else:
# Check if npz file only contains a single numpy ndarray.
if isinstance(data, np.lib.npyio.NpzFile):
variable_name_list = data.files
if len(variable_name_list) != 1:
raise RuntimeError(
              'Input file %s contains more than one ndarray. Please specify '
'the name of ndarray to use.' % filename)
tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]]
else:
tensor_key_feed_dict[input_tensor_key] = data
# When input is a python expression:
for input_tensor_key, py_expr_evaluated in input_exprs.items():
if input_tensor_key in tensor_key_feed_dict:
logging.warn(
'input_key %s has been specified with both --inputs and --input_exprs'
' options. Value in --input_exprs will be used.' % input_tensor_key)
tensor_key_feed_dict[input_tensor_key] = py_expr_evaluated
# When input is a tf.Example:
for input_tensor_key, example in input_examples.items():
if input_tensor_key in tensor_key_feed_dict:
logging.warn(
'input_key %s has been specified in multiple options. Value in '
'--input_examples will be used.' % input_tensor_key)
tensor_key_feed_dict[input_tensor_key] = example
return tensor_key_feed_dict
def show(args):
"""Function triggered by show command.
Args:
args: A namespace parsed from command line.
"""
# If all tag is specified, display all information.
if args.all:
_show_all(args.dir)
else:
# If no tag is specified, display all tag_set, if no signature_def key is
# specified, display all SignatureDef keys, else show input output tensor
# information corresponding to the given SignatureDef key
if args.tag_set is None:
_show_tag_sets(args.dir)
else:
if args.signature_def is None:
_show_signature_def_map_keys(args.dir, args.tag_set)
else:
_show_inputs_outputs(args.dir, args.tag_set, args.signature_def)
def run(args):
"""Function triggered by run command.
Args:
args: A namespace parsed from command line.
Raises:
AttributeError: An error when neither --inputs nor --input_exprs is passed
to run command.
"""
if not args.inputs and not args.input_exprs and not args.input_examples:
raise AttributeError(
'At least one of --inputs, --input_exprs or --input_examples must be '
        'provided')
tensor_key_feed_dict = load_inputs_from_input_arg_string(
args.inputs, args.input_exprs, args.input_examples)
run_saved_model_with_feed_dict(args.dir, args.tag_set, args.signature_def,
tensor_key_feed_dict, args.outdir,
args.overwrite, worker=args.worker,
init_tpu=args.init_tpu, tf_debug=args.tf_debug)
def scan(args):
"""Function triggered by scan command.
Args:
args: A namespace parsed from command line.
"""
if args.tag_set:
scan_meta_graph_def(
saved_model_utils.get_meta_graph_def(args.dir, args.tag_set))
else:
saved_model = saved_model_utils.read_saved_model(args.dir)
for meta_graph_def in saved_model.meta_graphs:
scan_meta_graph_def(meta_graph_def)
def convert_with_tensorrt(args):
"""Function triggered by 'convert tensorrt' command.
Args:
args: A namespace parsed from command line.
"""
# Import here instead of at top, because this will crash if TensorRT is
# not installed
from tensorflow.python.compiler.tensorrt import trt_convert as trt # pylint: disable=g-import-not-at-top
if not args.convert_tf1_model:
params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
max_workspace_size_bytes=args.max_workspace_size_bytes,
precision_mode=args.precision_mode,
minimum_segment_size=args.minimum_segment_size)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=args.dir,
input_saved_model_tags=args.tag_set.split(','),
**params._asdict())
try:
converter.convert()
except Exception as e:
raise RuntimeError(
'{}. Try passing "--convert_tf1_model=True".'.format(e))
converter.save(output_saved_model_dir=args.output_dir)
else:
trt.create_inference_graph(
None,
None,
max_batch_size=1,
max_workspace_size_bytes=args.max_workspace_size_bytes,
precision_mode=args.precision_mode,
minimum_segment_size=args.minimum_segment_size,
is_dynamic_op=True,
input_saved_model_dir=args.dir,
input_saved_model_tags=args.tag_set.split(','),
output_saved_model_dir=args.output_dir)
def freeze_model(args):
"""Function triggered by freeze_model command.
Args:
args: A namespace parsed from command line.
"""
checkpoint_path = (
args.checkpoint_path
or os.path.join(args.dir, 'variables/variables'))
if not args.variables_to_feed:
variables_to_feed = []
elif args.variables_to_feed.lower() == 'all':
variables_to_feed = None # We will identify them after.
else:
variables_to_feed = args.variables_to_feed.split(',')
saved_model_aot_compile.freeze_model(
checkpoint_path=checkpoint_path,
meta_graph_def=saved_model_utils.get_meta_graph_def(
args.dir, args.tag_set),
signature_def_key=args.signature_def_key,
variables_to_feed=variables_to_feed,
output_prefix=args.output_prefix)
def aot_compile_cpu(args):
"""Function triggered by aot_compile_cpu command.
Args:
args: A namespace parsed from command line.
"""
checkpoint_path = (
args.checkpoint_path
or os.path.join(args.dir, 'variables/variables'))
if not args.variables_to_feed:
variables_to_feed = []
elif args.variables_to_feed.lower() == 'all':
variables_to_feed = None # We will identify them after.
else:
variables_to_feed = args.variables_to_feed.split(',')
saved_model_aot_compile.aot_compile_cpu_meta_graph_def(
checkpoint_path=checkpoint_path,
meta_graph_def=saved_model_utils.get_meta_graph_def(
args.dir, args.tag_set),
signature_def_key=args.signature_def_key,
variables_to_feed=variables_to_feed,
output_prefix=args.output_prefix,
target_triple=args.target_triple,
target_cpu=args.target_cpu,
cpp_class=args.cpp_class,
multithreading=args.multithreading.lower() not in ('f', 'false', '0'))
def add_show_subparser(subparsers):
"""Add parser for `show`."""
show_msg = (
'Usage examples:\n'
'To show all tag-sets in a SavedModel:\n'
'$saved_model_cli show --dir /tmp/saved_model\n\n'
'To show all available SignatureDef keys in a '
'MetaGraphDef specified by its tag-set:\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve\n\n'
'For a MetaGraphDef with multiple tags in the tag-set, all tags must be '
'passed in, separated by \';\':\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\n\n'
'To show all inputs and outputs TensorInfo for a specific'
' SignatureDef specified by the SignatureDef key in a'
' MetaGraph.\n'
'$saved_model_cli show --dir /tmp/saved_model --tag_set serve'
' --signature_def serving_default\n\n'
'To show all available information in the SavedModel:\n'
'$saved_model_cli show --dir /tmp/saved_model --all')
parser_show = subparsers.add_parser(
'show',
description=show_msg,
formatter_class=argparse.RawTextHelpFormatter)
parser_show.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to inspect')
parser_show.add_argument(
'--all',
action='store_true',
help='if set, will output all information in given SavedModel')
parser_show.add_argument(
'--tag_set',
type=str,
default=None,
help='tag-set of graph in SavedModel to show, separated by \',\'')
parser_show.add_argument(
'--signature_def',
type=str,
default=None,
metavar='SIGNATURE_DEF_KEY',
help='key of SignatureDef to display input(s) and output(s) for')
parser_show.set_defaults(func=show)
def add_run_subparser(subparsers):
"""Add parser for `run`."""
run_msg = ('Usage example:\n'
'To run input tensors from files through a MetaGraphDef and save'
' the output tensors to files:\n'
             '$saved_model_cli run --dir /tmp/saved_model --tag_set serve \\\n'
' --signature_def serving_default \\\n'
' --inputs input1_key=/tmp/124.npz[x],input2_key=/tmp/123.npy '
'\\\n'
' --input_exprs \'input3_key=np.ones(2)\' \\\n'
' --input_examples '
'\'input4_key=[{"id":[26],"weights":[0.5, 0.5]}]\' \\\n'
' --outdir=/out\n\n'
'For more information about input file format, please see:\n'
'https://www.tensorflow.org/guide/saved_model_cli\n')
parser_run = subparsers.add_parser(
'run', description=run_msg, formatter_class=argparse.RawTextHelpFormatter)
parser_run.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to execute')
parser_run.add_argument(
'--tag_set',
type=str,
required=True,
help='tag-set of graph in SavedModel to load, separated by \',\'')
parser_run.add_argument(
'--signature_def',
type=str,
required=True,
metavar='SIGNATURE_DEF_KEY',
help='key of SignatureDef to run')
  msg = ('Loading inputs from files, in the format of \'<input_key>=<filename>\','
' or \'<input_key>=<filename>[<variable_name>]\', separated by \';\'.'
' The file format can only be from .npy, .npz or pickle.')
parser_run.add_argument('--inputs', type=str, default='', help=msg)
msg = ('Specifying inputs by python expressions, in the format of'
' "<input_key>=\'<python expression>\'", separated by \';\'. '
'numpy module is available as \'np\'. Please note that the expression '
'will be evaluated as-is, and is susceptible to code injection. '
'When this is set, the value will override duplicate input keys from '
'--inputs option.')
parser_run.add_argument('--input_exprs', type=str, default='', help=msg)
msg = (
'Specifying tf.Example inputs as list of dictionaries. For example: '
'<input_key>=[{feature0:value_list,feature1:value_list}]. Use ";" to '
'separate input keys. Will override duplicate input keys from --inputs '
'and --input_exprs option.')
parser_run.add_argument('--input_examples', type=str, default='', help=msg)
parser_run.add_argument(
'--outdir',
type=str,
default=None,
help='if specified, output tensor(s) will be saved to given directory')
parser_run.add_argument(
'--overwrite',
action='store_true',
help='if set, output file will be overwritten if it already exists.')
parser_run.add_argument(
'--tf_debug',
action='store_true',
help='if set, will use TensorFlow Debugger (tfdbg) to watch the '
'intermediate Tensors and runtime GraphDefs while running the '
'SavedModel.')
parser_run.add_argument(
'--worker',
type=str,
default=None,
help='if specified, a Session will be run on the worker. '
'Valid worker specification is a bns or gRPC path.')
parser_run.add_argument(
'--init_tpu',
action='store_true',
default=None,
help='if specified, tpu.initialize_system will be called on the Session. '
'This option should be only used if the worker is a TPU job.')
parser_run.set_defaults(func=run)
def add_scan_subparser(subparsers):
"""Add parser for `scan`."""
scan_msg = ('Usage example:\n'
'To scan for denylisted ops in SavedModel:\n'
'$saved_model_cli scan --dir /tmp/saved_model\n'
'To scan a specific MetaGraph, pass in --tag_set\n')
parser_scan = subparsers.add_parser(
'scan',
description=scan_msg,
formatter_class=argparse.RawTextHelpFormatter)
parser_scan.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to execute')
parser_scan.add_argument(
'--tag_set',
type=str,
help='tag-set of graph in SavedModel to scan, separated by \',\'')
parser_scan.set_defaults(func=scan)
def add_convert_subparser(subparsers):
"""Add parser for `convert`."""
convert_msg = ('Usage example:\n'
'To convert the SavedModel to one that have TensorRT ops:\n'
'$saved_model_cli convert \\\n'
' --dir /tmp/saved_model \\\n'
' --tag_set serve \\\n'
' --output_dir /tmp/saved_model_trt \\\n'
' tensorrt \n')
parser_convert = subparsers.add_parser(
'convert',
description=convert_msg,
formatter_class=argparse.RawTextHelpFormatter)
parser_convert.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to convert')
parser_convert.add_argument(
'--output_dir',
type=str,
required=True,
help='output directory for the converted SavedModel')
parser_convert.add_argument(
'--tag_set',
type=str,
required=True,
help='tag-set of graph in SavedModel to convert, separated by \',\'')
convert_subparsers = parser_convert.add_subparsers(
title='conversion methods',
description='valid conversion methods',
help='the conversion to run with the SavedModel')
parser_convert_with_tensorrt = convert_subparsers.add_parser(
'tensorrt',
description='Convert the SavedModel with Tensorflow-TensorRT integration',
formatter_class=argparse.RawTextHelpFormatter)
parser_convert_with_tensorrt.add_argument(
'--max_workspace_size_bytes',
type=int,
default=2 << 20,
help=('the maximum GPU temporary memory which the TRT engine can use at '
'execution time'))
parser_convert_with_tensorrt.add_argument(
'--precision_mode',
type=str,
default='FP32',
help='one of FP32, FP16 and INT8')
parser_convert_with_tensorrt.add_argument(
'--minimum_segment_size',
type=int,
default=3,
      help=('the minimum number of nodes required for a subgraph to be replaced '
'in a TensorRT node'))
parser_convert_with_tensorrt.add_argument(
'--convert_tf1_model',
type=bool,
default=False,
help='support TRT conversion for TF1 models')
parser_convert_with_tensorrt.set_defaults(func=convert_with_tensorrt)
def _parse_common_freeze_and_aot(parser_compile):
"""Parse arguments shared by freeze model and aot_compile."""
parser_compile.add_argument(
'--dir',
type=str,
required=True,
help='directory containing the SavedModel to convert')
parser_compile.add_argument(
'--output_prefix',
type=str,
required=True,
help=('output directory + filename prefix for the resulting header(s) '
'and object file(s)'))
parser_compile.add_argument(
'--tag_set',
type=str,
required=True,
help='tag-set of graph in SavedModel to convert, separated by \',\'')
parser_compile.add_argument(
'--signature_def_key',
type=str,
default=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
help=('signature_def key to use. '
'default: DEFAULT_SERVING_SIGNATURE_DEF_KEY'))
parser_compile.add_argument(
'--checkpoint_path',
type=str,
default=None,
help='Custom checkpoint to use (default: use the SavedModel variables)')
parser_compile.add_argument(
'--variables_to_feed',
type=str,
default='',
help=('The names of variables that will be fed into the network. '
'Options are: empty (default; all variables are frozen, none may '
'be fed), \'all\' (all variables may be fed), or a '
'comma-delimited list of names of variables that may be fed. In '
'the last case, the non-fed variables will be frozen in the graph.'
'**NOTE** Any variables passed to `variables_to_feed` *must be set '
'by the user*. These variables will NOT be frozen and their '
'values will be uninitialized in the compiled object '
'(this applies to all input arguments from the signature as '
'well).'))
def add_freeze_model_subparser(subparsers):
"""Add parser for `freeze_model`."""
compile_msg = '\n'.join(
['Usage example:',
'To freeze a SavedModel in preparation for tfcompile:',
'$saved_model_cli freeze_model \\',
' --dir /tmp/saved_model \\',
' --tag_set serve \\',
' --output_prefix /tmp/saved_model_xla_aot',
])
parser_compile = subparsers.add_parser(
'freeze_model',
description=compile_msg,
formatter_class=argparse.RawTextHelpFormatter)
_parse_common_freeze_and_aot(parser_compile)
parser_compile.set_defaults(func=freeze_model)
def add_aot_compile_cpu_subparser(subparsers):
"""Add parser for `aot_compile_cpu`."""
compile_msg = '\n'.join(
['Usage example:',
'To compile a SavedModel signature via (CPU) XLA AOT:',
'$saved_model_cli aot_compile_cpu \\',
' --dir /tmp/saved_model \\',
' --tag_set serve \\',
       ' --output_prefix /tmp/saved_model_xla_aot',
'', '',
'Note: Additional XLA compilation options are available by setting the ',
'XLA_FLAGS environment variable. See the XLA debug options flags for ',
'all the options: ',
' {}'.format(_XLA_DEBUG_OPTIONS_URL),
'',
'For example, to disable XLA fast math when compiling:',
'',
'XLA_FLAGS="--xla_cpu_enable_fast_math=false" $saved_model_cli '
'aot_compile_cpu ...',
'',
'Some possibly useful flags:',
' --xla_cpu_enable_fast_math=false',
' --xla_force_host_platform_device_count=<num threads>',
' (useful in conjunction with disabling multi threading)'
])
parser_compile = subparsers.add_parser(
'aot_compile_cpu',
description=compile_msg,
formatter_class=argparse.RawTextHelpFormatter)
_parse_common_freeze_and_aot(parser_compile)
parser_compile.add_argument(
'--target_triple',
type=str,
default='x86_64-pc-linux',
help=('Target triple for LLVM during AOT compilation. Examples: '
'x86_64-none-darwin, x86_64-apple-ios, arm64-none-ios, '
'armv7-none-android. More examples are available in tfcompile.bzl '
'in the tensorflow codebase.'))
parser_compile.add_argument(
'--target_cpu',
type=str,
default='',
help=('Target cpu name for LLVM during AOT compilation. Examples: '
'x86_64, skylake, haswell, westmere, <empty> (unknown). For '
'a complete list of options, run (for x86 targets): '
'`llc -march=x86 -mcpu=help`'))
parser_compile.add_argument(
'--cpp_class',
type=str,
required=True,
help=('The name of the generated C++ class, wrapping the generated '
'function. The syntax of this flag is '
'[[<optional_namespace>::],...]<class_name>. This mirrors the '
'C++ syntax for referring to a class, where multiple namespaces '
'may precede the class name, separated by double-colons. '
'The class will be generated in the given namespace(s), or if no '
'namespaces are given, within the global namespace.'))
parser_compile.add_argument(
'--multithreading',
type=str,
default='False',
help=('Enable multithreading in the compiled computation. '
'Note that if using this option, the resulting object files '
'may have external dependencies on multithreading libraries '
'like nsync.'))
parser_compile.set_defaults(func=aot_compile_cpu)
def create_parser():
"""Creates a parser that parse the command line arguments.
Returns:
    An argparse.ArgumentParser object with all saved_model_cli subcommands registered.
"""
parser = argparse.ArgumentParser(
description='saved_model_cli: Command-line interface for SavedModel')
parser.add_argument('-v', '--version', action='version', version='0.1.0')
subparsers = parser.add_subparsers(
title='commands', description='valid commands', help='additional help')
# show command
add_show_subparser(subparsers)
# run command
add_run_subparser(subparsers)
# scan command
add_scan_subparser(subparsers)
# tensorrt convert command
add_convert_subparser(subparsers)
# aot_compile_cpu command
add_aot_compile_cpu_subparser(subparsers)
# freeze_model command
add_freeze_model_subparser(subparsers)
return parser
def main():
logging.set_verbosity(logging.INFO)
parser = create_parser()
args = parser.parse_args()
if not hasattr(args, 'func'):
parser.error('too few arguments')
args.func(args)
if __name__ == '__main__':
sys.exit(main())
|
py | b410081f89b053462831ae2f600632d3fa121d44 | """Will there be enough space?
You have to write a function that accepts three parameters:
cap is the amount of people the bus can hold excluding the driver,
on is the number of people on the bus,
and wait is the number of people waiting to get on to the bus.
If there is enough space, return 0, and if there isn't,
return the number of passengers he can't take.
#1 Best Practices Solution by vaskinyy, AlexBaier, DCoulter
def enough(cap, on, wait):
return max(0, wait - (cap - on))
"""
def enough(cap, on, wait):
"""determines if there is spave available."""
room_available = cap - on
getting_on = room_available - wait
if getting_on < 0:
return abs(getting_on)
else:
return 0 |
py | b410092c31ba3dfdd1805b7dc41cf416d32d8f53 | # -*- coding: utf-8 -*-
import schedule
import time
import sys
import os
import random
import yaml #->added to make pics upload -> see job8
import glob #->added to make pics upload -> see job8
from tqdm import tqdm
import threading #->added to make multithreadening possible -> see fn run_threaded
sys.path.append(os.path.join(sys.path[0],'../../'))
from instabot import Bot
import config
bot = Bot(comments_file=config.COMMENTS_FILE, blacklist=config.BLACKLIST_FILE, whitelist=config.WHITELIST_FILE)
bot.login()
bot.logger.info("ULTIMATE script. 24hours save")
random_user_file = bot.read_list_from_file(config.USERS_FILE)
random_hashtag_file = bot.read_list_from_file(config.HASHTAGS_FILE)
photo_captions = bot.read_list_from_file(config.PHOTO_CAPTIONS_FILE)
#to get pics and autopost it
posted_pic_list = []
try:
with open(config.POSTED_PICS_FILE, 'r') as f:
posted_pic_list = f.read().splitlines()
except:
posted_pic_list = []
#Get the filenames of the photos in the path ->
pics = [os.path.basename(x) for x in glob.glob(config.PICS_PATH + "/*.jpg")]
pics = sorted(pics)
#Return a random value from a list, used in various jobs below
def get_random(from_list):
_random=random.choice(from_list)
return _random
def stats(): bot.save_user_stats(bot.user_id)
def job1(): bot.like_hashtag(get_random(random_hashtag_file), amount=int(700/24))
def job2(): bot.like_timeline(amount=int(300/24))
def job3(): bot.like_followers(get_random(random_user_file), nlikes=3)
def job4(): bot.follow_followers(get_random(random_user_file), nfollows=config.NUMBER_OF_FOLLOWERS_TO_FOLLOW)
def job5(): bot.comment_medias(bot.get_timeline_medias())
def job6(): bot.unfollow_non_followers(n_to_unfollows=config.NUMBER_OF_NON_FOLLOWERS_TO_UNFOLLOW)
def job7(): bot.follow_users(bot.get_hashtag_users(get_random(random_hashtag_file)))
def job8(): #Comment posts with an hashtag in HASHTAGS_FILE
hashtag = get_random(random_hashtag_file)
bot.logger.info("Commenting on hashtag: " + hashtag)
bot.comment_hashtag(hashtag)
def job9(): #Automatically post a pic in 'pics' folder
try:
for pic in pics:
if pic in posted_pic_list:
continue
caption = get_random(photo_captions)
full_caption = caption + "\n" + config.FOLLOW_MESSAGE
bot.logger.info("Uploading pic with caption: " + caption)
bot.uploadPhoto(config.PICS_PATH + pic, caption=full_caption)
if bot.LastResponse.status_code != 200:
bot.logger.error("Something went wrong, read the following ->\n")
bot.logger.error(bot.LastResponse)
break
            if pic not in posted_pic_list:
#After posting a pic, comment it with all the hashtags specified
#In config.PICS_HASHTAGS
posted_pic_list.append(pic)
with open('pics.txt', 'a') as f:
f.write(pic + "\n")
bot.logger.info("Succesfully uploaded: " + pic)
bot.logger.info("Commenting uploaded photo with hashtags...")
medias = bot.get_your_medias()
last_photo = medias[0] #Get the last photo posted
bot.comment(last_photo, config.PICS_HASHTAGS)
break
except Exception as e:
bot.logger.error("Couldn't upload pic")
bot.logger.error(str(e))
def job10(): # put non followers on blacklist
try:
bot.logger.info("Creating non-followers list")
followings = bot.get_user_following(bot.user_id) # getting following
followers = bot.get_user_followers(bot.user_id) # getting followers
friends_file = bot.read_list_from_file("friends.txt") # same whitelist (just user ids)
nonfollowerslist = list((set(followings) - set(followers)) - set(friends_file))
with open(config.BLACKLIST_FILE, 'a') as file: # writing to the blacklist
for user_id in nonfollowerslist:
file.write(str(user_id) + "\n")
bot.logger.info("Removing duplicates...")
lines = open(config.BLACKLIST_FILE, 'r').readlines()
lines_set = set(lines)
out = open(config.BLACKLIST_FILE, 'w')
for line in lines_set:
out.write(line)
bot.logger.info("Done.")
except Exception as e:
bot.logger.error("Couldn't update blacklist")
bot.logger.error(str(e))
# function to make threads -> details here http://bit.ly/faq_schedule
def run_threaded(job_fn):
job_thread=threading.Thread(target=job_fn)
job_thread.start()
schedule.every(1).hour.do(run_threaded, stats) #get stats
schedule.every(8).hours.do(run_threaded, job1) #like hashtag
schedule.every(2).hours.do(run_threaded, job2) #like timeline
schedule.every(1).days.at("16:00").do(run_threaded, job3) #like followers of users from file
schedule.every(2).days.at("11:00").do(run_threaded, job4) #follow followers
schedule.every(16).hours.do(run_threaded, job5) #comment medias
schedule.every(1).days.at("08:00").do(run_threaded, job6) #unfollow non-followers
schedule.every(12).hours.do(run_threaded, job7) #follow users from hashtag from file
schedule.every(6).hours.do(run_threaded, job8) #comment hashtag
schedule.every(1).days.at("21:28").do(run_threaded, job9) #upload pics
schedule.every(4).days.at("07:50").do(run_threaded, job10) #non-followers blacklist
while True:
schedule.run_pending()
time.sleep(1)
|
py | b4100937e696d75b76989e7c3d1eb76b0cd51467 | """
MatheX, June 7, 2020
Introductory Pygame series
Prerequisites:
- Python basics
- lists and functions
#1 first episode of the series:
- main game window
- game clock
- the player (simplified graphics)
- player movement (simple movement)
- collision with an enemy (simplified graphics)
"""
# import libraries
import pygame
import random
# constants
BLUE = (0,0,255) # color (R,G,B)
RED = (255,0,0) # color (R,G,B)
GREEN = (0,255,0) # color (R,G,B)
BLACK = (0,0,0) # color (R,G,B)
WHITE = (255,255,255) # color (R,G,B)
pas = 20 # player speed (number of pixels per move)
# initialize pygame
pygame.init()
# main game window
window = pygame.display.set_mode((500, 500), pygame.RESIZABLE)
# player
player = pygame.draw.rect(window,BLUE,(100,100,20,20))
# enemy
enemy = pygame.draw.rect(window,RED,(300,300,20,20))
# game clock
clock = pygame.time.Clock()
run = True # set to False to quit the game
# main game loop
while run:
    clock.tick(10) # 10 frames/sec max
    # handle quitting the main window
    events = pygame.event.get() # get all events
    for event in events:
        if event.type == pygame.QUIT: # check whether the user wants to quit
            run = False
    # player movement
    keys = pygame.key.get_pressed() # get all pressed keys
    if keys[pygame.K_LEFT]:
        player.move_ip(-pas,0)
    elif keys[pygame.K_RIGHT]:
        player.move_ip(pas,0)
    elif keys[pygame.K_DOWN]:
        player.move_ip(0,pas) # the y axis points downward
    elif keys[pygame.K_UP]:
        player.move_ip(0,-pas)
    # collision detection between the player and the enemy
    if player.colliderect(enemy):
        enemy.x = random.randint(0,window.get_width()-enemy.width)
        enemy.y = random.randint(0,window.get_height()-enemy.height)
    # redraw the main window background
    window.fill(BLACK)
    # draw the player at its current position
    pygame.draw.rect(window, BLUE, player)
    # draw the enemy at its current position
    pygame.draw.rect(window, RED, enemy)
    # update the whole display
    pygame.display.update()
# quit pygame
pygame.quit()
|
py | b4100954565b442be329e5a76fe3e9a6db342621 | from .parser import parse, partial_parse
from wisepy.talking import Talking
from subprocess import check_output
talking = Talking()
@talking
def fix(fu=None, tu=None, fe=None, te=None):
"""
log: git log
fu: from user/author name to change.
tu: to user/name to be changed to.
fe: from email/email to change.
te: to email/email to be changed to.
"""
def eq(a, b):
return b is None or a == b
log = check_output('git log').decode()
branch = check_output('git branch').decode().split()[-1]
for commit, author, email in parse(log).result:
if eq(author, fu) and eq(email, fe):
print('processing', commit)
author = tu or author
email = te or email
check_output(f'git checkout {commit}')
check_output(
f'git commit --amend --author="{author} <{email}>" --no-edit')
log = check_output('git log').decode().strip()
res = partial_parse(log)
(new_commit, author_, email_) = res.result
assert author == author_ and email_ == email
check_output(f'git checkout {branch}')
check_output(f'git replace {commit} {new_commit}')
print(f'transformed to new commit: {new_commit}')
check_output('git filter-branch -f -- --all')
def main():
talking.on()
|
py | b4100acc52be748826777f02a02a8204dd6085a0 | produto = float(input('Enter the product price (R$): '))
opcao = int(input('''Choose a payment method:
[ 1 ] CASH ----------------------------- 10% discount
[ 2 ] CASH ON CARD ----------------------  5% discount
[ 3 ] UP TO 3 CARD INSTALLMENTS, NO INTEREST --  0% discount
[ 4 ] MORE THAN 3 CARD INSTALLMENTS, WITH INTEREST -- 20%
'''))
if opcao == 1:
    desc = produto - (produto * 10 / 100)
    print('Your purchase of {:.2f}R$ with a 10% discount comes to {:.2f}R$'.format(produto, desc))
elif opcao == 2:
    desc = produto - (produto * 5 / 100)
    print('Your purchase of {:.2f} with a 5% discount comes to {:.2f}R$'.format(produto, desc))
elif opcao == 3:
    qtdpar = int(input('How many installments? '))
    desc = produto
    parcela = produto / qtdpar
    if qtdpar <= 3:
        print('Your purchase of {:.2f}R$ will be split into {}x of {:.2f}R$'.format(desc, qtdpar, parcela))
        print('The final price of your purchase will be {}R$'.format(produto))
    else:
        print('Invalid option')
elif opcao == 4:
    juros = produto + (produto * 20 / 100)
    qtdpar = int(input('How many installments? '))
    parcela = juros / qtdpar
    if qtdpar > 3 and qtdpar <= 10:
        print('Your purchase of {:.2f} will be split into {}x of {:.2f}R$'.format(produto, qtdpar, parcela))
        print('The final amount to pay will be {:.2f}R$'.format(juros))
    else:
        print('We do not split into more installments than that!!!')
else:
    print('Invalid option') |
py | b4100b56c67722e0e8db581e473f4714691cb518 | import pytest
from click.testing import CliRunner
from bocadillo_cli.main import cli
@pytest.fixture
def runner():
return CliRunner()
@pytest.fixture(name="cli")
def fixture_cli():
return cli
|
py | b4100d259c65f3b01fc0bfdb85e0e8eca4eb71c6 | def _intable(num):
try:
return True if int(float(format(num).replace(' ',''))) == float(format(num).replace(' ','')) else False
except Exception:
return False
def _numable(num):
if not intable(num):
try:
float(format(num).replace(' ', ''))
except Exception:
return False
return True
def _alnumity(num):
from .miscs import alnum
al = alnum(num)
    return isinstance(al, (int, float))
def intable_(num):
try:
return True if int(float(num)) == float(num) else False
except Exception:
return False
def numable(*num):
return all([_numable(nums) for nums in num])
def intable(*num):
return all([_intable(nums) for nums in num])
def alnumity(*num):
return all([_alnumity(nums) for nums in num])
def counterable(num):
return num.__str__().isalpha()
def roundnumable(nums):
return True if round(float(nums), 10) == int(float(nums)) + 1 else False
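# Illustrative examples of the helpers above:
#   intable(3, "4.0")  -> True   (both values are whole numbers)
#   intable("2.5")     -> False  (numeric, but not an integer)
#   numable("2.5", 7)  -> True   (both parse as numbers)
#   numable("abc")     -> False  (cannot be parsed as a number)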
|
py | b4100df60d0d240927b4c5ed5de49caef45a419e | # Generated by Django 4.0.5 on 2022-06-17 12:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0098_alter_producttranslation_search_terms'),
]
operations = [
migrations.DeleteModel(
name='ProductTranslation',
),
]
|
py | b4100df61e521033f468e148cce82e9af7097989 | #!/usr/bin/env python
import os
import shutil
import sys
MARS_OPEN_LIBRARIES_PATH = os.path.join(os.path.split(os.path.realpath(__file__))[0], "../../../mars/libraries")
sys.path.append(MARS_OPEN_LIBRARIES_PATH)
from build_apple import *
from mars_utils import *
SAMPLE_LIBS_DIRECTORY = "samplexloglibs"
proj = APPLE_PROJECTS[3]
SAMPLE_LIBS_PATH = RELATIVE_PATH + "libraries/" + SAMPLE_LIBS_DIRECTORY + "/" + os.path.splitext(os.path.split(proj.path)[1])[0] + proj.other_cflags + "/" + proj.framework_name
DES_PATH = os.path.join(os.path.split(os.path.realpath(__file__))[0], "MarsLib/" + proj.framework_name)
def main():
if not check_python_version():
exit("python env error")
if build_apple(proj, SAMPLE_LIBS_DIRECTORY):
pass
else:
exit("build mars fail!")
if os.path.exists(DES_PATH):
shutil.rmtree(DES_PATH)
shutil.copytree(SAMPLE_LIBS_PATH, DES_PATH)
if __name__ == "__main__":
main()
|
py | b4100e749cc6785bca1228fb04c0c8b3814b522c | import logging
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../../FedML")))
try:
from fedml_core.distributed.client.client_manager import ClientManager
from fedml_core.distributed.communication.message import Message
except ImportError:
from FedML.fedml_core.distributed.client.client_manager import ClientManager
from FedML.fedml_core.distributed.communication.message import Message
from .message_define import MyMessage
from .utils import transform_list_to_tensor, post_complete_message_to_sweep_process
class FedAVGClientManager(ClientManager):
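    """Client-side FedAvg message loop.
    The server's INIT_CONFIG message delivers the initial global model and a
    client index; the client trains locally and sends its updated weights and
    local sample count back to the server (receiver id 0). Each subsequent
    SYNC_MODEL_TO_CLIENT message triggers another local training round.
    """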
def __init__(self, args, trainer, comm=None, rank=0, size=0, backend="MPI"):
super().__init__(args, comm, rank, size, backend)
self.trainer = trainer
self.num_rounds = args.comm_round
self.round_idx = 0
def run(self):
super().run()
def register_message_receive_handlers(self):
self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_INIT_CONFIG,
self.handle_message_init)
self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT,
self.handle_message_receive_model_from_server)
def handle_message_init(self, msg_params):
global_model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX)
if self.args.is_mobile == 1:
global_model_params = transform_list_to_tensor(global_model_params)
self.trainer.update_model(global_model_params)
self.trainer.update_dataset(int(client_index))
self.round_idx = 0
self.__train()
def start_training(self):
self.round_idx = 0
self.__train()
def handle_message_receive_model_from_server(self, msg_params):
logging.info("handle_message_receive_model_from_server.")
model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX)
if self.args.is_mobile == 1:
model_params = transform_list_to_tensor(model_params)
self.trainer.update_model(model_params)
self.trainer.update_dataset(int(client_index))
self.round_idx += 1
self.__train()
# if self.round_idx == self.num_rounds - 1:
# post_complete_message_to_sweep_process(self.args)
# self.finish()
def send_model_to_server(self, receive_id, weights, local_sample_num):
message = Message(MyMessage.MSG_TYPE_C2S_SEND_MODEL_TO_SERVER, self.get_sender_id(), receive_id)
message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, weights)
message.add_params(MyMessage.MSG_ARG_KEY_NUM_SAMPLES, local_sample_num)
self.send_message(message)
def __train(self):
logging.info("#######training########### round_id = %d" % self.round_idx)
weights, local_sample_num = self.trainer.train(self.round_idx)
self.send_model_to_server(0, weights, local_sample_num)
|
py | b4100ff5c6bca4fb725259747bda71f56ecad488 | # Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.objects import *
from common import FileSystemConfig
from topologies.BaseTopology import SimpleTopology
# Creates a Mesh topology with 4 directories, one at each corner.
# One L1 (and L2, depending on the protocol) are connected to each router.
# XY routing is enforced (using link weights) to guarantee deadlock freedom.
class MeshDirCorners_XY(SimpleTopology):
description='MeshDirCorners_XY'
def __init__(self, controllers):
self.nodes = controllers
def makeTopology(self, options, network, IntLink, ExtLink, Router):
nodes = self.nodes
num_routers = options.num_cpus
num_rows = options.mesh_rows
# default values for link latency and router latency.
# Can be over-ridden on a per link/router basis
link_latency = options.link_latency # used by simple and garnet
router_latency = options.router_latency # only used by garnet
# First determine which nodes are cache cntrls vs. dirs vs. dma
cache_nodes = []
dir_nodes = []
dma_nodes = []
for node in nodes:
if node.type == 'L1Cache_Controller' or \
node.type == 'L2Cache_Controller':
cache_nodes.append(node)
elif node.type == 'Directory_Controller':
dir_nodes.append(node)
elif node.type == 'DMA_Controller':
dma_nodes.append(node)
        # Obviously the number of rows must be <= the number of routers
# and evenly divisible. Also the number of caches must be a
# multiple of the number of routers and the number of directories
# must be four.
assert(num_rows > 0 and num_rows <= num_routers)
num_columns = int(num_routers / num_rows)
assert(num_columns * num_rows == num_routers)
caches_per_router, remainder = divmod(len(cache_nodes), num_routers)
assert(remainder == 0)
assert(len(dir_nodes) == 4)
# Create the routers in the mesh
routers = [Router(router_id=i, latency = router_latency) \
for i in range(num_routers)]
network.routers = routers
# link counter to set unique link ids
link_count = 0
# Connect each cache controller to the appropriate router
ext_links = []
for (i, n) in enumerate(cache_nodes):
cntrl_level, router_id = divmod(i, num_routers)
assert(cntrl_level < caches_per_router)
ext_links.append(ExtLink(link_id=link_count, ext_node=n,
int_node=routers[router_id],
latency = link_latency))
link_count += 1
# NUMA Node for each quadrant
# With odd columns or rows, the nodes will be unequal
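        # Example: in a 4x4 mesh (16 routers, num_columns = 4) the four
        # quadrants come out as routers {0,1,4,5}, {2,3,6,7}, {8,9,12,13}
        # and {10,11,14,15} respectively.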
numa_nodes = [ [], [], [], []]
for i in range(num_routers):
if i % num_columns < num_columns / 2 and \
i < num_routers / 2:
numa_nodes[0].append(i)
elif i % num_columns >= num_columns / 2 and \
i < num_routers / 2:
numa_nodes[1].append(i)
elif i % num_columns < num_columns / 2 and \
i >= num_routers / 2:
numa_nodes[2].append(i)
else:
numa_nodes[3].append(i)
        num_numa_nodes = 0
        for n in numa_nodes:
            if n:
                num_numa_nodes += 1
        # Save NUMA bookkeeping on the instance for registerTopology() below
        self._numa_nodes = numa_nodes
        self._num_numa_nodes = num_numa_nodes
# Connect the dir nodes to the corners.
ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[0],
int_node=routers[0],
latency = link_latency))
link_count += 1
ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[1],
int_node=routers[num_columns - 1],
latency = link_latency))
link_count += 1
ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[2],
int_node=routers[num_routers - num_columns],
latency = link_latency))
link_count += 1
ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[3],
int_node=routers[num_routers - 1],
latency = link_latency))
link_count += 1
# Connect the dma nodes to router 0. These should only be DMA nodes.
for (i, node) in enumerate(dma_nodes):
assert(node.type == 'DMA_Controller')
ext_links.append(ExtLink(link_id=link_count, ext_node=node,
int_node=routers[0],
latency = link_latency))
network.ext_links = ext_links
# Create the mesh links.
int_links = []
# East output to West input links (weight = 1)
for row in range(num_rows):
for col in range(num_columns):
if (col + 1 < num_columns):
east_out = col + (row * num_columns)
west_in = (col + 1) + (row * num_columns)
int_links.append(IntLink(link_id=link_count,
src_node=routers[east_out],
dst_node=routers[west_in],
src_outport="East",
dst_inport="West",
latency = link_latency,
weight=1))
link_count += 1
# West output to East input links (weight = 1)
for row in range(num_rows):
for col in range(num_columns):
if (col + 1 < num_columns):
east_in = col + (row * num_columns)
west_out = (col + 1) + (row * num_columns)
int_links.append(IntLink(link_id=link_count,
src_node=routers[west_out],
dst_node=routers[east_in],
src_outport="West",
dst_inport="East",
latency = link_latency,
weight=1))
link_count += 1
# North output to South input links (weight = 2)
for col in range(num_columns):
for row in range(num_rows):
if (row + 1 < num_rows):
north_out = col + (row * num_columns)
south_in = col + ((row + 1) * num_columns)
int_links.append(IntLink(link_id=link_count,
src_node=routers[north_out],
dst_node=routers[south_in],
src_outport="North",
dst_inport="South",
latency = link_latency,
weight=2))
link_count += 1
# South output to North input links (weight = 2)
for col in range(num_columns):
for row in range(num_rows):
if (row + 1 < num_rows):
north_in = col + (row * num_columns)
south_out = col + ((row + 1) * num_columns)
int_links.append(IntLink(link_id=link_count,
src_node=routers[south_out],
dst_node=routers[north_in],
src_outport="South",
dst_inport="North",
latency = link_latency,
weight=2))
link_count += 1
network.int_links = int_links
# Register nodes with filesystem
def registerTopology(self, options):
        i = 0
        for n in self._numa_nodes:
            if n:
                FileSystemConfig.register_node(n,
                    MemorySize(options.mem_size) // self._num_numa_nodes, i)
            i += 1
|
py | b41010220b1283684234dcaa4cb9872f6dec2b8b | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;dynamixel_sdk".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldynamixel_workbench_toolbox".split(';') if "-ldynamixel_workbench_toolbox" != "" else []
PROJECT_NAME = "dynamixel_workbench_toolbox"
PROJECT_SPACE_DIR = "/workspace/install"
PROJECT_VERSION = "2.2.0"
|
py | b41010a18257f7570b9e8abc84993fd776ff7afe | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module importing all policies."""
from tf_agents.bandits.policies import categorical_policy
from tf_agents.bandits.policies import greedy_reward_prediction_policy
from tf_agents.bandits.policies import lin_ucb_policy
from tf_agents.bandits.policies import linalg
from tf_agents.bandits.policies import linear_thompson_sampling_policy
from tf_agents.bandits.policies import neural_linucb_policy
|
py | b41010dec2b5a997ede546fd6dc26c628abc9353 | #!/usr/bin/env python
# coding: utf-8
# # OpTaliX: Geometric Analysis
#
# These are ...
# In[1]:
import pandas as pd
import itables
from itables import init_notebook_mode, show
import itables.options as opt
init_notebook_mode(all_interactive=True)
opt.lengthMenu = [50, 100, 200, 500]
#opt.classes = ["display", "cell-border"]
#opt.classes = ["display", "nowrap"]
opt.columnDefs = [{"className": "dt-left", "targets": "_all"}, {"width": "500px", "targets": 1}]
# In[2]:
import os
cwd = os.getcwd()
filename = os.path.join(cwd, os.path.join('Excel', 'OpTaliX_optimization_operands.xlsx'))
df_var = pd.read_excel(filename, sheet_name = "Geometric Analysis", header = 0, index_col = 0)
df_var = df_var.dropna() # drop nan values
# In[3]:
df_var
# In[ ]:
# In[ ]:
|
py | b41010f2d8e5f2e8581619cfa8c11a594d39bbf5 | # encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: [email protected]
@site:
@software: PyCharm
@time: 2019/11/10 18:17
"""
import unittest
from graph.medium.max_area_of_islands_695.max_area_of_islands_695 import \
Solution
class TestMaxAreaOfIslands(unittest.TestCase):
def setUp(self): pass
def tearDown(self): pass
def test_1(self):
grid = [[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]]
exp = 6
act = Solution().maxAreaOfIsland(grid)
self.assertEqual(exp, act)
|
py | b410114eb3f6a8c9b60eb2f3370445868d0e70a9 | import pytest
import time
dockerfile = u'''
FROM python:3
RUN adduser --disabled-password --gecos '' test
ENV SEED "{seed}"
WORKDIR /src
USER test
RUN echo 'eval $(thefuck --alias)' > /home/test/.bashrc
RUN echo > /home/test/.bash_history
RUN git config --global user.email "[email protected]"
RUN git config --global user.name "Your Name"
USER root
'''.format(seed=time.time())
def plot(proc, TIMEOUT):
proc.sendline(u'cd /home/test/')
proc.sendline(u'fuck')
assert proc.expect([TIMEOUT, u'No fucks given'])
proc.sendline(u'git init')
proc.sendline(u'git add .')
proc.sendline(u'git commit -a -m init')
proc.sendline(u'git brnch')
proc.sendline(u'fuck')
assert proc.expect([TIMEOUT, u'git branch'])
proc.send('\n')
assert proc.expect([TIMEOUT, u'master'])
proc.sendline(u'echo test')
proc.sendline(u'echo tst')
proc.sendline(u'fuck')
assert proc.expect([TIMEOUT, u'echo test'])
proc.send('\n')
assert proc.expect([TIMEOUT, u'test'])
@pytest.mark.functional
@pytest.mark.benchmark(min_rounds=10)
def test_performance(spawnu, TIMEOUT, benchmark):
proc = spawnu(u'thefuck/python3-bash-performance',
dockerfile, u'bash')
proc.sendline(u'pip install /src')
proc.sendline(u'su test')
assert benchmark(plot, proc, TIMEOUT) is None
|
py | b41012072b678dc787c63c240ad195423e4e80b8 | import argparse
import os
import urllib.request
import flickrapi
def dl_flickr_images(query, folder, limit):
"""Download flickr images into local drive
Read from FLICKR official API Docs:
1) API Docs on Search: https://www.flickr.com/services/api/flickr.photos.search.html
2) API Testing Site: https://www.flickr.com/services/api/explore/flickr.photos.search
3) Sample API Query: https://api.flickr.com/services/rest/?format=json&api_key=<apikey>&method=flickr.photos.search&sort=relevance&text=<text>&extras=url_l&per_page=100
Args
----
    query (str): keyword to search photos
folder (str): folder where images will be downloaded & stored in
limit (int): set limit on how many photos to download"""
FLICKR_ACCESS_KEY = os.environ["FLICKR_ACCESS_KEY"]
FLICKR_SECRET_KEY = os.environ["FLICKR_SECRET_KEY"]
print("Downloading {}".format(query))
flickr = flickrapi.FlickrAPI(FLICKR_ACCESS_KEY, FLICKR_SECRET_KEY, cache=True)
photos = flickr.walk(text=query, sort="relevance", extras="url_c", per_page=100)
folder = folder.replace(" ", "_")
folder = os.path.join("images", folder)
if not os.path.exists(folder):
os.makedirs(folder)
cnt = 0
for photo in photos:
url = photo.get("url_c")
if url is not None:
cnt += 1
if cnt >= limit:
break
else:
fill = str(cnt).zfill(3)
                imgpath = os.path.join(folder, "{}_{}.jpg".format(fill, query))
urllib.request.urlretrieve(url, imgpath)
if cnt % 50 == 0:
print("{} downloaded".format(cnt))
print("{} photos downloaded!".format(cnt))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--query", required=True, help="search query to in Flickr API")
parser.add_argument("-o", "--output", required=True, help="path to output directory of images")
parser.add_argument("-l", "--limit", required=True, help="no. images to search")
args = parser.parse_args()
query = args["query"]
output = args["output"]
limit = args["limit"]
dl_flickr_images(query, output, limit) |
py | b410138bfe9f504dfc39a20ca5fdd4c40322d4ab | """
Copyright (C) Ghost Robotics - All Rights Reserved
Written by Adarsh Kulkarni <[email protected]>
"""
import rospy
from geometry_msgs.msg import Twist
from serializers.Base_Serializer import BaseSerializer
import numpy as np
class TwistSerializer(BaseSerializer):
def __init__(self, topic_name, skip_frame=1, directory_name='./', bag_file=''):
rospy.logwarn("INIT Twist SERIALIZER")
super(TwistSerializer, self).__init__(topic_name, skip_frame, directory_name, bag_file)
self.file_ext = ""
rospy.Subscriber(topic_name, Twist, self.callback)
def callback(self, twist_msg):
pose_arr = [twist_msg.linear.x,
twist_msg.linear.y,
twist_msg.linear.z,
twist_msg.angular.x,
twist_msg.angular.y,
twist_msg.angular.z,
rospy.get_time()]
np_arr = np.asarray(pose_arr)
# save
file_name = (self.filename_base + self.file_ext).format(self.counter)
np.save(file_name, np_arr)
# increment
self.counter += 1 |
py | b4101521f70e4d800579b30572cdd4ae34c341a5 | from collections import defaultdict
def triangle(n):
return int((n * (n + 1)) / 2)
def square(n):
return n * n
def pentagon(n):
return int((n * (3 * n - 1)) / 2)
def hexagon(n):
return n * (2 * n - 1)
def heptagon(n):
return int((n * (5 * n - 3)) / 2)
def octagon(n):
return n * (3 * n - 2)
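# Quick sanity checks for the formulas above: the second term of each
# polygonal sequence should be 3, 4, 5, 6, 7 and 8 respectively.
assert triangle(2) == 3
assert square(2) == 4
assert pentagon(2) == 5
assert hexagon(2) == 6
assert heptagon(2) == 7
assert octagon(2) == 8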
def find_values_for_digits(first_two, poly_values):
return {poly_num: {last_two for last_two in val_map[first_two]}
            for poly_num, val_map in poly_values.items()}
def find_longest_sequence(poly_values, sequence):
if not poly_values:
if sequence[-1] % 100 == sequence[0] // 100:
return sequence
else:
# The ends don't match -- remove the last one :)
return sequence[:-1]
best_sequence = sequence
first_two = sequence[-1] % 100
for poly_num, digit_map in poly_values.items():
for last_two in digit_map[first_two]:
new_sequence = find_longest_sequence(
{p: dm for p, dm in poly_values.items() if p != poly_num},
sequence + [first_two * 100 + last_two],
)
if len(new_sequence) > len(best_sequence):
best_sequence = new_sequence
return best_sequence
if __name__ == '__main__':
poly_funs = [triangle, square, pentagon, hexagon, heptagon, octagon]
poly_values = defaultdict(lambda: defaultdict(set))
for poly_num, poly_fun in enumerate(poly_funs, start=3):
for n in range(1, 10000):
val = poly_fun(n)
if val >= 10000:
break
elif val >= 1000:
poly_values[poly_num][val // 100].add(val % 100)
for first_two, last_twos in poly_values.pop(3).items():
for last_two in last_twos:
sequence = find_longest_sequence(
poly_values,
[first_two * 100 + last_two],
)
if len(sequence) == 6:
print('Sequence: {}'.format(', '.join(map(str, sequence))))
print('Sum is {}'.format(sum(sequence)))
|
py | b410153077ba3bcc26d0de0599c2f00ce31aae1f | from logging import getLogger
import requests
from django.contrib.gis.geos import Point
from django.db import transaction
from spaces.models import Location, Unit
logger = getLogger(__name__)
class UnitImporter:
"""Imports units from given json data source url.
Unit importer uses field map dict to map django db fields
and the data source's fields. Field map also should define the default values
to be used for missing values.
Field map can be given as a kwarg in __init__ and should be formatted like:
field_map = {
"unit": {
"<unit model field>": "<data source field>",
...
},
"location": {
"<location model field>": "<data source field>",
...
},
}
"""
# Field map is used to map to django model fields to api data.
field_map = {
"unit": {
"tprek_id": "id",
"name": "name_fi",
"name_fi": "name_fi",
"name_en": "name_en",
"name_sv": "name_sv",
"description": "desc_fi",
"short_description": "short_desc_fi",
"web_page": "www_fi",
"email": "email",
"phone": "phone",
},
"location": {
"address_street": "street_address_fi",
"address_zip": "address_zip",
"address_city": "address_city_fi",
"lat": "latitude",
"lon": "longitude",
},
# These values we default to.
"defaults": {
"tprek_id": None,
"name": None,
"description": "",
"short_description": "",
"web_page": "",
"email": "",
"phone": "",
"address_street": None,
"address_zip": None,
"address_city": None,
"lat": None,
"lon": None,
},
}
def __init__(self, url: str, single: bool = False, field_map: dict = None):
self.url = url
self.single = single
if field_map:
self.field_map = field_map
@transaction.atomic
def import_units(self):
self.creation_counter = 0
self.update_counter = 0
resp = requests.get(self.url)
resp.raise_for_status()
unit_data = resp.json()
if self.single:
unit_data = [unit_data]
for row in unit_data:
created = self.create_unit(row)
self._update_counters(created)
logger.info(
"Created %s\nUpdated %s" % (self.creation_counter, self.update_counter)
)
def _update_counters(self, created: bool):
if created:
self.creation_counter += 1
return
self.update_counter += 1
def create_unit(self, importer_data: dict) -> bool:
"""Creates or updates an Unit object"""
unit_data = {}
for model_field, data_field in self.field_map["unit"].items():
unit_data[model_field] = importer_data.get(
data_field, self.field_map["defaults"].get(model_field)
)
unit, unit_created = Unit.objects.update_or_create(
tprek_id=importer_data.get("id"), defaults=unit_data
)
location_data = {}
for model_field, data_field in self.field_map["location"].items():
location_data[model_field] = importer_data.get(
data_field, self.field_map["defaults"].get(model_field)
)
location_data["unit"] = unit
point = None
lon = location_data.pop("lon", self.field_map["defaults"].get("lon"))
lat = location_data.pop("lat", self.field_map["defaults"].get("lat"))
if lon and lat:
point = Point(lon, lat)
location_data["coordinates"] = point
location, _ = Location.objects.update_or_create(
unit=unit, defaults=location_data
)
return unit_created
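# Minimal usage sketch (the URL below is a placeholder, not a real endpoint):
#
#   importer = UnitImporter(url="https://example.org/unit/?format=json")
#   importer.import_units()
#
# Pass single=True if the endpoint returns a single unit object instead of a
# list, or supply a custom field_map to remap data-source fields (see the
# class docstring above).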
|
py | b410166e9b1f2a2b0768f527fbf415703f7737dd | """Adds an update to an existing ticket."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import helpers
from SoftLayer.CLI import ticket
import click
@click.command()
@click.argument('identifier')
@click.option('--body', help="The entry that will be appended to the ticket")
@environment.pass_env
def cli(env, identifier, body):
"""Adds an update to an existing ticket."""
mgr = SoftLayer.TicketManager(env.client)
ticket_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'ticket')
if body is None:
body = click.edit('\n\n' + ticket.TEMPLATE_MSG)
mgr.update_ticket(ticket_id=ticket_id, body=body)
env.fout("Ticket Updated!")
|
py | b410168ab7ed8befe9d6da1873f75ca62e9f6812 | import os
import torch
class Settings:
PROJ_NAME = 'Text-Summarization-Using-T5'
root_path = os.getcwd().split(PROJ_NAME)[0] + PROJ_NAME + "\\"
APPLICATION_PATH = root_path + "backend\\services\\text_summarization\\application\\"
# setting up logs path
LOGS_DIRECTORY = root_path + "backend\\services\\text_summarization\\logs\\logs.txt"
MODEL_TYPE = "t5"
MODEL_NAME = "t5-base"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# training data directory
TRAIN_DATA = APPLICATION_PATH + "ai\\data\\news_summary.csv"
Columns = ['headlines', 'text']
USE_GPU = None
if str(DEVICE) == "cuda":
USE_GPU = True
else:
USE_GPU = False
EPOCHS = 5
encoding = 'latin-1'
columns_dict = {"headlines": "target_text", "text": "source_text"}
df_column_list = ['source_text', 'target_text']
SUMMARIZE_KEY = "summarize: "
SOURCE_TEXT_KEY = 'source_text'
TEST_SIZE = 0.2
BATCH_SIZE = 8
source_max_token_len = 128
target_max_token_len = 50
train_df_len = 5000
test_df_len = 100
WEIGHTS_PATH = APPLICATION_PATH + "ai\\weights\\SimpleT5-weights"
# constants
DATA_KEY = 'data'
|
py | b41016b60f30a2f02d7382ab07eb3ac903d61134 | #!/usr/bin/env python3
def vendors():
vendors = ["cisco", "juniper", "big_ip", "f5", "arista", "alta3", "zach", "stuart"]
approved_vendors = ["cisco", "juniper", "big_ip"]
for x in vendors:
print(f"\nThe vendor is {x}", end="")
if x not in approved_vendors:
print(" - NOT AN APPROVED VENDOR!", end="")
print("\nOur loop has ended.")
vendors()
def farms():
farms = [{"name": "NE Farm", "agriculture": ["sheep", "cows", "pigs", "chickens", "llamas", "cats"]},
{"name": "W Farm", "agriculture": ["pigs", "chickens", "llamas"]},
{"name": "SE Farm", "agriculture": ["chickens", "carrots", "celery"]}]
for y in farms:
print(y.get("name", "Animal Farm"), end=":\n")
for agri in y.get("agriculture"):
print(" -", agri)
farms()
|
py | b41016c5fee2c9744c3095b4a02e40b0420be390 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2022, William Guilherme <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: zpa_cloud_connector_group_info
short_description: Retrieves cloud connector group information.
description:
- This module will allow the retrieval of information about a cloud connector group.
author:
- William Guilherme (@willguibr)
version_added: "1.0.0"
options:
client_id:
description: ""
required: false
type: str
client_secret:
description: ""
required: false
type: str
customer_id:
description: ""
required: false
type: str
name:
description:
- Name of the Cloud Connector Group.
required: false
type: str
id:
description:
- ID of the Cloud Connector Group.
required: false
type: str
"""
EXAMPLES = """
- name: Get Information Details of All Cloud Connector Groups
willguibr.zpacloud.zpa_cloud_connector_group_info:
- name: Get Information Details of a Cloud Connector Group by Name
willguibr.zpacloud.zpa_cloud_connector_group_info:
name: zs-cc-vpc-096108eb5d9e68d71-ca-central-1a
- name: Get Information Details of a Cloud Connector Group by ID
willguibr.zpacloud.zpa_cloud_connector_group_info:
id: "216196257331292017"
"""
RETURN = """
# Returns information on a specified Cloud Connector Group.
"""
from traceback import format_exc
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.willguibr.zpacloud.plugins.module_utils.zpa_client import (
ZPAClientHelper,
)
from ansible_collections.willguibr.zpacloud.plugins.module_utils.zpa_cloud_connector_group import (
CloudConnectorGroupService,
)
def core(module):
cloud_connector_name = module.params.get("name", None)
cloud_connector_id = module.params.get("id", None)
customer_id = module.params.get("customer_id", None)
service = CloudConnectorGroupService(module, customer_id)
connectors = []
if cloud_connector_id is not None:
connector = service.getByID(cloud_connector_id)
if connector is None:
module.fail_json(
msg="Failed to retrieve App Connector Group ID: '%s'" % (id)
)
connectors = [connector]
elif cloud_connector_name is not None:
connector = service.getByName(cloud_connector_name)
if connector is None:
module.fail_json(
msg="Failed to retrieve App Connector Group Name: '%s'"
% (cloud_connector_name)
)
connectors = [connector]
else:
connectors = service.getAll()
module.exit_json(changed=False, data=connectors)
def main():
argument_spec = ZPAClientHelper.zpa_argument_spec()
argument_spec.update(
name=dict(type="str", required=False),
id=dict(type="str", required=False),
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == "__main__":
main()
|
py | b41016de457c71877a8b107e5438645949776cc9 | import json
import datasets
_DESCRIPTION = """\
The MBPP (Mostly Basic Python Problems) dataset consists of around 1,000 crowd-sourced Python
programming problems, designed to be solvable by entry level programmers, covering programming
fundamentals, standard library functionality, and so on. Each problem consists of a task
description, code solution and 3 automated test cases.
"""
_URLs = {
"full": "https://raw.githubusercontent.com/google-research/google-research/master/mbpp/mbpp.jsonl",
"sanitized": "https://raw.githubusercontent.com/google-research/google-research/master/mbpp/sanitized-mbpp.json",
}
_SPLITS = ["full", "sanitized"]
_CITATION = """\
@article{austin2021program,
title={Program Synthesis with Large Language Models},
author={Austin, Jacob and Odena, Augustus and Nye, Maxwell and Bosma, Maarten and Michalewski, Henryk and Dohan, David and Jiang, Ellen and Cai, Carrie and Terry, Michael and Le, Quoc and others},
journal={arXiv preprint arXiv:2108.07732},
year={2021}
}"""
_HOMEPAGE = "https://github.com/google-research/google-research/tree/master/mbpp"
_LICENSE = "CC-BY-4.0"
class MBPP(datasets.GeneratorBasedBuilder):
"""MBPP: Mostly Basic Python Problems Dataset"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name=f"{split}",
version=datasets.Version("1.0.0"),
description=_DESCRIPTION,
)
for split in _SPLITS
]
DEFAULT_CONFIG_NAME = "full"
def _info(self):
if self.config.name == "full":
features = datasets.Features(
{
"task_id": datasets.Value("int32"),
"text": datasets.Value("string"),
"code": datasets.Value("string"),
"test_list": datasets.Sequence(datasets.Value("string")),
"test_setup_code": datasets.Value("string"),
"challenge_test_list": datasets.Sequence(datasets.Value("string")),
}
)
else:
features = datasets.Features(
{
"source_file": datasets.Value("string"),
"task_id": datasets.Value("int32"),
"prompt": datasets.Value("string"),
"code": datasets.Value("string"),
"test_imports": datasets.Sequence(datasets.Value("string")),
"test_list": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
config_urls = _URLs[self.config.name]
data_dir = dl_manager.download_and_extract(config_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": data_dir,
},
)
]
def _generate_examples(self, filepath):
"""Yields examples."""
with open(filepath, encoding="utf-8") as file:
if self.config.name == "full":
data = [json.loads(line) for line in file]
else:
data = json.load(file)
id_ = 0
for sample in data:
yield id_, sample
id_ += 1
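# Illustrative way to load this builder with the `datasets` library, assuming
# the script is saved locally as mbpp.py:
#
#   from datasets import load_dataset
#   ds = load_dataset("mbpp.py", "sanitized")  # or "full"
#   print(ds["test"][0]["code"])
#
# Both configurations expose a single "test" split (see _split_generators).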
|
py | b41017323045dc18aa95e313008bca70147df2ae | # 37 Creating a dictionary that contains lists
customer_29876 = {
"first name": "David",
"last name": "Elliott",
"address": "4803 Wellesley St.",
}
# add discounts
customer_29876 = {
"first name": "David",
"last name": "Elliott",
"address": "4803 Wellesley St.",
"discounts": ["standard", "volume", "loyalty"],
} |
py | b4101752bce1ec1733d460702de9ec102960911b | def test_ports(api, utils):
"""Demonstrates adding ports to a configuration and setting the
configuration on the traffic generator.
The traffic generator should have no items configured other than
the ports in this test.
"""
tx_port = utils.settings.ports[0]
rx_port = utils.settings.ports[1]
config = api.config()
config.ports.port(name="tx_port", location=tx_port).port(
name="rx_port", location=rx_port
).port(name="port with no location")
config.options.port_options.location_preemption = True
api.set_config(config)
config = api.config()
api.set_config(config)
|
py | b41017c5291476187740c56362fc9f2b00983bc1 | from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and BatchNorm behave differently during training and test time.
parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
parser.add_argument('--sn_gan', type=int, default=1,help='Spectral Normalization Cycle GAN')
parser.set_defaults(model='test')
# To avoid cropping, the loadSize should be the same as fineSize
parser.set_defaults(loadSize=parser.get_default('fineSize'))
self.isTrain = False
return parser
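# Typical entry-point usage for this options framework (a sketch: it assumes BaseOptions
# provides a parse() helper that runs initialize() plus argument parsing, as in the
# usual CycleGAN/pix2pix layout):
# opt = TestOptions().parse()
# if opt.eval:
#     model.eval()  # honour the Dropout/BatchNorm note above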
|
py | b4101b34b8981db78b9cf688d8085eaa05c90f1f | from django.test import TestCase
from django.contrib.auth import get_user_model, authenticate
from accounts.models import Token
User = get_user_model()
class AuthenticationTest(TestCase):
def test_returns_None_if_no_such_token(self):
result = authenticate(
'2bbec969-e289-4b1f-a080-d8de58045094')
self.assertIsNone(result)
def test_returns_new_user_with_correct_email_if_token_exists(self):
email = '[email protected]'
token = Token.objects.create(email=email)
user = authenticate(token.uid)
new_user = User.objects.get(email=email)
self.assertEqual(user, new_user)
def test_returns_existing_user_with_correct_email_if_token_exists(self):
email = '[email protected]'
existing_user = User.objects.create(email=email)
token = Token.objects.create(email=email)
user = authenticate(token.uid)
self.assertEqual(user, existing_user)
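# Sketch of the passwordless backend these tests imply (an assumption drawn from the
# assertions above, not the project's actual accounts authentication module):
#
# class PasswordlessAuthenticationBackend(object):
#     def authenticate(self, uid):
#         try:
#             token = Token.objects.get(uid=uid)
#         except Token.DoesNotExist:
#             return None
#         user, _ = User.objects.get_or_create(email=token.email)
#         return user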
|
py | b4101cfd571913152ca73fbb5f6f9ee584ddfeed | from __future__ import unicode_literals
from django.test import TestCase
from dcim.models import *
class RackTestCase(TestCase):
def setUp(self):
self.site = Site.objects.create(
name='TestSite1',
slug='my-test-site'
)
self.rack = Rack.objects.create(
name='TestRack1',
facility_id='A101',
site=self.site,
u_height=42
)
self.manufacturer = Manufacturer.objects.create(
name='Acme',
slug='acme'
)
self.device_type = {
'ff2048': DeviceType.objects.create(
manufacturer=self.manufacturer,
model='FrameForwarder 2048',
slug='ff2048'
),
'cc5000': DeviceType.objects.create(
manufacturer=self.manufacturer,
model='CurrentCatapult 5000',
slug='cc5000',
u_height=0
),
}
self.role = {
'Server': DeviceRole.objects.create(
name='Server',
slug='server',
),
'Switch': DeviceRole.objects.create(
name='Switch',
slug='switch',
),
'Console Server': DeviceRole.objects.create(
name='Console Server',
slug='console-server',
),
'PDU': DeviceRole.objects.create(
name='PDU',
slug='pdu',
),
}
def test_mount_single_device(self):
device1 = Device(
name='TestSwitch1',
device_type=DeviceType.objects.get(manufacturer__slug='acme', slug='ff2048'),
device_role=DeviceRole.objects.get(slug='switch'),
site=self.site,
rack=self.rack,
position=10,
face=RACK_FACE_REAR,
)
device1.save()
# Validate rack height
self.assertEqual(list(self.rack.units), list(reversed(range(1, 43))))
# Validate inventory (front face)
rack1_inventory_front = self.rack.get_front_elevation()
self.assertEqual(rack1_inventory_front[-10]['device'], device1)
del(rack1_inventory_front[-10])
for u in rack1_inventory_front:
self.assertIsNone(u['device'])
# Validate inventory (rear face)
rack1_inventory_rear = self.rack.get_rear_elevation()
self.assertEqual(rack1_inventory_rear[-10]['device'], device1)
del(rack1_inventory_rear[-10])
for u in rack1_inventory_rear:
self.assertIsNone(u['device'])
def test_mount_zero_ru(self):
pdu = Device.objects.create(
name='TestPDU',
device_role=self.role.get('PDU'),
device_type=self.device_type.get('cc5000'),
site=self.site,
rack=self.rack,
position=None,
face=None,
)
self.assertTrue(pdu)
class InterfaceTestCase(TestCase):
def setUp(self):
self.site = Site.objects.create(
name='TestSite1',
slug='my-test-site'
)
self.rack = Rack.objects.create(
name='TestRack1',
facility_id='A101',
site=self.site,
u_height=42
)
self.manufacturer = Manufacturer.objects.create(
name='Acme',
slug='acme'
)
self.device_type = DeviceType.objects.create(
manufacturer=self.manufacturer,
model='FrameForwarder 2048',
slug='ff2048'
)
self.role = DeviceRole.objects.create(
name='Switch',
slug='switch',
)
def test_interface_order_natural(self):
device1 = Device.objects.create(
name='TestSwitch1',
device_type=self.device_type,
device_role=self.role,
site=self.site,
rack=self.rack,
position=10,
face=RACK_FACE_REAR,
)
interface1 = Interface.objects.create(
device=device1,
name='Ethernet1/3/1'
)
interface2 = Interface.objects.create(
device=device1,
name='Ethernet1/5/1'
)
interface3 = Interface.objects.create(
device=device1,
name='Ethernet1/4'
)
interface4 = Interface.objects.create(
device=device1,
name='Ethernet1/3/2/4'
)
interface5 = Interface.objects.create(
device=device1,
name='Ethernet1/3/2/1'
)
interface6 = Interface.objects.create(
device=device1,
name='Loopback1'
)
self.assertEqual(
list(Interface.objects.all().order_naturally()),
[interface1, interface5, interface4, interface3, interface2, interface6]
)
def test_interface_order_natural_subinterfaces(self):
device1 = Device.objects.create(
name='TestSwitch1',
device_type=self.device_type,
device_role=self.role,
site=self.site,
rack=self.rack,
position=10,
face=RACK_FACE_REAR,
)
interface1 = Interface.objects.create(
device=device1,
name='GigabitEthernet0/0/3'
)
interface2 = Interface.objects.create(
device=device1,
name='GigabitEthernet0/0/2.2'
)
interface3 = Interface.objects.create(
device=device1,
name='GigabitEthernet0/0/0.120'
)
interface4 = Interface.objects.create(
device=device1,
name='GigabitEthernet0/0/0'
)
interface5 = Interface.objects.create(
device=device1,
name='GigabitEthernet0/0/1.117'
)
interface6 = Interface.objects.create(
device=device1,
name='GigabitEthernet0'
)
self.assertEqual(
list(Interface.objects.all().order_naturally()),
[interface4, interface3, interface5, interface2, interface1, interface6]
)
|
py | b4101d8a0de03c0ffc726e6cbfdf3721a3671049 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 14:55:26 2018
@author: nathan
"""
import assets
from classes.texFile import TexFile
import os
class MoreTex (TexFile):
def __init__(self, parentTex, context):
TexFile.__init__ (self, parentTex, context)
self.texFiles = []
self.focus = 0
self.name = "hey"
def drawArch (self, draw, x, y):
draw.text ((x,y), self.name, fill = assets.archColor, font = assets.font)
x_ = x + assets.charwidth
y += assets.fontsize
for f in self.texFiles:
(x__,y) = f.drawArch (draw, x_,y)
return (x,y)
def changeFocus (self, x):
temp = True
if isinstance (self.texFiles[self.focus], MoreTex):
temp = self.texFiles[self.focus].changeFocus(x)
if temp:
self.focus += x
if self.focus < 0:
self.focus = 0
return True
elif self.focus > len (self.texFiles) - 1:
self.focus = len (self.texFiles) - 1
return True
self.context.parent.reloadFocusFile (self.texFiles[self.focus])
return False
def getFocus (self):
mt = self
while isinstance (mt, MoreTex):
mt = mt.texFiles [mt.focus]
return mt
def getFocusIndex (self):
mt = self
while isinstance (mt.texFiles[mt.focus], MoreTex):
mt = mt.texFiles [mt.focus]
return mt.focus
def checkNameNumber (self, x):
n = 0
for f in self.texFiles:
if f.name == x and f.nbr > n:
n = f.nbr
return n + 1
def append (self, x, context):
self.texFiles.append (x (self, context) )
def quickAppend (self, x):
self.texFiles.append (x)
def write (self, path, join):
directory = path + "/" + self.name + "_" + str(self.nbr)
if not os.path.exists(directory):
os.mkdir (directory)
for file in self.texFiles:
file.write (directory, join) |
py | b4101dcfb3bcca9bd1173d871985de12dadf23e7 | """empty message
Revision ID: 4dcd1257e2a0
Revises: 9f5e026c9e0f
Create Date: 2020-05-09 17:25:39.504446
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '4dcd1257e2a0'
down_revision = '9f5e026c9e0f'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
    # Neighborhood id 1 -> Sde Dov
# Select the teams that we need to delete
res = conn.execute("""
SELECT teams.id AS teams_id
FROM teams JOIN neighborhoods ON neighborhoods.id = teams.neighborhood_id
WHERE neighborhoods.id = 1;
""")
results = res.fetchall()
team_ids_to_delete = [str(r[0]) for r in results]
if team_ids_to_delete:
# Delete the teams' building
conn.execute(f"""
DELETE
FROM buildings_teams
WHERE buildings_teams.team_id in ({",".join(team_ids_to_delete)});
""")
# Delete the teams' donations
conn.execute("""
DELETE FROM donations WHERE donations.id in (
SELECT donations.id AS donations_id
FROM donations JOIN teams ON teams.id = donations.team_id JOIN neighborhoods ON neighborhoods.id = teams.neighborhood_id
WHERE neighborhoods.id = 1
);
""")
# Delete the teams' users
conn.execute("""
DELETE FROM users WHERE users.id IN (
SELECT users.id AS users_id
FROM users JOIN teams ON teams.id = users.team_id JOIN neighborhoods ON neighborhoods.id = teams.neighborhood_id
WHERE neighborhoods.id = 1
);
""")
# Delete the teams
conn.execute("""
DELETE FROM teams WHERE teams.id IN (
SELECT teams.id AS teams_id
FROM teams JOIN neighborhoods ON neighborhoods.id = teams.neighborhood_id
WHERE neighborhoods.id = 1
);
""")
# Delete the neighborhood's buildings
conn.execute("""
DELETE FROM buildings WHERE buildings.id IN (
SELECT buildings.id AS buildings_id
FROM buildings JOIN neighborhoods ON neighborhoods.id = buildings.neighborhood_id
WHERE neighborhoods.id = 1
);
""")
# Delete the neighborhood
conn.execute("DELETE FROM neighborhoods WHERE neighborhoods.id = 1;")
def downgrade():
pass
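# Applying this revision uses the standard Alembic CLI (the connection string comes from
# the project's Alembic env): `alembic upgrade 4dcd1257e2a0`. Since downgrade() is a
# no-op, the deleted neighborhood data cannot be restored with `alembic downgrade`.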
|
py | b4101e954470ef3b9d63818e0125388baa555784 | # coding:utf-8
import grpc
import beehive_job_info_pb2
import beehive_job_info_pb2_grpc
class Client(object):
def __init__(self, host):
self.host = host
def __call__(self, *args, **kwargs):
channel = grpc.insecure_channel(self.host)
client = beehive_job_info_pb2_grpc.JobServiceStub(channel)
return client |
py | b4101eb49cbb41ff267f23fe29939b8ff8bb0b21 | from .base import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])
# END SITE CONFIGURATION
INSTALLED_APPS += ["gunicorn"]
# https://github.com/django/django/blob/2.0.5/django/utils/log.py
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
},
"formatters": {
"django.server": {
"()": "django.utils.log.ServerFormatter",
"format": "[{server_time}] {message}",
"style": "{",
}
},
"handlers": {
"console": {
"level": "INFO",
"filters": ["require_debug_true"],
"class": "logging.StreamHandler",
},
"django.server": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "django.server",
},
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
},
"loggers": {
"django": {"handlers": ["console", "mail_admins"], "level": "INFO"},
"django.server": {
"handlers": ["django.server"],
"level": "INFO",
"propagate": False,
},
},
}
# URLs
# ------------------------------------------------------------------------------
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = env("DJANGO_ADMIN_URL", default="admin")
# CORS
CORS_ORIGIN_WHITELIST = env.list("CORS_ORIGIN_WHITELIST")
CSRF_TRUSTED_ORIGINS = CORS_ORIGIN_WHITELIST
|
py | b4101ec3414a55bff895382aebc563c6f6be8472 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WebhookNotification(Model):
"""Webhook notification of an autoscale event.
:param service_uri: the service address to receive the notification.
:type service_uri: str
:param properties: a property bag of settings. This value can be empty.
:type properties: dict
"""
_attribute_map = {
'service_uri': {'key': 'serviceUri', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
}
def __init__(self, service_uri=None, properties=None):
self.service_uri = service_uri
self.properties = properties
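# Usage sketch (illustrative values only; not part of the generated SDK surface):
if __name__ == "__main__":
    hook = WebhookNotification(
        service_uri="https://example.com/autoscale-webhook",  # hypothetical receiver
        properties={"source": "autoscale", "severity": "info"},
    )
    print(hook.service_uri, hook.properties)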
|
py | b4101f3d99f7f72fc466eefb0cd6ef4dc912e6f0 | # -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib.auth.models import User
from django.core.validators import RegexValidator
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _, ugettext_noop
from translatable.models import TranslatableModel, get_translation_model
class LAN(TranslatableModel):
SLUG_REGEX = r'[a-zA-Z][a-zA-Z0-9]*'
FULL_SLUG_REGEX = r'^' + SLUG_REGEX + r'$'
MEDIA_TYPE_IMAGE = 'image'
MEDIA_TYPE_VIDEO = 'video'
MEDIA_TYPE_STREAM = 'stream'
MEDIA_TYPES = (
(MEDIA_TYPE_IMAGE, _(u'Image')),
(MEDIA_TYPE_VIDEO, _(u'Video')),
(MEDIA_TYPE_STREAM, _(u'Stream')),
)
slug = models.SlugField(
_(u'slug'), db_index=True, blank=True, validators=[RegexValidator(regex=FULL_SLUG_REGEX)],
help_text=_(u'Optional. Must be alphanumeric and start with a letter.'),
)
title = models.CharField(_(u'title'), max_length=100)
start_date = models.DateTimeField(_(u'start date'))
end_date = models.DateTimeField(_(u'end date'))
allow_manual_payment = models.BooleanField(_(u'allow manual payment'), default=False)
location = models.CharField(_(u'location'), max_length=100)
map_link = models.CharField(_(u'map link'), max_length=300, help_text=_(u'URL for an embedded map.'), blank=True)
media_link = models.CharField(_(u'media link'), max_length=300, help_text=_(u'URL for embedded media.'), blank=True)
media_type = models.CharField(_(u'media type'), max_length=10, choices=MEDIA_TYPES, default=MEDIA_TYPE_IMAGE, help_text=_(u'Type of the optional embedded media.'))
frontpage_media_link = models.CharField(_(u'frontpage media link'), max_length=300, help_text=_(u'URL for embedded media on front page.'), blank=True)
frontpage_media_type = models.CharField(_(u'frontpage media type'), max_length=10, choices=MEDIA_TYPES, default=MEDIA_TYPE_IMAGE, help_text=_(u'Type of the optional embedded media on front page.'))
@property
def attendees(self):
return map(lambda x: getattr(x, 'user'), Attendee.objects.filter(lan=self))
@property
def paid_attendees(self):
return map(lambda x: getattr(x, 'user'), Attendee.objects.filter(lan=self, has_paid=True))
def status(self):
now = datetime.now()
if now < self.start_date:
return ugettext_noop(u'upcoming')
else:
if now < self.end_date:
return ugettext_noop(u'in progress')
else:
return ugettext_noop(u'ended')
def tickets(self):
ticket_types = TicketType.objects.filter(lan=self)
return Ticket.objects.filter(ticket_type__in=ticket_types)
def has_ticket(self, user):
ticket_types = TicketType.objects.filter(lan=self)
tickets = Ticket.objects.filter(ticket_type__in=ticket_types, user=user)
if tickets:
return tickets[0]
else:
return None
def is_ended(self):
return self.end_date < datetime.now()
def get_absolute_url(self):
if self.slug:
return reverse('lan_details_slug', kwargs={'lan_slug': self.slug})
return reverse('lan_details', kwargs={'lan_id': self.id})
def __unicode__(self):
return self.title
class Meta:
verbose_name = _(u'LAN')
verbose_name_plural = _(u'LANs')
ordering = ['start_date']
permissions = (
('export_paying_participants', u'Can export list of paying participants to downloadable file'),
('register_arrivals', u'Can show and register arrivals'),
('show_arrivals_statistics', u'Can show statistics about arrivals'),
('register_new_user', u'Can directly register a new user'),
)
class LANTranslation(get_translation_model(LAN, 'LAN')):
description = models.TextField(_(u'description'))
class Meta:
verbose_name = _(u'LAN translation')
verbose_name_plural = _(u'LAN translations')
class Attendee(models.Model):
user = models.ForeignKey(User, verbose_name=_(u'user'))
lan = models.ForeignKey(LAN, verbose_name=_(u'LAN'))
has_paid = models.BooleanField(_(u'has paid'), default=False)
arrived = models.BooleanField(_(u'has arrived'), default=False)
def __unicode__(self):
return self.user.username + u' – ' + self.lan.title
def get_ticket(self):
tickets = Ticket.objects.filter(user=self.user, ticket_type__lan=self.lan)
if tickets:
# Ignore extra tickets
return tickets[0]
return None
def get_seat(self):
from apps.seating.models import Seat
seats = Seat.objects.filter(user=self.user, seating__lan=self.lan)
if seats:
# Ignore extra seats
return seats[0]
return None
class Meta:
verbose_name = _(u'LAN attendee')
verbose_name_plural = _(u'LAN attendees')
ordering = ['-user', 'lan']
unique_together = ('user', 'lan')
index_together = ['user', 'lan']
class TicketType(TranslatableModel):
# Note: "seats" in this context means "tickets" or "spots", not actual seats.
lan = models.ForeignKey(LAN, verbose_name=_(u'LAN'))
price = models.IntegerField(_(u'price'), default=50)
    priority = models.IntegerField(_(u'priority'), default=0, help_text=_(u'In what priority the tickets will show, higher number will show first.'))
available_from = models.DateTimeField(_(u'release date'), default=datetime.now, help_text=_(u'When the tickets will be made available.'))
number_of_seats = models.IntegerField(_(u'seats'))
@property
def verbose_price(self):
return _(u'{price}kr').format(price=self.price)
def number_of_seats_used(self):
return self.ticket_set.count()
def is_available(self):
return datetime.now() >= self.available_from
def number_of_free_seats(self):
return self.number_of_seats - self.number_of_seats_used()
def is_sold_out(self):
return self.number_of_seats <= self.number_of_seats_used()
class Meta:
verbose_name = _(u'ticket type')
verbose_name_plural = _(u'ticket types')
class TicketTypeTranslation(get_translation_model(TicketType, 'TicketType')):
title = models.CharField(_(u'title'), max_length=50)
description = models.TextField(_(u'description'), blank=True)
def __unicode__(self):
return self.title
class Meta:
verbose_name = _(u'ticket type translation')
verbose_name_plural = _(u'ticket type translation')
class Ticket(models.Model):
user = models.ForeignKey(User, verbose_name=_(u'user'))
ticket_type = models.ForeignKey(TicketType, verbose_name=_(u'ticket type'))
bought_date = models.DateField(_(u'bought date'))
valid = models.BooleanField(_(u'is valid'), default=True)
invalid_date = models.DateField(_(u'invalid date'), null=True, blank=True)
invalid_description = models.TextField(_(u'invalid description'), null=True, blank=True)
def __unicode__(self):
return unicode(self.ticket_type) + u' – ' + self.user.username
class Meta:
verbose_name = _(u'ticket')
verbose_name_plural = _(u'tickets')
index_together = ['user', 'ticket_type']
class Directions(models.Model):
lan = models.ForeignKey(LAN, verbose_name=_(u'LAN'))
title = models.CharField(_(u'title'), max_length=100, null=True)
description = models.TextField(_(u'description'), null=True, blank=True, help_text=_(u'Directions.'))
class Meta:
verbose_name = _(u'LAN directions')
verbose_name_plural = _(u'LAN directions')
def __unicode__(self):
return unicode(self.lan) + u' – ' + self.title
class Stream(models.Model):
title = models.CharField(_(u'title'), max_length=100)
visible_title = models.CharField(_(u'visible title'), max_length=100, blank=True, help_text=_(u'Title to show above stream. May be empty.'))
description = models.TextField(_(u'description'), blank=True, help_text=_(u'Short description that will show on front page.'))
link = models.CharField(_(u'link'), max_length=300, help_text=_(u'Link to the embedding stream.'))
active = models.BooleanField(_(u'is active'), default=False, help_text=_(u'No more than one stream can be active at any given time.'))
def is_active(self):
return self.active
def __unicode__(self):
return self.title
class Meta:
verbose_name = _(u'stream')
verbose_name_plural = _(u'streams')
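# Shell-style usage sketch (assumes a configured Django project; the slug and user are
# illustrative):
# lan = LAN.objects.get(slug='winterlan')
# ticket_type = TicketType.objects.filter(lan=lan).order_by('-priority').first()
# if ticket_type and ticket_type.is_available() and not ticket_type.is_sold_out():
#     Ticket.objects.create(user=user, ticket_type=ticket_type,
#                           bought_date=datetime.now().date())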
|
py | b4101f7ba5a6182334ca1fc306abe72584c0b05b | """
evoke base database interface
"""
from data import makeDataClass, RecordNotFoundError
from DB import execute, init_db
from schema import *
from patch import pre_schema |
py | b410201bbaa0013e3f9ad9a77ae1bd0246028c71 | from django.db import models
class SurveyResponse(models.Model):
name = models.CharField(max_length=200)
answers = models.JSONField(default=dict)
|
py | b4102061cf0c623f83944269043594df2649aa5a | from numpy import eye
from numpy.testing import assert_allclose
from glimix_core.cov import EyeCov
def test_eyecov():
cov = EyeCov(2)
cov.scale = 1.5
assert_allclose(cov.value(), 1.5 * eye(2))
assert_allclose(cov._check_grad(), 0, atol=1e-5)
|
py | b410208e017a4097aca59ae8342d9ebea6ecf054 | # Test out integrating kernels that are half-filled.
from math import *
from Spheral import *
class Wintegral(ScalarFunctor):
def __init__(self, W, ndim, useGradientAsKernel):
assert ndim in (1, 2, 3)
self.W = W
self.ndim = ndim
self.useGradientAsKernel = useGradientAsKernel
ScalarFunctor.__init__(self)
return
def __call__(self, x):
if self.useGradientAsKernel:
            result = abs(self.W.gradValue(x, 1.0))
        else:
            result = self.W.kernelValue(x, 1.0)
if self.ndim == 1:
return result
elif self.ndim == 2:
return pi*x*result
else:
return 2.0*pi*x*x*result
nperh = 2.0
deta = 1.0/nperh
neta = 5
etas1d, etas2d, etas3d = [], [], []
for ix in xrange(neta):
etas1d.append(Vector1d((ix + 0.5)*deta))
for iy in xrange(-neta + 1, neta):
etas2d.append(Vector2d((ix + 0.5)*deta, (iy + 0.5)*deta))
for iz in xrange(-neta + 1, neta):
etas3d.append(Vector3d((ix + 0.5)*deta, (iy + 0.5)*deta, (iz + 0.5)*deta))
for (W, ndim, etas, zero) in ((TableKernel1d(BSplineKernel1d(), 1000), 1, etas1d, Vector1d.zero),
(TableKernel2d(BSplineKernel2d(), 1000), 2, etas2d, Vector2d.zero),
(TableKernel3d(BSplineKernel3d(), 1000), 3, etas3d, Vector3d.zero)):
result = simpsonsIntegrationDouble(Wintegral(W, ndim, True), 0.0, W.kernelExtent, 1000)
print "Expected half zeroth moment in %i dimensions: %g" % (ndim, result)
Wsum = 0.0
W1sum = zero
for eta in etas:
Wi = abs(W.gradValue(eta.magnitude(), 1.0))
Wsum += Wi
W1sum += Wi*eta
W1sum /= Wsum
print "Result of summing W: ", Wsum, Wsum**(1.0/ndim), W1sum.magnitude() # , (Wsum/W.volumeNormalization)**(1.0/ndim), Wsum**(1.0/ndim)/W.volumeNormalization
|
py | b4102092a449af5fb5812189b55b86e39bd7af61 | import os
import sys
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_for_pending_references(self, model, style, pending_references):
"SQLite3 doesn't support constraints"
return []
def sql_remove_table_constraints(self, model, references_to_delete, style):
"SQLite3 doesn't support constraints"
return []
def _create_test_db(self, verbosity, autoclobber):
test_database_name = self.connection.settings_dict['TEST_NAME']
if test_database_name and test_database_name != ":memory:":
# Erase the old test database
if verbosity >= 1:
print "Destroying old test database '%s'..." % self.connection.alias
if os.access(test_database_name, os.F_OK):
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
os.remove(test_database_name)
except Exception, e:
sys.stderr.write("Got an error deleting the old test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
else:
test_database_name = ":memory:"
return test_database_name
def _destroy_test_db(self, test_database_name, verbosity):
if test_database_name and test_database_name != ":memory:":
# Remove the SQLite database file
os.remove(test_database_name)
|
py | b410217094acd70dd245cb3e5113f2633847f4a6 | import pytest
import json
from common.serializers.serialization import config_state_serializer
from indy_common.state import config
from indy_node.server.request_handlers.config_req_handlers.auth_rule.static_auth_rule_helper import StaticAuthRuleHelper
from plenum.common.constants import TRUSTEE, STEWARD, DATA
from plenum.common.exceptions import RequestRejectedException, \
RequestNackedException
from indy_common.authorize.auth_actions import ADD_PREFIX, EDIT_PREFIX
from indy_common.authorize.auth_constraints import ROLE, CONSTRAINT_ID, ConstraintsEnum, SIG_COUNT, NEED_TO_BE_OWNER, \
METADATA, OFF_LEDGER_SIGNATURE
from indy_common.constants import AUTH_ACTION, OLD_VALUE, NYM, ENDORSER, CONFIG_LEDGER_ID, CONSTRAINT
from plenum.test.helper import sdk_gen_request, sdk_sign_and_submit_req_obj, sdk_get_and_check_replies
from indy_node.test.auth_rule.helper import sdk_send_and_check_req_json, sdk_send_and_check_get_auth_rule_request
from indy_node.test.auth_rule.helper import (
generate_constraint_entity, generate_constraint_list,
sdk_send_and_check_auth_rule_request, generate_auth_rule_operation,
generate_key,
sdk_send_and_check_auth_rule_invalid_request
)
def test_auth_rule_transaction_for_edit(looper,
sdk_wallet_trustee,
sdk_pool_handle):
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_action=EDIT_PREFIX)
def test_auth_rule_transaction(looper,
sdk_wallet_trustee,
sdk_pool_handle):
sdk_send_and_check_auth_rule_request(
looper, sdk_pool_handle, sdk_wallet_trustee
)
def test_auth_rule_transaction_with_large_constraint(looper,
sdk_wallet_trustee,
sdk_pool_handle):
constraint = generate_constraint_list(auth_constraints=[generate_constraint_entity(role=TRUSTEE),
generate_constraint_entity(role=STEWARD)])
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
constraint=constraint)
def test_reject_with_unacceptable_role_in_constraint(looper,
sdk_wallet_trustee,
sdk_pool_handle):
constraint = generate_constraint_entity()
unacceptable_role = 'olololo'
constraint[ROLE] = unacceptable_role
with pytest.raises(RequestNackedException) as e:
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
constraint=constraint)
e.match('InvalidClientRequest')
e.match('client request invalid')
e.match('Role {} is not acceptable'.format(unacceptable_role))
def test_reject_auth_rule_transaction(looper,
sdk_wallet_steward,
sdk_pool_handle):
with pytest.raises(RequestRejectedException) as e:
sdk_send_and_check_auth_rule_request(
looper, sdk_pool_handle, sdk_wallet_steward
)
e.match('Not enough TRUSTEE signatures')
def test_reqnack_auth_rule_transaction_with_wrong_key(looper,
sdk_wallet_trustee,
sdk_pool_handle):
with pytest.raises(RequestNackedException) as e:
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_type="*")
e.match('InvalidClientRequest')
e.match("client request invalid")
e.match("is not found in authorization map")
def test_reqnack_auth_rule_edit_transaction_with_wrong_format(looper,
sdk_wallet_trustee,
sdk_pool_handle):
op = generate_auth_rule_operation(auth_action=EDIT_PREFIX)
op.pop(OLD_VALUE)
req_obj = sdk_gen_request(op, identifier=sdk_wallet_trustee[1])
req_json = json.dumps(req_obj.as_dict)
with pytest.raises(RequestNackedException) as e:
sdk_send_and_check_req_json(
looper, sdk_pool_handle, sdk_wallet_trustee, req_json,
)
e.match('InvalidClientRequest')
e.match("client request invalid")
e.match("Transaction for change authentication "
"rule for {}={} must contain field {}".
format(AUTH_ACTION, EDIT_PREFIX, OLD_VALUE))
def test_reqnack_auth_rule_add_transaction_with_wrong_format(looper,
sdk_wallet_trustee,
sdk_pool_handle):
with pytest.raises(RequestNackedException) as e:
sdk_send_and_check_auth_rule_invalid_request(
looper,
sdk_pool_handle,
sdk_wallet_trustee,
**generate_key(old_value="*")
)
e.match('InvalidClientRequest')
e.match("client request invalid")
e.match("Transaction for change authentication "
"rule for {}={} must not contain field {}".
format(AUTH_ACTION, ADD_PREFIX, OLD_VALUE))
@pytest.mark.parametrize("off_ledger_signature", [True, False])
def test_auth_rule_state_format(
looper, sdk_pool_handle, sdk_wallet_trustee, txnPoolNodeSet, off_ledger_signature
):
auth_action = ADD_PREFIX
auth_type = NYM
field = ROLE
new_value = ENDORSER
constraint = {CONSTRAINT_ID: ConstraintsEnum.ROLE_CONSTRAINT_ID,
ROLE: "*",
SIG_COUNT: 1,
NEED_TO_BE_OWNER: False,
METADATA: {}}
if off_ledger_signature:
constraint[OFF_LEDGER_SIGNATURE] = off_ledger_signature
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_action=auth_action,
auth_type=auth_type,
field=field,
new_value=new_value,
constraint=constraint)
state = txnPoolNodeSet[0].db_manager.get_database(CONFIG_LEDGER_ID).state
key = generate_key(auth_action, auth_type, field, new_value)
path = config.make_state_path_for_auth_rule(StaticAuthRuleHelper.get_auth_key(key))
state_constraint = config_state_serializer.deserialize(state.get(path))
assert state_constraint == constraint
_, before_resp = sdk_send_and_check_get_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_type=auth_type,
auth_action=auth_action,
field=field,
new_value=new_value
)[0]
for rule in before_resp["result"][DATA]:
if rule[CONSTRAINT][CONSTRAINT_ID] == 'ROLE' and off_ledger_signature:
assert OFF_LEDGER_SIGNATURE in rule[CONSTRAINT]
|
py | b41021b852e80f7978ed46bd19aecf74aa707f49 | from Tkinter import *
from backend import Database
database=Database("books.db")
class Window(object):
def __init__(self,window):
self.window = window
self.window.wm_title("BookStore")
l1=Label(window,text="Title")
l1.grid(row=0,column=0)
l2=Label(window,text="Author")
l2.grid(row=0,column=2)
l3=Label(window,text="Year")
l3.grid(row=1,column=0)
l4=Label(window,text="ISBN")
l4.grid(row=1,column=2)
self.title_text=StringVar()
self.e1=Entry(window,textvariable=self.title_text)
self.e1.grid(row=0,column=1)
self.author_text=StringVar()
self.e2=Entry(window,textvariable=self.author_text)
self.e2.grid(row=0,column=3)
self.year_text=StringVar()
self.e3=Entry(window,textvariable=self.year_text)
self.e3.grid(row=1,column=1)
self.isbn_text=StringVar()
self.e4=Entry(window,textvariable=self.isbn_text)
self.e4.grid(row=1,column=3)
self.list1=Listbox(window, height=6,width=35)
self.list1.grid(row=2,column=0,rowspan=6,columnspan=2)
sb1=Scrollbar(window)
sb1.grid(row=2,column=2,rowspan=6)
self.list1.configure(yscrollcommand=sb1.set)
sb1.configure(command=self.list1.yview)
self.list1.bind('<<ListboxSelect>>',self.get_selected_row)
b1=Button(window,text="View all", width=12,command=self.view_command)
b1.grid(row=2,column=3)
b2=Button(window,text="Search entry", width=12,command=self.search_command)
b2.grid(row=3,column=3)
b3=Button(window,text="Add entry", width=12,command=self.add_command)
b3.grid(row=4,column=3)
b4=Button(window,text="Update selected", width=12,command=self.update_command)
b4.grid(row=5,column=3)
b5=Button(window,text="Delete selected", width=12,command=self.delete_command)
b5.grid(row=6,column=3)
b6=Button(window,text="Close", width=12,command=window.destroy)
b6.grid(row=7,column=3)
def get_selected_row(self,event):
index=self.list1.curselection()[0]
self.selected_tuple=self.list1.get(index)
self.e1.delete(0,END)
self.e1.insert(END,self.selected_tuple[1])
self.e2.delete(0,END)
self.e2.insert(END,self.selected_tuple[2])
self.e3.delete(0,END)
self.e3.insert(END,self.selected_tuple[3])
self.e4.delete(0,END)
self.e4.insert(END,self.selected_tuple[4])
def view_command(self):
self.list1.delete(0,END)
for row in database.view():
self.list1.insert(END,row)
def search_command(self):
self.list1.delete(0,END)
for row in database.search(self.title_text.get(),self.author_text.get(),self.year_text.get(),self.isbn_text.get()):
self.list1.insert(END,row)
def add_command(self):
database.insert(self.title_text.get(),self.author_text.get(),self.year_text.get(),self.isbn_text.get())
self.list1.delete(0,END)
self.list1.insert(END,(self.title_text.get(),self.author_text.get(),self.year_text.get(),self.isbn_text.get()))
def delete_command(self):
database.delete(self.selected_tuple[0])
def update_command(self):
database.update(self.selected_tuple[0],self.title_text.get(),self.author_text.get(),self.year_text.get(),self.isbn_text.get())
window=Tk()
Window(window)
window.mainloop()
|
py | b41022250a71172652f7d545a9168385f26a1425 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilties for V2 control flow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
class CondBranchFuncGraph(function.FuncGraph):
"""FuncGraph for branches of tf.cond().
This is used to distinguish cond branches from other functions.
"""
pass
class WhileCondFuncGraph(function.FuncGraph):
"""FuncGraph for the condition of tf.while_loop().
This is used to distinguish while conditions from other functions.
"""
pass
class WhileBodyFuncGraph(function.FuncGraph):
"""FuncGraph for the body of tf.while_loop().
This is used to distinguish while bodies from other functions.
"""
pass
def in_defun():
"""Returns if the current graph is, or is nested in, a defun."""
if context.executing_eagerly(): return False
graph = ops.get_default_graph()
while (isinstance(graph, CondBranchFuncGraph) or
isinstance(graph, WhileBodyFuncGraph)):
graph = graph.outer_graph
return isinstance(graph, function.FuncGraph)
def create_new_tf_function(func_graph):
"""Converts func_graph to a TF_Function and adds it to the current graph.
Args:
func_graph: function.FuncGraph
Returns:
The name of the new TF_Function.
"""
func = function._EagerDefinedFunction( # pylint: disable=protected-access
func_graph.name, func_graph, func_graph.inputs, func_graph.outputs, {})
func.add_to_graph(func_graph.outer_graph)
return func_graph.name
def unique_fn_name(scope, name):
"""Returns a unique name to use for a control flow function.
Args:
scope: A name scope string.
name: An identifier for this function (e.g. "true", "body").
Returns:
A string, the name to use for the function.
"""
return ("%s%s_%s" % (scope, name, ops.uid())).replace("/", "_")
def unique_grad_fn_name(forward_name):
return "%s_grad_%s" % (forward_name, ops.uid())
|
py | b4102266ca2a03d29c1c16c90b688865ce493b7a | from .mutator import RandomMutator
|
py | b4102462b1c46a05d2d4db3c845e7c7d8ffab13c | # -*- coding: utf-8 -*-
x = int(input())
for i in range(1, x + 1):
if (i % 2 != 0):
print(i) |
py | b41024a5a846acba3a00afc08a94d424971c3209 | """
Test hard-coded decomposition of images.
The image is decomposed with a certain threshold according to the error map; each
component is then denoised by its own KPCN branch and the results are merged.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import optimizer
import torchvision
import torchvision.transforms as transforms
import torchvision.utils
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
# from torchsummary import summary
import matplotlib.pyplot as plt
import os
import numpy as np
import argparse
import csv
import random
from tqdm import tqdm
from utils import *
from kpcn import *
from kpal import *
from multiscale import *
from decomp import *
from path import *
from losses import *
from dataset import MSDenoiseDataset, init_data
# from test_cython import *
# L = 9 # number of convolutional layers
# n_kernels = 100 # number of kernels in each layer
# kernel_size = 5 # size of kernel (square)
# # input_channels = dataset[0]['X_diff'].shape[-1]
# hidden_channels = 100
permutation = [0, 3, 1, 2]
eps = 0.00316
parser = argparse.ArgumentParser(description='Train the model')
'''
Needed parameters
1. Data & Model specifications
device : which device will the data & model should be loaded
mode : which kind of model should it train
input_channel : input channel
hidden_channel : hidden channel
num_layer : number of layers / depth of models
'''
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--mode', default='kpcn')
parser.add_argument('--num_layers', default=9, type=int)
parser.add_argument('--input_channels', default=34, type=int)
parser.add_argument('--hidden_channels', default=100, type=int)
parser.add_argument('--kernel_size', default=5, type=int)
parser.set_defaults(do_discrete=False)
parser.add_argument('--do_discrete', dest='do_discrete', action='store_true')
'''
2. Preprocessing specifications
eps
'''
parser.add_argument('--eps', default=0.00316, type=float)
'''
3. Training Specification
val : should it perform validation
early_stopping : should it perform early stopping
trainset : dataset for training
valset : dataset for validation
lr : learning rate
epoch : epoch
criterion : which loss function should it use
'''
parser.set_defaults(do_feature_dropout=False)
parser.add_argument('--do_feature_dropout', dest='do_feature_dropout', action='store_true')
parser.set_defaults(do_finetune=False)
parser.add_argument('--do_finetune', dest='do_finetune', action='store_true')
parser.add_argument('--use_llpm_buf', default=False, type=bool)
parser.set_defaults(do_val=False)
parser.add_argument('--do_val', dest='do_val', action='store_true')
parser.set_defaults(do_early_stopping=False)
parser.add_argument('--do_early_stopping', dest='do_early_stopping', action='store_true')
parser.add_argument('--data_dir')
parser.add_argument('--batch_size', default=8, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--loss', default='L1')
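# Example invocation (the script name and data path are hypothetical):
#   python train_kpcn_decomp.py --device cuda:0 --mode kpcn --data_dir /path/to/data \
#       --batch_size 8 --lr 1e-4 --epochs 20 --loss L1 --do_val --do_early_stopping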
save_dir = 'kpcn_decomp_c1'
writer = SummaryWriter('kpcn/'+save_dir)
def validation(models, dataloader, eps, criterion, device, epoch, use_llpm_buf, mode='kpcn'):
lossDiff1 = 0
lossSpec1 = 0
lossDiff2 = 0
lossSpec2 = 0
lossFinal = 0
relL2Final = 0
lossDiffPath = 0
lossSpecPath = 0
relL2 = RelativeMSE()
path_criterion = GlobalRelativeSimilarityLoss()
# for batch_idx, data in enumerate(dataloader):
batch_idx = 0
decompNet = models['decomp']
diffuseNet1, specularNet1, diffuseNet2, specularNet2 = models['diffuse1'].eval(), models['specular1'].eval(), models['diffuse2'].eval(), models['specular2'].eval()
diffPathNet, specPathNet = models['path_diffuse'].eval(), models['path_specular'].eval()
with torch.no_grad():
for batch in tqdm(dataloader, leave=False, ncols=70):
# Decompose image
# print(batch['kpcn_specular_in'].shape)
for k, v in batch.items():
batch[k] = v.to(device)
mask, batch1, batch2 = decompNet(batch)
# if use_llpm_buf:
paths = batch2['paths'].to(device)
p_buffer_diffuse, p_buffer_specular = diffPathNet(paths), specPathNet(paths)
'''Feature Disentanglement'''
#TODO
_, _, c, _, _ = p_buffer_diffuse.shape
assert c >= 2
# Variance
p_var_diffuse = p_buffer_diffuse.var(1).mean(1, keepdims=True)
p_var_diffuse /= p_buffer_diffuse.shape[1]
p_var_specular = p_buffer_specular.var(1).mean(1, keepdims=True)
p_var_specular /= p_buffer_specular.shape[1]
# make new batch
batch2 = {
'target_total': batch2['target_total'].to(device),
'target_diffuse': batch2['target_diffuse'].to(device),
'target_specular': batch2['target_specular'].to(device),
'kpcn_diffuse_in': torch.cat([batch2['kpcn_diffuse_in'].to(device), p_buffer_diffuse.mean(1), p_var_diffuse], 1),
'kpcn_specular_in': torch.cat([batch2['kpcn_specular_in'].to(device), p_buffer_specular.mean(1), p_var_specular], 1),
'kpcn_diffuse_buffer': batch2['kpcn_diffuse_buffer'].to(device),
'kpcn_specular_buffer': batch2['kpcn_specular_buffer'].to(device),
'kpcn_albedo': batch2['kpcn_albedo'].to(device),
}
# Denosing using only G-buffers
# inputs
X_diff1 = batch1['kpcn_diffuse_in'].to(device)
Y_diff1 = batch1['target_diffuse'].to(device)
X_spec1 = batch1['kpcn_specular_in'].to(device)
Y_spec1 = batch1['target_specular'].to(device)
outputDiff1 = diffuseNet1(X_diff1)
Y_diff1 = crop_like(Y_diff1, outputDiff1)
lossDiff1 += criterion(outputDiff1, Y_diff1).item()
outputSpec1 = specularNet1(X_spec1)
Y_spec1 = crop_like(Y_spec1, outputSpec1)
lossSpec1 += criterion(outputSpec1, Y_spec1).item()
# calculate final ground truth error
albedo = batch1['kpcn_albedo'].to(device)
albedo = crop_like(albedo, outputDiff1)
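            # Reconstruct the radiance: the diffuse branch is re-modulated by albedo (+ eps)
            # and the specular branch, predicted in log space, is mapped back with exp(x) - 1.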
outputFinal1 = outputDiff1 * (albedo + eps) + torch.exp(outputSpec1) - 1.0
# Denoising using G-buffers & P-buffers
# inputs
X_diff2 = batch2['kpcn_diffuse_in'].to(device)
Y_diff2 = batch2['target_diffuse'].to(device)
X_spec2 = batch2['kpcn_specular_in'].to(device)
Y_spec2 = batch2['target_specular'].to(device)
outputDiff2 = diffuseNet2(X_diff2)
Y_diff2 = crop_like(Y_diff2, outputDiff2)
lossDiff2 += criterion(outputDiff2, Y_diff2).item()
outputSpec2 = specularNet2(X_spec2)
Y_spec2 = crop_like(Y_spec2, outputSpec2)
lossSpec2 += criterion(outputSpec2, Y_spec2).item()
# calculate final ground truth error
albedo = batch2['kpcn_albedo'].to(device)
albedo = crop_like(albedo, outputDiff2)
outputFinal2 = outputDiff2 * (albedo + eps) + torch.exp(outputSpec2) - 1.0
# Loss of merged denoised result
outputFinal = outputFinal1 + outputFinal2
Y_final = batch['target_total'].to(device)
Y_final = crop_like(Y_final, outputFinal)
lossFinal += criterion(outputFinal, Y_final).item()
relL2Final += relL2(outputFinal, Y_final).item()
# if use_llpm_buf:
p_buffer_diffuse = crop_like(p_buffer_diffuse, outputDiff2)
loss_manif_diffuse = path_criterion(p_buffer_diffuse, Y_diff2)
p_buffer_specular = crop_like(p_buffer_specular, outputSpec2)
loss_manif_specular = path_criterion(p_buffer_specular, Y_spec2)
lossDiffPath += loss_manif_diffuse
lossSpecPath += loss_manif_specular
# lossDiff += 0.1 * loss_manif_diffuse
# lossSpec += 0.1 * loss_manif_specular
# visualize
if batch_idx == 20:
inputFinal = batch['kpcn_diffuse_buffer'] * (batch['kpcn_albedo'] + eps) + torch.exp(batch['kpcn_specular_buffer']) - 1.0
inputGrid = torchvision.utils.make_grid(inputFinal)
writer.add_image('noisy patches e{}'.format(epoch+1), inputGrid)
writer.add_image('noisy patches e{}'.format(str(epoch+1)+'_'+str(batch_idx)), inputGrid)
outputGrid = torchvision.utils.make_grid(outputFinal)
writer.add_image('denoised patches e{}'.format(str(epoch+1)+'_'+str(batch_idx)), outputGrid)
# writer.add_image('denoised patches e{}'.format(epoch+1), outputGrid)
cleanGrid = torchvision.utils.make_grid(Y_final)
# writer.add_image('clean patches e{}'.format(epoch+1), cleanGrid)
writer.add_image('clean patches e{}'.format(str(epoch+1)+'_'+str(batch_idx)), cleanGrid)
batch_idx += 1
return lossDiff1/(4*len(dataloader)), lossSpec1/(4*len(dataloader)), lossDiff2/(4*len(dataloader)), lossSpec2/(4*len(dataloader)), lossFinal/(4*len(dataloader)), relL2Final/(4*len(dataloader)), lossDiffPath/(4*len(dataloader)), lossSpecPath/(4*len(dataloader))
def train(mode,
device,
trainset,
validset,
eps,
L,
input_channels,
hidden_channels,
kernel_size,
epochs,
learning_rate,
loss,
do_early_stopping,
do_finetune,
use_llpm_buf,
do_discrete
):
dataloader = DataLoader(trainset, batch_size=8, num_workers=1, pin_memory=False)
print(len(dataloader))
if validset is not None:
validDataloader = DataLoader(validset, batch_size=4, num_workers=1, pin_memory=False)
# instantiate networks
print(L, input_channels, hidden_channels, kernel_size, mode)
print(mode)
    decompNet = decompModule(in_channel=26, discrete=do_discrete).to(device)
    optimizerDecomp = optim.Adam(decompNet.parameters(), lr=learning_rate, betas=(0.9, 0.99))
diffuseNet1 = KPCN(L, 34, hidden_channels, kernel_size).to(device)
specularNet1 = KPCN(L, 34, hidden_channels, kernel_size).to(device)
diffuseNet2 = KPCN(L, input_channels, hidden_channels, kernel_size).to(device)
specularNet2 = KPCN(L, input_channels, hidden_channels, kernel_size).to(device)
print('LEARNING RATE : {}'.format(learning_rate))
optimizerDiff1 = optim.Adam(diffuseNet1.parameters(), lr=learning_rate, betas=(0.9, 0.99))
optimizerSpec1 = optim.Adam(specularNet1.parameters(), lr=learning_rate, betas=(0.9, 0.99))
optimizerDiff2 = optim.Adam(diffuseNet2.parameters(), lr=learning_rate, betas=(0.9, 0.99))
optimizerSpec2 = optim.Adam(specularNet2.parameters(), lr=learning_rate, betas=(0.9, 0.99))
diffPathNet = PathNet(trainset.pnet_in_size).to(device)
optimizerDiffPath = optim.Adam(diffPathNet.parameters(), lr=1e-4, betas=(0.9, 0.99))
specPathNet = PathNet(trainset.pnet_in_size).to(device)
optimizerSpecPath = optim.Adam(specPathNet.parameters(), lr=1e-4, betas=(0.9, 0.99))
path_criterion = GlobalRelativeSimilarityLoss()
# checkpointDiffPath = torch.load('trained_model/kpcn_decomp_3/path_diff_e5.pt')
# diffPathNet.load_state_dict(checkpointDiffPath['model_state_dict'])
# optimizerDiffPath.load_state_dict(checkpointDiffPath['optimizer_state_dict'])
diffPathNet.train()
# checkpointSpecPath = torch.load('trained_model/kpcn_decomp_3/path_spec_e5.pt')
# specPathNet.load_state_dict(checkpointSpecPath['model_state_dict'])
# optimizerSpecPath.load_state_dict(checkpointSpecPath['optimizer_state_dict'])
specPathNet.train()
# else
print(diffuseNet1, "CUDA:", next(diffuseNet1.parameters()).device)
print(diffPathNet, "CUDA:", next(diffPathNet.parameters()).device)
print('# Parameter for KPCN : {}'.format(sum([p.numel() for p in diffuseNet1.parameters()])))
print('# Parameter for PathNet : {}'.format(sum([p.numel() for p in diffPathNet.parameters()])))
# print(summary(diffuseNet, input_size=(3, 128, 128)))
if loss == 'L1':
criterion = nn.L1Loss()
elif loss =='SMAPE':
criterion = SMAPE()
else:
print('Loss Not Supported')
return
# optimizerP = optim.Adam(specularNet.parameters(), lr=1e-4, betas=(0.9, 0.99))
# checkpointDiff1 = torch.load('trained_model/kpcn_decomp_3/diff1_e5.pt')
# diffuseNet1.load_state_dict(checkpointDiff1['model_state_dict'])
# optimizerDiff1.load_state_dict(checkpointDiff1['optimizer_state_dict'])
diffuseNet1.train()
# checkpointSpec1 = torch.load('trained_model/kpcn_decomp_3/spec1_e5.pt')
# specularNet1.load_state_dict(checkpointSpec1['model_state_dict'])
# optimizerSpec1.load_state_dict(checkpointSpec1['optimizer_state_dict'])
specularNet1.train()
# checkpointDiff2 = torch.load('trained_model/kpcn_decomp_3/diff2_e5.pt')
# diffuseNet2.load_state_dict(checkpointDiff2['model_state_dict'])
# optimizerDiff2.load_state_dict(checkpointDiff2['optimizer_state_dict'])
diffuseNet2.train()
# checkpointSpec2 = torch.load('trained_model/kpcn_decomp_3/spec2_e5.pt')
# specularNet2.load_state_dict(checkpointSpec2['model_state_dict'])
# optimizerSpec2.load_state_dict(checkpointSpec2['optimizer_state_dict'])
specularNet2.train()
# pNet.train()
accuLossDiff1 = 0
accuLossSpec1 = 0
accuLossDiff2 = 0
accuLossSpec2 = 0
accuLossFinal = 0
lDiff = []
lSpec = []
lFinal = []
valLDiff = []
valLSpec = []
valLFinal = []
# writer = SummaryWriter('runs/'+mode+'_2')
total_epoch = 0
init_epoch = 0
# init_epoch = checkpointDiff1['epoch'] + 1
# epoch = 0
if init_epoch == 0:
print('Check Initialization')
        models = {
            'decomp': decompNet,
            'diffuse1': diffuseNet1,
'specular1': specularNet1,
'diffuse2': diffuseNet2,
'specular2': specularNet2,
'path_diffuse': diffPathNet,
'path_specular': specPathNet
}
initLossDiff1, initLossSpec1, initLossDiff2, initLossSpec2, initLossFinal, relL2LossFinal, pathDiffLoss, pathSpecLoss = validation(models, validDataloader, eps, criterion, device, -1, use_llpm_buf,mode)
print("initLossDiff1: {}".format(initLossDiff1))
print("initLossSpec1: {}".format(initLossSpec1))
print("initLossFinal: {}".format(initLossFinal))
print("relL2LossFinal: {}".format(relL2LossFinal))
print("pathDiffLoss: {}".format(pathDiffLoss))
print("pathSpecLoss: {}".format(pathSpecLoss))
writer.add_scalar('Valid total relL2 loss', relL2LossFinal if relL2LossFinal != float('inf') else 0, (init_epoch + 1))
writer.add_scalar('Valid total loss', initLossFinal if initLossFinal != float('inf') else 0, (init_epoch + 1))
writer.add_scalar('Valid diffuse loss 1', initLossDiff1 if initLossDiff1 != float('inf') else 0, (init_epoch + 1))
writer.add_scalar('Valid specular loss 1', initLossSpec1 if initLossSpec1 != float('inf') else 0, (init_epoch + 1))
writer.add_scalar('Valid diffuse loss 2', initLossDiff2 if initLossDiff2 != float('inf') else 0, (init_epoch + 1))
writer.add_scalar('Valid specular loss 2', initLossSpec2 if initLossSpec2 != float('inf') else 0, (init_epoch + 1))
writer.add_scalar('Valid path diffuse loss', pathDiffLoss if pathDiffLoss != float('inf') else 0, (init_epoch + 1))
writer.add_scalar('Valid path specular loss', pathSpecLoss if pathSpecLoss != float('inf') else 0, (init_epoch + 1))
import time
start = time.time()
print('START')
for epoch in range(init_epoch, epochs):
print('EPOCH {}'.format(epoch+1))
        decompNet.train()
diffuseNet1.train()
specularNet1.train()
diffuseNet2.train()
specularNet2.train()
diffPathNet.train()
specPathNet.train()
i_batch = -1
for batch in tqdm(dataloader, leave=False, ncols=70):
i_batch += 1
# print(batch['kpcn_specular_in'].shape)
# print('DECOMPOSITION')
for k, v in batch.items():
batch[k] = v.to(device)
            # Decompose the input into two branches (mirrors validation())
            mask, batch1, batch2 = decompNet(batch)
            loss_manif = None
            paths = batch2['paths'].to(device)
p_buffer_diffuse, p_buffer_specular = diffPathNet(paths), specPathNet(paths)
'''Feature Disentanglement'''
#TODO
_, _, c, _, _ = p_buffer_diffuse.shape
assert c >= 2
# Variance
p_var_diffuse = p_buffer_diffuse.var(1).mean(1, keepdims=True)
p_var_diffuse /= p_buffer_diffuse.shape[1]
p_var_specular = p_buffer_specular.var(1).mean(1, keepdims=True)
p_var_specular /= p_buffer_specular.shape[1]
# make new batch
batch2 = {
'target_total': batch2['target_total'].to(device),
'target_diffuse': batch2['target_diffuse'].to(device),
'target_specular': batch2['target_specular'].to(device),
'kpcn_diffuse_in': torch.cat([batch2['kpcn_diffuse_in'].to(device), p_buffer_diffuse.mean(1), p_var_diffuse], 1),
'kpcn_specular_in': torch.cat([batch2['kpcn_specular_in'].to(device), p_buffer_specular.mean(1), p_var_specular], 1),
'kpcn_diffuse_buffer': batch2['kpcn_diffuse_buffer'].to(device),
'kpcn_specular_buffer': batch2['kpcn_specular_buffer'].to(device),
'kpcn_albedo': batch2['kpcn_albedo'].to(device),
}
# zero the parameter gradients
            optimizerDecomp.zero_grad()
            optimizerDiff1.zero_grad()
optimizerSpec1.zero_grad()
optimizerDiff2.zero_grad()
optimizerSpec2.zero_grad()
optimizerDiffPath.zero_grad()
optimizerSpecPath.zero_grad()
            # Denoising using only G-buffers
# inputs
X_diff1 = batch1['kpcn_diffuse_in'].to(device)
Y_diff1 = batch1['target_diffuse'].to(device)
X_spec1 = batch1['kpcn_specular_in'].to(device)
Y_spec1 = batch1['target_specular'].to(device)
outputDiff1 = diffuseNet1(X_diff1)
Y_diff1 = crop_like(Y_diff1, outputDiff1)
lossDiff1 = criterion(outputDiff1, Y_diff1)
outputSpec1 = specularNet1(X_spec1)
Y_spec1 = crop_like(Y_spec1, outputSpec1)
lossSpec1 = criterion(outputSpec1, Y_spec1)
# calculate final ground truth error
albedo = batch1['kpcn_albedo'].to(device)
albedo = crop_like(albedo, outputDiff1)
outputFinal1 = outputDiff1 * (albedo + eps) + torch.exp(outputSpec1) - 1.0
# Denoising using G-buffers & P-buffers
# inputs
X_diff2 = batch2['kpcn_diffuse_in'].to(device)
Y_diff2 = batch2['target_diffuse'].to(device)
X_spec2 = batch2['kpcn_specular_in'].to(device)
Y_spec2 = batch2['target_specular'].to(device)
outputDiff2 = diffuseNet2(X_diff2)
Y_diff2 = crop_like(Y_diff2, outputDiff2)
lossDiff2 = criterion(outputDiff2, Y_diff2)
outputSpec2 = specularNet2(X_spec2)
Y_spec2 = crop_like(Y_spec2, outputSpec2)
lossSpec2 = criterion(outputSpec2, Y_spec2)
# calculate final ground truth error
albedo = batch2['kpcn_albedo'].to(device)
albedo = crop_like(albedo, outputDiff2)
outputFinal2 = outputDiff2 * (albedo + eps) + torch.exp(outputSpec2) - 1.0
p_buffer_diffuse = crop_like(p_buffer_diffuse, outputDiff2)
loss_manif_diffuse = path_criterion(p_buffer_diffuse, Y_diff2)
p_buffer_specular = crop_like(p_buffer_specular, outputSpec2)
loss_manif_specular = path_criterion(p_buffer_specular, Y_spec2)
lossDiff2 += 0.1 * loss_manif_diffuse
lossSpec2 += 0.1 * loss_manif_specular
if not do_finetune:
                # All four losses backpropagate through the shared decomposition graph,
                # so keep it alive until the final backward pass.
                lossDiff1.backward(retain_graph=True)
                optimizerDiff1.step()
                lossSpec1.backward(retain_graph=True)
                optimizerSpec1.step()
                lossDiff2.backward(retain_graph=True)
                optimizerDiff2.step()
                lossSpec2.backward()
optimizerSpec2.step()
optimizerDiffPath.step()
optimizerSpecPath.step()
optimizerDecomp.step()
# Loss of merged denoised result
with torch.no_grad():
outputFinal = outputFinal1 + outputFinal2
Y_final = batch['target_total'].to(device)
Y_final = crop_like(Y_final, outputFinal)
lossFinal = criterion(outputFinal, Y_final)
# relL2Final = relL2(outputFinal, Y_final).item()
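# (editor note) In fine-tuning mode the combined reconstruction loss below is backpropagated
# through both branches at once, and the branch and path-network optimizers all step on this
# joint objective instead of the per-branch losses used above.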
if do_finetune:
# print('FINETUNING')
outputFinal = outputFinal1 + outputFinal2
Y_final = batch['target_total'].to(device)
Y_final = crop_like(Y_final, outputFinal)
lossFinal = criterion(outputFinal, Y_final)
lossFinal.backward()
optimizerDiff1.step()
optimizerSpec1.step()
optimizerDiff2.step()
optimizerSpec2.step()
optimizerDiffPath.step()
optimizerSpecPath.step()
accuLossDiff1 += lossDiff1.item()
accuLossSpec1 += lossSpec1.item()
accuLossDiff2 += lossDiff2.item()
accuLossSpec2 += lossSpec2.item()
accuLossFinal += lossFinal.item()
writer.add_scalar('lossFinal', lossFinal if lossFinal != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('lossDiff1', lossDiff1 if lossDiff1 != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('lossSpec1', lossSpec1 if lossSpec1 != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('lossDiff2', lossDiff2 if lossDiff2 != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('lossSpec2', lossSpec2 if lossSpec2 != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
accuLossDiff1 /= 8 * len(dataloader)
accuLossSpec1 /= 8 * len(dataloader)
accuLossDiff2 /= 8 * len(dataloader)
accuLossSpec2 /= 8 * len(dataloader)
accuLossFinal /= 8 * len(dataloader)
writer.add_scalar('Train total loss', accuLossFinal if accuLossFinal != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('Train diffuse loss 1', accuLossDiff1 if accuLossDiff1 != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('Train specular loss 1', accuLossSpec1 if accuLossSpec1 != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('Train diffuse loss 2', accuLossDiff2 if accuLossDiff2 != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
writer.add_scalar('Train specular loss 2', accuLossSpec2 if accuLossSpec2 != float('inf') else 1e+35, epoch * len(dataloader) + i_batch)
if not os.path.exists('trained_model/' + save_dir):
os.makedirs('trained_model/' + save_dir)
print('MAKE DIR {}'.format('trained_model/'+save_dir))
torch.save({
'epoch': epoch,
'model_state_dict': decompNet.state_dict(),
'optimizer_state_dict': optimizerDecomp.state_dict(),
}, 'trained_model/'+ save_dir + '/decomp_e{}.pt'.format(epoch+1))
torch.save({
'epoch': epoch,
'model_state_dict': diffuseNet1.state_dict(),
'optimizer_state_dict': optimizerDiff1.state_dict(),
}, 'trained_model/'+ save_dir + '/diff1_e{}.pt'.format(epoch+1))
torch.save({
'epoch': epoch,
'model_state_dict': specularNet1.state_dict(),
'optimizer_state_dict': optimizerSpec1.state_dict(),
}, 'trained_model/'+ save_dir + '/spec1_e{}.pt'.format(epoch+1))
torch.save({
'epoch': epoch,
'model_state_dict': diffuseNet2.state_dict(),
'optimizer_state_dict': optimizerDiff2.state_dict(),
}, 'trained_model/'+ save_dir + '/diff2_e{}.pt'.format(epoch+1))
torch.save({
'epoch': epoch,
'model_state_dict': specularNet2.state_dict(),
'optimizer_state_dict': optimizerSpec2.state_dict(),
}, 'trained_model/'+ save_dir + '/spec2_e{}.pt'.format(epoch+1))
torch.save({
'epoch': epoch,
'model_state_dict': diffPathNet.state_dict(),
'optimizer_state_dict': optimizerDiffPath.state_dict(),
}, 'trained_model/'+ save_dir + '/path_diff_e{}.pt'.format(epoch+1))
torch.save({
'epoch': epoch,
'model_state_dict': specPathNet.state_dict(),
'optimizer_state_dict': optimizerSpecPath.state_dict(),
}, 'trained_model/'+ save_dir + '/path_spec_e{}.pt'.format(epoch+1))
# print('VALIDATION WORKING!')
models = {'decomp': decompNet,
'diffuse1': diffuseNet1,
'specular1': specularNet1,
'diffuse2': diffuseNet2,
'specular2': specularNet2,
'path_diffuse': diffPathNet,
'path_specular': specPathNet
}
validLossDiff1, validLossSpec1, validLossDiff2, validLossSpec2, validLossFinal, relL2LossFinal, pathDiffLoss, pathSpecLoss = validation(models, validDataloader, eps, criterion, device, epoch, use_llpm_buf,mode)
writer.add_scalar('Valid total relL2 loss', relL2LossFinal if relL2LossFinal != float('inf') else 1e+35, (epoch + 1))
writer.add_scalar('Valid total loss', validLossFinal if validLossFinal != float('inf') else 1e+35, (epoch + 1))
writer.add_scalar('Valid diffuse loss 1', validLossDiff1 if validLossDiff1 != float('inf') else 0, (epoch + 1))
writer.add_scalar('Valid specular loss 1', validLossSpec1 if validLossSpec1 != float('inf') else 0, (epoch + 1))
writer.add_scalar('Valid diffuse loss 2', validLossDiff2 if validLossDiff2 != float('inf') else 0, (epoch + 1))
writer.add_scalar('Valid specular loss 2', validLossSpec2 if validLossSpec2 != float('inf') else 0, (epoch + 1))
writer.add_scalar('Valid path diffuse loss', pathDiffLoss if pathDiffLoss != float('inf') else 0, (epoch + 1))
writer.add_scalar('Valid path specular loss', pathSpecLoss if pathSpecLoss != float('inf') else 0, (epoch + 1))
print("Epoch {}".format(epoch + 1))
print("ValidLossDiff1: {}".format(validLossDiff1))
print("ValidLossSpec1: {}".format(validLossSpec1))
print("ValidLossDiff2: {}".format(validLossDiff2))
print("ValidLossSpec2: {}".format(validLossSpec2))
print("ValidLossFinal: {}".format(validLossFinal))
print("ValidrelL2LossDiff: {}".format(relL2LossFinal))
print("pathDiffLoss: {}".format(pathDiffLoss))
print("pathSpecLoss: {}".format(pathSpecLoss))
# lDiff.append(accuLossDiff)
# lSpec.append(accuLossSpec)
# lFinal.append(accuLossFinal)
# valLDiff.append(validLossDiff)
# valLSpec.append(validLossSpec)
# valLFinal.append(validLossFinal)
# if not os.path.exists('trained_model/' + save_dir):
# os.makedirs('trained_model/' + save_dir)
# print('MAKE DIR {}'.format('trained_model/'+save_dir))
# # torch.save(diffuseNet.state_dict(), 'trained_model/'+ save_dir + '/diff_e{}.pt'.format(epoch+1))
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': diffuseNet.state_dict(),
# 'optimizer_state_dict': optimizerDiff.state_dict(),
# }, 'trained_model/'+ save_dir + '/diff_e{}.pt'.format(epoch+1))
# # torch.save(specularNet.state_dict(), 'trained_model/' + save_dir + '/spec_e{}.pt'.format(epoch+1))
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': specularNet.state_dict(),
# 'optimizer_state_dict': optimizerSpec.state_dict(),
# }, 'trained_model/'+ save_dir + '/spec_e{}.pt'.format(epoch+1))
print('SAVED {}/diff_e{}, {}/spec_e{}'.format(save_dir, epoch+1, save_dir, epoch+1))
total_epoch += 1
if do_early_stopping and len(valLFinal) > 10 and valLFinal[-1] >= valLFinal[-2]:
print('EARLY STOPPING!')
break
accuLossDiff = 0
accuLossSpec = 0
accuLossFinal = 0
writer.close()
print('Finished training in mode {} after {} epochs'.format(mode, total_epoch))
print('Took', time.time() - start, 'seconds.')
# return diffuseNet, specularNet, lDiff, lSpec, lFinal
def main():
args = parser.parse_args()
print(args)
dataset, dataloader = init_data(args)
print(len(dataset['train']), len(dataloader['train']))
# trainset, validset = dataloader['train'], dataloader['val']
trainset, validset = dataset['train'], dataset['val']
print(trainset, validset)
input_channels = dataset['train'].dncnn_in_size
train(
args.mode,
args.device,
trainset,
validset,
eps,
args.num_layers,
input_channels,
args.hidden_channels,
args.kernel_size,
args.epochs,
args.lr,
args.loss,
args.do_early_stopping,
args.do_finetune,
args.use_llpm_buf,
args.do_discrete
)
if __name__ == '__main__':
main() |
py | b41026138598bc64858e9d9f19e6c8a44bb379c5 |
def prob_8(altura):
cont = altura
resultado = ""
while cont > 0:
for i in range(0, altura):
resultado += (" "*(altura-i-1) + "* " * (i+1) + "\n")
cont = cont - 1
return resultado |
py | b41026202c7d6d60283f8c9389cb113eed246567 | from __future__ import absolute_import, unicode_literals
from stravalib import model
from stravalib import unithelper as uh
from stravalib.tests import TestBase
from units.quantity import Quantity
class ModelTest(TestBase):
def setUp(self):
super(ModelTest, self).setUp()
def test_entity_collections(self):
a = model.Athlete()
d = {'clubs': [{'resource_state': 2, 'id': 7, 'name': 'Team Roaring Mouse'},
{'resource_state': 2, 'id': 1, 'name': 'Team Strava Cycling'},
{'resource_state': 2, 'id': 34444, 'name': 'Team Strava Cyclocross'}]
}
a.from_dict(d)
self.assertEquals(3, len(a.clubs))
self.assertEquals('Team Roaring Mouse', a.clubs[0].name)
def test_speed_units(self):
a = model.Activity()
a.max_speed = 1000 # m/s
a.average_speed = 1000 # m/s
self.assertEquals(3600.0, float(uh.kph(a.max_speed)))
self.assertEquals(3600.0, float(uh.kph(a.average_speed)))
a.max_speed = uh.mph(1.0)
#print repr(a.max_speed)
self.assertAlmostEqual(1.61, float(uh.kph(a.max_speed)), places=2)
def test_time_intervals(self):
segment = model.Segment()
# s.pr_time = XXXX
split = model.Split()
split.moving_time = 3.1
split.elapsed_time = 5.73
def test_distance_units(self):
# Gear
g = model.Gear()
g.distance = 1000
self.assertEquals(1.0, float(uh.kilometers(g.distance)))
# Metric Split
split = model.Split()
split.distance = 1000 # meters
split.elevation_difference = 1000 # meters
self.assertIsInstance(split.distance, Quantity)
self.assertIsInstance(split.elevation_difference, Quantity)
self.assertEquals(1.0, float(uh.kilometers(split.distance)))
self.assertEquals(1.0, float(uh.kilometers(split.elevation_difference)))
split = None
# Segment
s = model.Segment()
s.distance = 1000
s.elevation_high = 2000
s.elevation_low = 1000
s.pr_distance = 1000
self.assertIsInstance(s.distance, Quantity)
self.assertIsInstance(s.elevation_high, Quantity)
self.assertIsInstance(s.elevation_low, Quantity)
self.assertEquals(1.0, float(uh.kilometers(s.distance)))
self.assertEquals(2.0, float(uh.kilometers(s.elevation_high)))
self.assertEquals(1.0, float(uh.kilometers(s.elevation_low)))
self.assertEquals(1.0, float(uh.kilometers(s.pr_distance)))
# Activity
a = model.Activity()
a.distance = 1000 # m
a.total_elevation_gain = 1000 # m
self.assertIsInstance(a.distance, Quantity)
self.assertIsInstance(a.total_elevation_gain, Quantity)
self.assertEquals(1.0, float(uh.kilometers(a.distance)))
self.assertEquals(1.0, float(uh.kilometers(a.total_elevation_gain)))
def test_weight_units(self):
"""
"""
# PowerActivityZone |
py | b410269dbabe532d725bf4be6d4e1d373061e3aa | import numpy as np
class CovError(Exception):
"""Raised when the number of covariance matrix terms are incorrect.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
def log_likelihood(calibrationData, prediction, numExperiments, covarianceMatrixList, edpNamesList, edpLengthsList,
covarianceMultiplierList, scaleFactors, shiftFactors):
""" Compute the log-likelihood
:param calibrationData: Calibration data consisting of the measured values of response. Each row contains the data
from one experiment. The length of each row equals the sum of the lengths of all response quantities.
:type calibrationData: numpy ndarray (atleast_2d)
:param prediction: Prediction of the response from the model, evaluated using the parameter values for
which the log-likelihood function needs to be calculated.
:type prediction: numpy ndarray (atleast_2d)
:param numExperiments: Number of experiments from which data is available, this is equal to the number of rows
(i.e., the first index) of the calibration data array
:type numExperiments: int
:param covarianceMatrixList: A list of length numExperiments * numResponses, where each item in the list contains
the covariance matrix or variance value corresponding to that experiment and response quantity
:type covarianceMatrixList: list of numpy ndarrays
:param edpNamesList: A list containing the names of the response quantities
:type edpNamesList: list of strings
:param edpLengthsList: A list containing the length of each response quantity
:type edpLengthsList: list of ints
:param covarianceMultiplierList: A list containing the covariance matrices or variance values. The length of this
list is equal to the product of the number of experiments and the number of response quantities.
:type covarianceMultiplierList: list of numpy ndarrays
:param scaleFactors: A list containing the normalizing factors used to scale (i.e. divide) the model
prediction values. The length of this list is equal to the number of response quantities.
:type scaleFactors: list of ints
:param shiftFactors: A list containing the values used to shift the prediction values. The locShift values are 0.0,
unless the abs max of the data of that response quantity is 0. In this case, the locShift = 1.0. LocShift values
must be added to the response quantities since they are added to the data. The length of this list is equal to the
number of response quantities.
:type shiftFactors: list of ints
:return: loglikelihood. This is a scalar value, which is equal to the logpdf of a zero-mean multivariate normal
distribution and a user-supplied covariance structure. Block-diagonal covariance structures are supported. The value
of multipliers on the covariance block corresponding to each response quantity is also calibrated.
:rtype: float
"""
# Check if the correct number of covariance terms has been passed in
numResponses = len(edpLengthsList)
if len(covarianceMatrixList) != numExperiments * numResponses:
print("ERROR: The expected number of covariance matrices is {}, but only {} were passed "
"in.".format(numExperiments * numResponses, len(covarianceMatrixList)))
raise CovError("ERROR: The expected number of covariance matrices is {}, but only {} were passed "
"in.".format(numExperiments * numResponses, len(covarianceMatrixList)))
# Shift and normalize the prediction
currentPosition = 0
for j in range(len(edpLengthsList)):
prediction[:, currentPosition:currentPosition + edpLengthsList[j]] += shiftFactors[j]
prediction[:, currentPosition:currentPosition + edpLengthsList[j]] /= scaleFactors[j]
currentPosition += edpLengthsList[j]
# Compute the normalized residuals
allResiduals = prediction - calibrationData
# Loop over the normalized residuals to compute the log-likelihood
loglike = 0
covListIndex = 0
for i in range(numExperiments):
currentPosition = 0
for j in range(numResponses):
# Get the residuals corresponding to this response variable
length = edpLengthsList[j]
residuals = allResiduals[i, currentPosition:currentPosition + length]
currentPosition += length
# Get the covariance matrix corresponding to this response variable
cov = np.atleast_2d(covarianceMatrixList[covListIndex])
covListIndex += 1
# Multiply the covariance matrix by the value of the covariance multiplier
cov = cov * covarianceMultiplierList[j]
if np.shape(cov)[0] == np.shape(cov)[1] == 1:
# If there is a single variance value that is constant for all residual terms, then this is the case of
# having a sample of i.i.d. zero-mean normally distributed observations, and the log-likelihood can be
# computed more efficiently
var = cov[0][0]
sig = np.sqrt(var)
ll = -length * np.log(sig) - length / 2 * np.log(2 * np.pi) - 1 / (2 * var) * np.sum(residuals ** 2)
else:
if np.shape(cov)[0] != np.shape(cov)[1]:
cov = np.diag(cov.flatten())
# The multivariate normal log-pdf is made up of three terms:
# logpdf = -1/2*[(d*log(2*pi)) + (log(abs(det(cov)))) + (residual.T * inverse(cov) * residual) i.e.,
# Mahalanobis distance]
# = -1/2*[t1 + t2 + t3]
t1 = length * np.log(2 * np.pi)
eigenValues, eigenVectors = np.linalg.eigh(cov)
logdet = np.sum(np.log(eigenValues))
eigenValuesReciprocal = 1. / eigenValues
z = eigenVectors * np.sqrt(eigenValuesReciprocal)
mahalanobisDistance = np.square(np.dot(residuals, z)).sum()
ll = -0.5 * (t1 + logdet + mahalanobisDistance)
if not np.isnan(ll):
loglike += ll
else:
loglike += -np.inf
return loglike
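# --- Illustrative usage (editor addition, not part of the original module) ---
# A minimal, hypothetical call: one experiment, a single response quantity of length 3,
# a constant unit variance, and no scaling or shifting. The values below are made up
# purely to show the expected argument shapes.
if __name__ == "__main__":
    demo_data = np.array([[1.0, 2.0, 3.0]])       # 1 experiment x 3 measured values
    demo_pred = np.array([[1.1, 1.9, 3.2]])       # model prediction, same shape
    demo_ll = log_likelihood(
        calibrationData=demo_data,
        prediction=demo_pred,
        numExperiments=1,
        covarianceMatrixList=[np.array([[1.0]])],  # one 1x1 covariance block
        edpNamesList=["edp1"],
        edpLengthsList=[3],
        covarianceMultiplierList=[1.0],
        scaleFactors=[1.0],
        shiftFactors=[0.0],
    )
    print("log-likelihood:", demo_ll)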
|
py | b41027eb15471ab789e0504b6044295aedb5ab8a | """
File utilities.
"""
import os
from project.enums import file_extensions_enum
from project.enums import file_type_enum
from flask import current_app
def get_file_extension(path: str) -> str:
"""
Get file extension from path.
"""
split = os.path.splitext(path)
if not len(split):
return ''
extension = split[-1].lower()
return extension.replace('.', '')
def get_file_name(path: str) -> str:
"""
Get the file name with extension.
"""
return os.path.basename(path)
def get_file_dir(path: str) -> str:
"""
Get the dirname of the file.
"""
return os.path.dirname(path)
def get_file_name_without_extension(path: str) -> str:
"""
Get the file name without extension.
"""
return os.path.splitext(path)[0]
def get_file_type(path: str) -> str:
"""
Get file type by file extension.
"""
extension = get_file_extension(path)
if extension in file_extensions_enum.IMAGE_FILE:
return file_type_enum.IMAGE
elif extension in file_extensions_enum.VIDEO_FILE:
return file_type_enum.VIDEO
return file_type_enum.FILE
def append_index_to_file_name(path: str, index: int) -> str:
"""
Append index to file name.
"""
dir_name = get_file_dir(path)
file_name = get_file_name(path)
extension = get_file_extension(file_name)
file_name_without_ext = get_file_name_without_extension(file_name)
file_name_without_ext += '_' + str(index)
current_app.logger.info(os.path.join(
dir_name,
file_name_without_ext + '.' + extension
))
return os.path.join(
dir_name,
file_name_without_ext + '.' + extension
)
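# Example (editor addition; the path is illustrative):
# append_index_to_file_name('uploads/photo.jpg', 2) -> 'uploads/photo_2.jpg'
# Note: the function logs via current_app.logger, so it expects to run inside a Flask
# application context.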
|
py | b41028274d2120b858683fa491bc5f1c7d457af5 | # -*- coding: utf-8 -*-
import traceback
from enum import IntEnum
from typing import Sequence, Optional
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QMenu, QHBoxLayout, QLabel, QVBoxLayout, QGridLayout, QLineEdit,
QPushButton, QAbstractItemView)
from PyQt5.QtGui import QFont, QStandardItem, QBrush
from electrum_dsv.util import bh2u, NotEnoughFunds, NoDynamicFeeEstimates
from electrum_dsv.i18n import _
from electrum_dsv.lnchannel import AbstractChannel, PeerState
from electrum_dsv.wallet import Abstract_Wallet
from electrum_dsv.lnutil import LOCAL, REMOTE, format_short_channel_id, LN_MAX_FUNDING_SAT
from electrum_dsv.lnworker import LNWallet
from .util import (MyTreeView, WindowModalDialog, Buttons, OkButton, CancelButton,
EnterButton, WaitingDialog, MONOSPACE_FONT, ColorScheme)
from .amountedit import BTCAmountEdit, FreezableLineEdit
ROLE_CHANNEL_ID = Qt.UserRole
class ChannelsList(MyTreeView):
update_rows = QtCore.pyqtSignal(Abstract_Wallet)
update_single_row = QtCore.pyqtSignal(Abstract_Wallet, AbstractChannel)
gossip_db_loaded = QtCore.pyqtSignal()
class Columns(IntEnum):
SHORT_CHANID = 0
NODE_ALIAS = 1
LOCAL_BALANCE = 2
REMOTE_BALANCE = 3
CHANNEL_STATUS = 4
headers = {
Columns.SHORT_CHANID: _('Short Channel ID'),
Columns.NODE_ALIAS: _('Node alias'),
Columns.LOCAL_BALANCE: _('Local'),
Columns.REMOTE_BALANCE: _('Remote'),
Columns.CHANNEL_STATUS: _('Status'),
}
filter_columns = [
Columns.SHORT_CHANID,
Columns.NODE_ALIAS,
Columns.CHANNEL_STATUS,
]
_default_item_bg_brush = None # type: Optional[QBrush]
def __init__(self, parent):
super().__init__(parent, self.create_menu, stretch_column=self.Columns.NODE_ALIAS,
editable_columns=[])
self.setModel(QtGui.QStandardItemModel(self))
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.main_window = parent
self.gossip_db_loaded.connect(self.on_gossip_db)
self.update_rows.connect(self.do_update_rows)
self.update_single_row.connect(self.do_update_single_row)
self.network = self.parent.network
self.lnworker = self.parent.wallet.lnworker
self.lnbackups = self.parent.wallet.lnbackups
self.setSortingEnabled(True)
def format_fields(self, chan):
labels = {}
for subject in (REMOTE, LOCAL):
bal_minus_htlcs = chan.balance_minus_outgoing_htlcs(subject)//1000
label = self.parent.format_amount(bal_minus_htlcs)
other = subject.inverted()
bal_other = chan.balance(other)//1000
bal_minus_htlcs_other = chan.balance_minus_outgoing_htlcs(other)//1000
if bal_other != bal_minus_htlcs_other:
label += ' (+' + self.parent.format_amount(bal_other - bal_minus_htlcs_other) + ')'
labels[subject] = label
status = chan.get_state_for_GUI()
closed = chan.is_closed()
node_alias = self.lnworker.get_node_alias(chan.node_id)
return [
chan.short_id_for_GUI(),
node_alias,
'' if closed else labels[LOCAL],
'' if closed else labels[REMOTE],
status
]
def on_success(self, txid):
self.main_window.show_error('Channel closed' + '\n' + txid)
def on_failure(self, exc_info):
type_, e, tb = exc_info
traceback.print_tb(tb)
self.main_window.show_error('Failed to close channel:\n{}'.format(repr(e)))
def close_channel(self, channel_id):
msg = _('Close channel?')
if not self.parent.question(msg):
return
def task():
coro = self.lnworker.close_channel(channel_id)
return self.network.run_from_another_thread(coro)
WaitingDialog(self, 'please wait..', task, self.on_success, self.on_failure)
def force_close(self, channel_id):
chan = self.lnworker.channels[channel_id]
to_self_delay = chan.config[REMOTE].to_self_delay
msg = _('Force-close channel?') + '\n\n'\
+ _('Funds retrieved from this channel will not be available before {} blocks after forced closure.').format(to_self_delay) + ' '\
+ _('After that delay, funds will be sent to an address derived from your wallet seed.') + '\n\n'\
+ _('In the meantime, channel funds will not be recoverable from your seed, and might be lost if you lose your wallet.') + ' '\
+ _('To prevent that, you should have a backup of this channel on another device.')
if self.parent.question(msg):
def task():
coro = self.lnworker.force_close_channel(channel_id)
return self.network.run_from_another_thread(coro)
WaitingDialog(self, 'please wait..', task, self.on_success, self.on_failure)
def remove_channel(self, channel_id):
if self.main_window.question(_('Are you sure you want to delete this channel? This will purge associated transactions from your wallet history.')):
self.lnworker.remove_channel(channel_id)
def remove_channel_backup(self, channel_id):
if self.main_window.question(_('Remove channel backup?')):
self.lnbackups.remove_channel_backup(channel_id)
def export_channel_backup(self, channel_id):
msg = ' '.join([
_("Channel backups can be imported in another instance of the same wallet, by scanning this QR code."),
_("Please note that channel backups cannot be used to restore your channels."),
_("If you lose your wallet file, the only thing you can do with a backup is to request your channel to be closed, so that your funds will be sent on-chain."),
])
data = self.lnworker.export_channel_backup(channel_id)
self.main_window.show_qrcode(data, 'channel backup', help_text=msg,
show_copy_text_btn=True)
def request_force_close(self, channel_id):
def task():
coro = self.lnbackups.request_force_close(channel_id)
return self.network.run_from_another_thread(coro)
def on_success(b):
self.main_window.show_message('success')
WaitingDialog(self, 'please wait..', task, on_success, self.on_failure)
def create_menu(self, position):
menu = QMenu()
menu.setSeparatorsCollapsible(True) # consecutive separators are merged together
selected = self.selected_in_column(self.Columns.NODE_ALIAS)
if not selected:
menu.addAction(_("Import channel backup"), lambda: self.parent.do_process_from_text_channel_backup())
menu.exec_(self.viewport().mapToGlobal(position))
return
multi_select = len(selected) > 1
if multi_select:
return
idx = self.indexAt(position)
if not idx.isValid():
return
item = self.model().itemFromIndex(idx)
if not item:
return
channel_id = idx.sibling(idx.row(), self.Columns.NODE_ALIAS).data(ROLE_CHANNEL_ID)
if channel_id in self.lnbackups.channel_backups:
menu.addAction(_("Request force-close"), lambda: self.request_force_close(channel_id))
menu.addAction(_("Delete"), lambda: self.remove_channel_backup(channel_id))
menu.exec_(self.viewport().mapToGlobal(position))
return
chan = self.lnworker.channels[channel_id]
menu.addAction(_("Details..."), lambda: self.parent.show_channel(channel_id))
cc = self.add_copy_menu(menu, idx)
cc.addAction(_("Node ID"), lambda: self.place_text_on_clipboard(
chan.node_id.hex(), title=_("Node ID")))
cc.addAction(_("Long Channel ID"), lambda: self.place_text_on_clipboard(
channel_id.hex(), title=_("Long Channel ID")))
if not chan.is_closed():
if not chan.is_frozen_for_sending():
menu.addAction(_("Freeze (for sending)"), lambda: chan.set_frozen_for_sending(True))
else:
menu.addAction(_("Unfreeze (for sending)"), lambda: chan.set_frozen_for_sending(False))
if not chan.is_frozen_for_receiving():
menu.addAction(_("Freeze (for receiving)"), lambda: chan.set_frozen_for_receiving(True))
else:
menu.addAction(_("Unfreeze (for receiving)"), lambda: chan.set_frozen_for_receiving(False))
funding_tx = self.parent.wallet.db.get_transaction(chan.funding_outpoint.txid)
if funding_tx:
menu.addAction(_("View funding transaction"), lambda: self.parent.show_transaction(funding_tx))
if not chan.is_closed():
menu.addSeparator()
if chan.peer_state == PeerState.GOOD:
menu.addAction(_("Close channel"), lambda: self.close_channel(channel_id))
menu.addAction(_("Force-close channel"), lambda: self.force_close(channel_id))
else:
item = chan.get_closing_height()
if item:
txid, height, timestamp = item
closing_tx = self.lnworker.lnwatcher.db.get_transaction(txid)
if closing_tx:
menu.addAction(_("View closing transaction"), lambda: self.parent.show_transaction(closing_tx))
menu.addSeparator()
menu.addAction(_("Export backup"), lambda: self.export_channel_backup(channel_id))
if chan.is_redeemed():
menu.addSeparator()
menu.addAction(_("Delete"), lambda: self.remove_channel(channel_id))
menu.exec_(self.viewport().mapToGlobal(position))
@QtCore.pyqtSlot(Abstract_Wallet, AbstractChannel)
def do_update_single_row(self, wallet: Abstract_Wallet, chan: AbstractChannel):
if wallet != self.parent.wallet:
return
for row in range(self.model().rowCount()):
item = self.model().item(row, self.Columns.NODE_ALIAS)
if item.data(ROLE_CHANNEL_ID) != chan.channel_id:
continue
for column, v in enumerate(self.format_fields(chan)):
self.model().item(row, column).setData(v, QtCore.Qt.DisplayRole)
items = [self.model().item(row, column) for column in self.Columns]
self._update_chan_frozen_bg(chan=chan, items=items)
if wallet.lnworker:
self.update_can_send(wallet.lnworker)
self.update_swap_button(wallet.lnworker)
@QtCore.pyqtSlot()
def on_gossip_db(self):
self.do_update_rows(self.parent.wallet)
@QtCore.pyqtSlot(Abstract_Wallet)
def do_update_rows(self, wallet):
if wallet != self.parent.wallet:
return
channels = list(wallet.lnworker.channels.values()) if wallet.lnworker else []
backups = list(wallet.lnbackups.channel_backups.values())
if wallet.lnworker:
self.update_can_send(wallet.lnworker)
self.model().clear()
self.update_headers(self.headers)
for chan in channels + backups:
items = [QtGui.QStandardItem(x) for x in self.format_fields(chan)]
self.set_editability(items)
if self._default_item_bg_brush is None:
self._default_item_bg_brush = items[self.Columns.NODE_ALIAS].background()
items[self.Columns.NODE_ALIAS].setData(chan.channel_id, ROLE_CHANNEL_ID)
items[self.Columns.NODE_ALIAS].setFont(QFont(MONOSPACE_FONT))
items[self.Columns.LOCAL_BALANCE].setFont(QFont(MONOSPACE_FONT))
items[self.Columns.REMOTE_BALANCE].setFont(QFont(MONOSPACE_FONT))
self._update_chan_frozen_bg(chan=chan, items=items)
self.model().insertRow(0, items)
self.sortByColumn(self.Columns.SHORT_CHANID, Qt.DescendingOrder)
def _update_chan_frozen_bg(self, *, chan: AbstractChannel, items: Sequence[QStandardItem]):
assert self._default_item_bg_brush is not None
# frozen for sending
item = items[self.Columns.LOCAL_BALANCE]
if chan.is_frozen_for_sending():
item.setBackground(ColorScheme.BLUE.as_color(True))
item.setToolTip(_("This channel is frozen for sending. It will not be used for outgoing payments."))
else:
item.setBackground(self._default_item_bg_brush)
item.setToolTip("")
# frozen for receiving
item = items[self.Columns.REMOTE_BALANCE]
if chan.is_frozen_for_receiving():
item.setBackground(ColorScheme.BLUE.as_color(True))
item.setToolTip(_("This channel is frozen for receiving. It will not be included in invoices."))
else:
item.setBackground(self._default_item_bg_brush)
item.setToolTip("")
def update_can_send(self, lnworker: LNWallet):
msg = _('Can send') + ' ' + self.parent.format_amount(lnworker.num_sats_can_send())\
+ ' ' + self.parent.base_unit() + '; '\
+ _('can receive') + ' ' + self.parent.format_amount(lnworker.num_sats_can_receive())\
+ ' ' + self.parent.base_unit()
self.can_send_label.setText(msg)
def update_swap_button(self, lnworker: LNWallet):
if lnworker.num_sats_can_send() or lnworker.num_sats_can_receive():
self.swap_button.setEnabled(True)
else:
self.swap_button.setEnabled(False)
def get_toolbar(self):
h = QHBoxLayout()
self.can_send_label = QLabel('')
h.addWidget(self.can_send_label)
h.addStretch()
self.swap_button = EnterButton(_('Swap'), self.swap_dialog)
self.swap_button.setToolTip("Have at least one channel to do swaps.")
self.swap_button.setDisabled(True)
self.new_channel_button = EnterButton(_('Open Channel'), self.new_channel_with_warning)
self.new_channel_button.setEnabled(self.parent.wallet.has_lightning())
h.addWidget(self.new_channel_button)
# Doriancoin: disable swap for now
# h.addWidget(self.swap_button)
return h
def new_channel_with_warning(self):
if not self.parent.wallet.lnworker.channels:
warning1 = _("Lightning support in Electrum is experimental. "
"Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. "
"You must backup your wallet file everytime you create a new channel.")
answer = self.parent.question(
_('Do you want to create your first channel?') + '\n\n' +
_('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if answer:
self.new_channel_dialog()
else:
self.new_channel_dialog()
def statistics_dialog(self):
channel_db = self.parent.network.channel_db
capacity = self.parent.format_amount(channel_db.capacity()) + ' '+ self.parent.base_unit()
d = WindowModalDialog(self.parent, _('Lightning Network Statistics'))
d.setMinimumWidth(400)
vbox = QVBoxLayout(d)
h = QGridLayout()
h.addWidget(QLabel(_('Nodes') + ':'), 0, 0)
h.addWidget(QLabel('{}'.format(channel_db.num_nodes)), 0, 1)
h.addWidget(QLabel(_('Channels') + ':'), 1, 0)
h.addWidget(QLabel('{}'.format(channel_db.num_channels)), 1, 1)
h.addWidget(QLabel(_('Capacity') + ':'), 2, 0)
h.addWidget(QLabel(capacity), 2, 1)
vbox.addLayout(h)
vbox.addLayout(Buttons(OkButton(d)))
d.exec_()
def new_channel_dialog(self):
lnworker = self.parent.wallet.lnworker
d = WindowModalDialog(self.parent, _('Open Channel'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Enter Remote Node ID or connection string or invoice')))
remote_nodeid = QLineEdit()
remote_nodeid.setMinimumWidth(700)
amount_e = BTCAmountEdit(self.parent.get_decimal_point)
# max button
def spend_max():
amount_e.setFrozen(max_button.isChecked())
if not max_button.isChecked():
return
make_tx = self.parent.mktx_for_open_channel('!')
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
max_button.setChecked(False)
amount_e.setFrozen(False)
self.main_window.show_error(str(e))
return
amount = tx.output_value()
amount = min(amount, LN_MAX_FUNDING_SAT)
amount_e.setAmount(amount)
max_button = EnterButton(_("Max"), spend_max)
max_button.setFixedWidth(100)
max_button.setCheckable(True)
suggest_button = QPushButton(d, text=_('Suggest Peer'))
def on_suggest():
self.parent.wallet.network.start_gossip()
nodeid = bh2u(lnworker.lnrater.suggest_peer() or b'')
if not nodeid:
remote_nodeid.setText("")
remote_nodeid.setPlaceholderText(
"Please wait until the graph is synchronized to 30%, and then try again.")
else:
remote_nodeid.setText(nodeid)
remote_nodeid.repaint() # macOS hack for #6269
suggest_button.clicked.connect(on_suggest)
clear_button = QPushButton(d, text=_('Clear'))
def on_clear():
amount_e.setText('')
amount_e.setFrozen(False)
amount_e.repaint() # macOS hack for #6269
remote_nodeid.setText('')
remote_nodeid.repaint() # macOS hack for #6269
max_button.setChecked(False)
max_button.repaint() # macOS hack for #6269
clear_button.clicked.connect(on_clear)
h = QGridLayout()
h.addWidget(QLabel(_('Remote Node ID')), 0, 0)
h.addWidget(remote_nodeid, 0, 1, 1, 3)
h.addWidget(suggest_button, 1, 1)
h.addWidget(clear_button, 1, 2)
h.addWidget(QLabel('Amount'), 2, 0)
h.addWidget(amount_e, 2, 1)
h.addWidget(max_button, 2, 2)
vbox.addLayout(h)
ok_button = OkButton(d)
ok_button.setDefault(True)
vbox.addLayout(Buttons(CancelButton(d), ok_button))
if not d.exec_():
return
if max_button.isChecked() and amount_e.get_amount() < LN_MAX_FUNDING_SAT:
# if 'max' enabled and amount is strictly less than max allowed,
# that means we have fewer coins than max allowed, and hence we can
# spend all coins
funding_sat = '!'
else:
funding_sat = amount_e.get_amount()
connect_str = str(remote_nodeid.text()).strip()
if not connect_str or not funding_sat:
return
self.parent.open_channel(connect_str, funding_sat, 0)
def swap_dialog(self):
from .swap_dialog import SwapDialog
d = SwapDialog(self.parent)
d.run()
|
py | b4102864eb5c493f3744ff7153baf5677e5d96e0 | import data_parse, data_save |
py | b41028b834965cb033185f45a99b4ca7327010be | from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class RepDet(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(RepDet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
|
py | b41028e4b54455155ae292fc942d8b4a587b9243 | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_10 import models
class TestResultResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[TestResult]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.TestResult]
):
"""
Keyword args:
items (list[TestResult])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `TestResultResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TestResultResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TestResultResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b410296439d56fe1b1c625fa0faf45870111927c | #%%
import tensorflow as tf
import tensorflow.contrib.slim as slim
def model_summary():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
# %%
config = tf.ConfigProto(allow_soft_placement = True)
with tf.device('/gpu:0'):
with tf.Session(config = config) as sess:
saver = tf.train.import_meta_graph('/nfs6/deeppath_models/checkpoints/run1a_3D_classifier/model.ckpt-69000.meta')
saver.restore(sess,tf.train.latest_checkpoint('/nfs6/deeppath_models/checkpoints/run1a_3D_classifier/'))
model_summary() |
py | b4102a17dbda25151bca1db479d5b6c64b88a040 |
from dataset.animeface import (
AnimeFace,
AnimeFaceCelebA,
AnimeFaceSR,
AnimeFaceXDoG,
AnimeFaceLabel,
AnimeFaceOneHot
)
from dataset.portrait import (
DanbooruPortrait,
DanbooruPortraitCelebA,
DanbooruPortraitSR,
DanbooruPortraitXDoG
)
from dataset.danbooru import (
Danbooru,
DanbooruSR,
DanbooruAutoPair
)
import os
import torch
from torch.utils.data import DataLoader, Dataset
from collections.abc import Iterable
def cycle(iterable: Iterable):
while True:
for i in iterable:
yield i
def to_loader(
dataset: Dataset,
batch_size: int,
shuffle: bool=True,
num_workers: int=os.cpu_count(),
pin_memory: bool=torch.cuda.is_available()
) -> DataLoader:
loader = DataLoader(
dataset, batch_size, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory
)
return loader
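# --- Illustrative usage (editor addition) ---
# Sketch of wrapping a tiny in-memory dataset with to_loader() and iterating it forever
# with cycle(); TensorDataset is used here only for demonstration.
if __name__ == '__main__':
    from torch.utils.data import TensorDataset
    demo_set = TensorDataset(torch.arange(10).float())
    demo_loader = to_loader(demo_set, batch_size=4, shuffle=False, num_workers=0)
    stream = cycle(demo_loader)  # wraps around after each full pass over the loader
    print([next(stream)[0].tolist() for _ in range(4)])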
|
py | b4102a6f25befe629f1f340e676d6c760d541bbe | from JumpScale import j
base = j.data.capnp.getModelBaseClassCollection()
class RepoCollection(base):
"""
This class represents a collection of Repos
"""
def list(self, owner=0, name='', id=0, source="", returnIndex=False):
"""
List all keys of repo model with specified params.
@param owner int,, id of owner the repo belongs to.
@param name str,, name of repo.
@param id int,, repo id in db.
@param source str,, source of remote database.
@param returnIndex bool,, return the index used.
"""
if owner == "":
owner = ".*"
if name == "":
name = ".*"
if id == "" or id == 0:
id = ".*"
if source == "":
source = ".*"
regex = "%s:%s:%s:%s" % (owner, name, id, source)
return self._index.list(regex, returnIndex=returnIndex)
def find(self, owner='', name='', id=0, milestone=0, member=0, label='', source=""):
"""
List all instances of repo model with specified params.
@param owner int,, id of owner the repo belongs to.
@param name str,, name of repo.
@param id int,, repo id in db.
@param milestone int,, id of milestone in repo.
@param member int,, id of member in repo.
@param label str,, label in repo.
@param source str,, source of remote database.
"""
res = []
for key in self.list(owner=owner, name=name, id=id, source=source):
res.append(self.get(key))
if milestone:
for model in res[::-1]:
for milestone_model in model.dictFiltered.get('milestones', []):
if milestone == milestone_model['id']:
break
else:
res.remove(model)
if member:
for model in res[::-1]:
for member_model in model.dictFiltered.get('members', []):
if member == member_model['userKey']:
break
else:
res.remove(model)
if label:
for model in res[::-1]:
if (label not in model.dictFiltered.get('labels', [])) or not model.dictFiltered.get('labels', False):
res.remove(model)
return res
def getFromId(self, id):
key = self._index.lookupGet("issue_id", id)
return self.get(key)
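# Example (editor addition; how the collection instance is obtained depends on the
# JumpScale setup, so `repo_collection` below is hypothetical):
# repo_keys = repo_collection.list(owner=42, source='github')
# bug_repos = repo_collection.find(owner=42, label='bug')
# one_repo = repo_collection.getFromId(7)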
|
py | b4102b0eb86a27d0f28b768af761ef43ad0a59ed | import json
import torch
import argparse
import datetime
from commons.utils import train, eval, import_module
if __name__ == '__main__':
# Parse Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--train_config', type=str, default='', help='Path for train config json file')
parser.add_argument('--not_save', action='store_true')
args = parser.parse_args()
# Load Model
H_PARAMS = json.loads(open(args.train_config).read())
Model = import_module(H_PARAMS['model'])
model = Model(H_PARAMS)
# Show model name
model_name = H_PARAMS['model_name'] if 'model_name' in H_PARAMS.keys() else 'Unknown'
print('Loading model:{}'.format(model_name))
# Load Dataloader
DataLoader = import_module(H_PARAMS['dataloader'])
dataLoader = DataLoader(H_PARAMS)
# Load data
load_option = H_PARAMS['load_option'] if 'load_option' in H_PARAMS.keys() else None
dataLoader.load_data('train', load_option)
# Prepare Optimizer
model.optimizer = torch.optim.Adam(model.parameters(), lr=H_PARAMS['lr'], weight_decay=H_PARAMS['weight_decay'])
# Epoch
for epoch in range(H_PARAMS['epoch']):
print('Epoch {} @ {} '.format(epoch + 1, datetime.datetime.now()), end='')
# Training
total_loss = train(model, dataLoader)
print('Loss: {}'.format(total_loss))
# Evaluating
if not epoch % H_PARAMS['eval_freq']:
print('Evaluating...', end='')
total_acc = eval(model, dataLoader)
if not args.not_save:
# Save model if high acc
model.save_weights(total_acc)
|
py | b4102b92768cc71af2dbfbfeed7546c2813abd2d | #-*-coding:utf-8-*-
# Author : Zhang Zhichaung
# Date : 2019/6/20 下午2:46
import numpy as np
path = '/mnt/share/users/zzc/kitti_second/training/velodyne/000000.bin'
points = np.fromfile(path, dtype=np.float32, count=-1).reshape([-1, 4])
# path: the file to read, dtype: data type of the returned array, count: int, number of items to read (-1 means the whole file).
# reshape([a, b]): regroup the flat array of a*b values into rows of b elements; a=-1 lets numpy infer the row count for any length that is a multiple of b, giving b columns per row.
np.savetxt('/home/zzc/second.pytorch/test/000000_origin.txt', points)
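# Worked example (editor addition): the .bin file is a flat float32 stream of
# x, y, z, intensity values, so 8 floats reshape into 2 points of 4 fields each:
# np.arange(8, dtype=np.float32).reshape([-1, 4])
# -> [[0., 1., 2., 3.],
#     [4., 5., 6., 7.]]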
|
py | b4102d2c2eedb01073ef53165a2286882395ca59 | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from yapily.models.other_type import OtherType # noqa: F401,E501
class EligibilityOtherEligibility(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'amount': 'str',
'description': 'str',
'indicator': 'bool',
'name': 'str',
'notes': 'list[str]',
'other_type': 'OtherType',
'period': 'str',
'textual': 'str',
'type': 'str'
}
attribute_map = {
'amount': 'Amount',
'description': 'Description',
'indicator': 'Indicator',
'name': 'Name',
'notes': 'Notes',
'other_type': 'OtherType',
'period': 'Period',
'textual': 'Textual',
'type': 'Type'
}
def __init__(self, amount=None, description=None, indicator=None, name=None, notes=None, other_type=None, period=None, textual=None, type=None): # noqa: E501
"""EligibilityOtherEligibility - a model defined in Swagger""" # noqa: E501
self._amount = None
self._description = None
self._indicator = None
self._name = None
self._notes = None
self._other_type = None
self._period = None
self._textual = None
self._type = None
self.discriminator = None
if amount is not None:
self.amount = amount
if description is not None:
self.description = description
if indicator is not None:
self.indicator = indicator
if name is not None:
self.name = name
if notes is not None:
self.notes = notes
if other_type is not None:
self.other_type = other_type
if period is not None:
self.period = period
if textual is not None:
self.textual = textual
if type is not None:
self.type = type
@property
def amount(self):
"""Gets the amount of this EligibilityOtherEligibility. # noqa: E501
:return: The amount of this EligibilityOtherEligibility. # noqa: E501
:rtype: str
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this EligibilityOtherEligibility.
:param amount: The amount of this EligibilityOtherEligibility. # noqa: E501
:type: str
"""
self._amount = amount
@property
def description(self):
"""Gets the description of this EligibilityOtherEligibility. # noqa: E501
:return: The description of this EligibilityOtherEligibility. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this EligibilityOtherEligibility.
:param description: The description of this EligibilityOtherEligibility. # noqa: E501
:type: str
"""
self._description = description
@property
def indicator(self):
"""Gets the indicator of this EligibilityOtherEligibility. # noqa: E501
:return: The indicator of this EligibilityOtherEligibility. # noqa: E501
:rtype: bool
"""
return self._indicator
@indicator.setter
def indicator(self, indicator):
"""Sets the indicator of this EligibilityOtherEligibility.
:param indicator: The indicator of this EligibilityOtherEligibility. # noqa: E501
:type: bool
"""
self._indicator = indicator
@property
def name(self):
"""Gets the name of this EligibilityOtherEligibility. # noqa: E501
:return: The name of this EligibilityOtherEligibility. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EligibilityOtherEligibility.
:param name: The name of this EligibilityOtherEligibility. # noqa: E501
:type: str
"""
self._name = name
@property
def notes(self):
"""Gets the notes of this EligibilityOtherEligibility. # noqa: E501
:return: The notes of this EligibilityOtherEligibility. # noqa: E501
:rtype: list[str]
"""
return self._notes
@notes.setter
def notes(self, notes):
"""Sets the notes of this EligibilityOtherEligibility.
:param notes: The notes of this EligibilityOtherEligibility. # noqa: E501
:type: list[str]
"""
self._notes = notes
@property
def other_type(self):
"""Gets the other_type of this EligibilityOtherEligibility. # noqa: E501
:return: The other_type of this EligibilityOtherEligibility. # noqa: E501
:rtype: OtherType
"""
return self._other_type
@other_type.setter
def other_type(self, other_type):
"""Sets the other_type of this EligibilityOtherEligibility.
:param other_type: The other_type of this EligibilityOtherEligibility. # noqa: E501
:type: OtherType
"""
self._other_type = other_type
@property
def period(self):
"""Gets the period of this EligibilityOtherEligibility. # noqa: E501
:return: The period of this EligibilityOtherEligibility. # noqa: E501
:rtype: str
"""
return self._period
@period.setter
def period(self, period):
"""Sets the period of this EligibilityOtherEligibility.
:param period: The period of this EligibilityOtherEligibility. # noqa: E501
:type: str
"""
allowed_values = ["Day", "Half Year", "Month", "Quarter", "Week", "AcademicTerm", "Year"] # noqa: E501
if period not in allowed_values:
raise ValueError(
"Invalid value for `period` ({0}), must be one of {1}" # noqa: E501
.format(period, allowed_values)
)
self._period = period
@property
def textual(self):
"""Gets the textual of this EligibilityOtherEligibility. # noqa: E501
:return: The textual of this EligibilityOtherEligibility. # noqa: E501
:rtype: str
"""
return self._textual
@textual.setter
def textual(self, textual):
"""Sets the textual of this EligibilityOtherEligibility.
:param textual: The textual of this EligibilityOtherEligibility. # noqa: E501
:type: str
"""
self._textual = textual
@property
def type(self):
"""Gets the type of this EligibilityOtherEligibility. # noqa: E501
:return: The type of this EligibilityOtherEligibility. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this EligibilityOtherEligibility.
:param type: The type of this EligibilityOtherEligibility. # noqa: E501
:type: str
"""
allowed_values = ["DirectDebits", "ExistingCustomers", "MinimumOperatingBalance", "MinimumDeposit", "NewCustomersOnly", "PreviousBankruptcyAllowed", "Other", "StudentsOnly", "SoleStudentAccount", "SoleUkAccount", "SwitchersOnly", "UCASFulltimeTwoYears"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EligibilityOtherEligibility):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b4102df921bca0b0749ed68c2a70a6facd1a1291 | import os
import unittest
from typeguard import typechecked
from typing import List, Dict
import pandas as pd
from cgnal.core.logging.defaults import getDefaultLogger
from cgnal.core.tests.core import logTest, TestCase
from cgnal.core.utils.decorators import lazyproperty as lazy, param_check
from cgnal.core.utils.dict import (
groupIterable,
pairwise,
union,
flattenKeys,
unflattenKeys,
filterNones,
groupBy,
)
from cgnal.core.utils.fs import (
mkdir,
create_dir_if_not_exists,
get_lexicographic_dirname,
)
from cgnal.core.utils.pandas import is_sparse, loc
from tests import TMP_FOLDER
logger = getDefaultLogger()
class TestUtilsDict(TestCase):
@logTest
def test_groupIterable(self):
self.assertEqual(
[
el
for el in groupIterable(
{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6}, batch_size=3
)
],
[["a", "b", "c"], ["d", "e", "f"]],
)
self.assertEqual(
[
el
for el in groupIterable(
{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, batch_size=3
)
],
[["a", "b", "c"], ["d", "e"]],
)
self.assertEqual(
[
el
for el in groupIterable(
{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7},
batch_size=3,
)
],
[["a", "b", "c"], ["d", "e", "f"], ["g"]],
)
@logTest
def test_pairwise(self):
self.assertEqual(
[el for el in pairwise({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6})],
[("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f")],
)
self.assertEqual([el for el in pairwise({"a": 1})], [])
@logTest
def test_union(self):
self.assertEqual(
union({"1": {"a": 1}}, filterNones({"1": {"a": None}, "b": 1})),
{"1": {"a": 1}, "b": 1},
)
self.assertEqual(
union({"1": {"a": 1}}, filterNones({"1": {"a": 2}, "b": None})),
{"1": {"a": 2}},
)
self.assertEqual(
union({"1": None}, {"1": 1, "2": 3}, {"1": {"1a": 1, "1b": 2}, "3": 4}),
{"1": {"1a": 1, "1b": 2}, "2": 3, "3": 4},
)
@logTest
def test_flattenKeys(self):
self.assertEqual(
flattenKeys({"a": {"b": {"c": 2}}, "d": 2, "e": 3}, sep="."),
{"a.b.c": 2, "d": 2, "e": 3},
)
self.assertEqual(
flattenKeys({"a": {"b": {"c": 2}}, "a": 2, "e": 3}), {"a": 2, "e": 3}
)
@logTest
def test_unflattenKeys(self):
self.assertEqual(
unflattenKeys({"a.b.c": 2, "d": 2, "e": 3}, sep="."),
{"a": {"b": {"c": 2}}, "d": 2, "e": 3},
)
self.assertEqual(
unflattenKeys({"a.b.c": 2, "d": 2, "e": 3}, sep="_"),
{"a.b.c": 2, "d": 2, "e": 3},
)
@logTest
def test_filterNones(self):
self.assertEqual(filterNones({"a": 1, "b": None}), {"a": 1})
@logTest
def test_groupBy(self):
self.assertEqual(
[(k, v) for k, v in groupBy(["abc", "ab", "bcd", "c"], key=len)],
[(1, ["c"]), (2, ["ab"]), (3, ["abc", "bcd"])],
)
class TestUtilsFs(TestCase):
@logTest
def test_mkdir(self):
directory = os.path.join("/tmp", "test_utils_fs")
mkdir(directory)
self.assertTrue(os.path.exists(directory))
os.rmdir(directory)
@logTest
def test_create_dir_if_not_exists(self):
directory = os.path.join("/tmp", "test_utils_fs")
create_dir_if_not_exists(directory)
self.assertTrue(os.path.exists(directory))
os.rmdir(directory)
@logTest
def test_get_lexicographic_dirname(self):
create_dir_if_not_exists(os.path.join("/tmp", "zzz"))
self.assertEqual(get_lexicographic_dirname("/tmp", first=False), "zzz")
os.rmdir(os.path.join("/tmp", "zzz"))
class TestPandas(TestCase):
@logTest
def test_is_sparse(self):
self.assertTrue(
is_sparse(
pd.DataFrame(
{
"v1": pd.arrays.SparseArray([0, 0, 0, 0, 1]),
"v2": pd.arrays.SparseArray([1, 0, 0, 0, 1]),
"v3": pd.arrays.SparseArray([1, 0, 0, 0, 0]),
}
)
)
)
self.assertFalse(
is_sparse(
pd.DataFrame(
{
"v1": [0, 0, 0, 0, 1],
"v2": pd.arrays.SparseArray([1, 0, 0, 0, 1]),
"v3": pd.arrays.SparseArray([1, 0, 0, 0, 0]),
}
)
)
)
@logTest
def test_loc(self):
res = pd.DataFrame({"v1": [0], "v2": [1], "v3": [1]})
self.assertTrue(
(
loc(
pd.DataFrame(
{
"v1": [0, 0, 0, 0, 1],
"v2": pd.arrays.SparseArray([1, 0, 0, 0, 1]),
"v3": pd.arrays.SparseArray([1, 0, 0, 0, 0]),
}
),
[0],
)
== res
)
.all()
.all()
)
# TODO: scipy is missing from the requirements?
self.assertTrue(
(
loc(
pd.DataFrame(
{
"v1": pd.arrays.SparseArray([0, 0, 0, 0, 1]),
"v2": pd.arrays.SparseArray([1, 0, 0, 0, 1]),
"v3": pd.arrays.SparseArray([1, 0, 0, 0, 0]),
}
),
[0],
)
== res
)
.all()
.all()
)
self.assertTrue(
is_sparse(
loc(
pd.DataFrame(
{
"v1": pd.arrays.SparseArray([0, 0, 0, 0, 1]),
"v2": pd.arrays.SparseArray([1, 0, 0, 0, 1]),
"v3": pd.arrays.SparseArray([1, 0, 0, 0, 0]),
}
),
[0],
)
)
)
@typechecked
class MyTestClass:
def __init__(self, param: str = "test"):
self.param = param
@lazy
def list_param(self) -> List:
return [1, 2, 3]
def dict_constructor(self, k_vals: List[str], v_vals: List[List[int]]) -> Dict:
return {k: v for k, v in zip(k_vals, v_vals)}
class MyClass:
def __init__(self, param: str = "test"):
self.param = param
@lazy
def list_param(self) -> List:
return [1, 2, 3]
# TODO: param_check decorator breaks when specification of types contained within collections is present.
# e.g. dict_constructor(self, k_vals: List[str], v_vals: List[List[int]])
# generates "...TypeError: Parameterized generics cannot be used with class or instance checks"
@param_check(with_none=False)
def dict_constructor(self, k_vals: List, v_vals: List) -> Dict:
return {k: v for k, v in zip(k_vals, v_vals)}
class TestDecorators(TestCase):
@logTest
def test_lazyproperty(self):
ex = MyClass()
self.assertEqual(ex.__dict__, {"param": "test"})
info = f"Testing lazyproperty decorator. Let's try to call the list_param={ex.list_param} attribute."
logger.info(info)
self.assertEqual(ex.__dict__, {"param": "test", "list_param": [1, 2, 3]})
@logTest
def test_param_check(self):
ex = MyClass()
self.assertEqual(
ex.dict_constructor(
k_vals=["a", "b", "c"], v_vals=[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
),
{"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]},
)
self.assertRaises(TypeError, ex.dict_constructor, k_vals="a", v_vals=[1, 2, 3])
self.assertRaises(
ValueError, ex.dict_constructor, k_vals=None, v_vals=[1, 2, 3]
)
@logTest
def test_param_check_with_typeguard(self):
ex = MyTestClass()
self.assertEqual(
ex.dict_constructor(
k_vals=["a", "b", "c"], v_vals=[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
),
{"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]},
)
self.assertRaises(TypeError, ex.dict_constructor, k_vals="a", v_vals=[1, 2, 3])
self.assertRaises(TypeError, ex.dict_constructor, k_vals=None, v_vals=[1, 2, 3])
def test_cache_io(self):
from cgnal.core.utils.decorators import Cached
# from time import sleep
class A(Cached):
def __init__(self, cnt):
self.cnt = cnt
@Cached.cache
def my_long_computation(self):
# _ = sleep(1)
self.cnt = self.cnt + 1
return self.cnt
a = A(0)
# This should compute the value
self.assertEqual(a.my_long_computation, 1)
# This should get the retrieve data
self.assertEqual(a.my_long_computation, 1)
filename = os.path.join(TMP_FOLDER, "save_pickles_test")
a.save_pickles(filename)
b = A(1)
b.load(filename)
# This should get the retrieve data
self.assertEqual(b.my_long_computation, 1)
b.clear_cache()
# This should compute the value
self.assertEqual(b.my_long_computation, 2)
class TestDocumentArchivers(TestCase):
url = "http://192.168.2.110:8686"
test_file = "tests/test.txt"
# @logTest
# def test_base_function(self):
# sync = CloudSync(self.url, TMP_FOLDER)
#
# namefile = sync.get_if_not_exists(self.test_file)
#
# self.assertTrue(os.path.exists( namefile ))
#
# os.remove( namefile )
#
#
# @logTest
# def test_decorator(self):
#
# sync = CloudSync(self.url, TMP_FOLDER)
#
# @sync.get_if_not_exists_decorator
# def decorated_function(filename):
# return filename
#
# namefile = decorated_function(self.test_file)
#
# self.assertTrue(os.path.exists( namefile ))
#
# os.remove( namefile )
#
# @logTest
# def test_multiple(self):
# sync = CloudSync(self.url, TMP_FOLDER)
#
# namefile = sync.get_if_not_exists(self.test_file)
#
# self.assertTrue(os.path.exists(namefile))
#
# sleep(3)
#
# time = os.path.getmtime(namefile)
#
# namefile = sync.get_if_not_exists(self.test_file)
#
# time2 = os.path.getmtime(namefile)
#
# self.assertTrue(time==time2)
#
# os.remove(namefile)
#
#
# @logTest
# def test_upload(self):
#
# sync = CloudSync(self.url, TMP_FOLDER)
#
# namefile = sync.get_if_not_exists(self.test_file)
#
# upload = f"{self.test_file}.upload"
#
# os.rename(namefile, sync.pathTo(upload))
#
# sync.upload(upload)
#
# os.remove(sync.pathTo(upload))
#
# namefile_new = sync.get_if_not_exists(upload)
#
# self.assertTrue(os.path.exists(namefile_new))
if __name__ == "__main__":
unittest.main()
|
py | b4102e1c33e5db175796260a6996b48b9b297487 | import json
from sure import expect
from tests.integration import VCRIntegrationTest, vcr
class TestCanvas(VCRIntegrationTest):
@vcr.use_cassette
def test_chain_send_result(self):
events = self.run_standalone("tests.integration.workflow.ChainTestWorkflow")
last_event = events[-1]
expect(last_event["eventType"]).to.equal("WorkflowExecutionCompleted")
result = json.loads(
last_event["workflowExecutionCompletedEventAttributes"]["result"]
)
expect(result).to.equal([6, 12])
@vcr.use_cassette
def test_child_workflow(self):
events = self.run_standalone(
"tests.integration.workflow.GroupTestWorkflowWithChild"
)
last_event = events[-1]
expect(last_event["eventType"]).to.equal("WorkflowExecutionCompleted")
result = json.loads(
last_event["workflowExecutionCompletedEventAttributes"]["result"]
)
expect(result).to.equal([[5, 10]])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.